+ ./ya make . -T --test-size=small --test-size=medium --stat --test-threads 52 --link-threads 12 -DUSE_EAT_MY_DATA --build relwithdebinfo -DDEBUGINFO_LINES_ONLY --bazel-remote-store --bazel-remote-base-uri http://cachesrv.internal:8081 --bazel-remote-username cache_user --bazel-remote-password-file /tmp/tmp.ZuY6finTsT --bazel-remote-put --dist-cache-max-file-size=209715200 -A --retest --stat -DCONSISTENT_DEBUG --no-dir-outputs --test-failure-code 0 --build-all --cache-size 2TB --force-build-depends --log-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/ya_log.txt --evlog-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/ya_evlog.jsonl --junit /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/junit.xml --build-results-report /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/report.json --output /home/runner/actions_runner/_work/ydb/ydb/tmp/out Output root is subdirectory of Arcadia root, this may cause non-idempotent build Configuring dependencies for platform default-linux-x86_64-relwithdebinfo [2 ymakes processing] [7234/7236 modules configured] [2 ymakes processing] [7306/7306 modules configured] [2 ymakes processing] [7533/7536 modules configured] [2 ymakes processing] [7598/7598 modules configured] Configuring dependencies for platform tools [3 ymakes processing] [8202/8202 modules configured] Warn[-WPluginErr]: in $B/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium: Requirement ram is redefined 16 -> 28 [3 ymakes processing] [8202/8202 modules configured] [143/143 modules rendered] [2 ymakes processing] [8202/8202 modules configured] [4518/4597 modules rendered] [2 ymakes processing] [8202/8202 modules configured] [4597/4597 modules rendered] Configuring dependencies for platform test_tool_tc1-global [0 ymakes processing] [8208/8208 modules configured] [4597/4597 modules rendered] Configuring tests execution Configuring local and dist store caches Configuration done. 
Preparing for execution |33.3%| CLEANING SYMRES | 0.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/shard/libcore-graph-shard.a | 2.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/query_actor/libydb-library-query_actor.a | 2.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/commands/libclicommands.a | 3.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots | 3.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_aggregate_data.cpp | 3.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/test_connection/libfq-libs-test_connection.a | 4.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/signals/libydb-library-signals.a | 5.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/out/libcore-protos-out.a | 5.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a | 5.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a | 6.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/graph/shard/backends.cpp | 6.3%| PREPARE $(VCS) | 6.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/deprecated/http-parser/libcontrib-deprecated-http-parser.a | 6.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/accessors/libcpp-deprecated-accessors.a | 6.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dbg_output/liblibrary-cpp-dbg_output.a | 7.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzmasdk/libcontrib-libs-lzmasdk.a | 7.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cpuid_check/liblibrary-cpp-cpuid_check.global.a | 7.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/coroutine/listener/libcpp-coroutine-listener.a | 8.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/coroutine/engine/libcpp-coroutine-engine.a | 7.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libiconv/static/liblibs-libiconv-static.a | 7.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/test_connection/test_object_storage.cpp | 7.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/curl/libcontrib-libs-curl.a | 8.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a | 9.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/private.cpp | 9.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/slide_limiter/service/liblibrary-slide_limiter-service.a |10.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/ydb_cli/commands/ydb_sql.cpp |10.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/str_map/libcpp-containers-str_map.a |11.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/enumbitset/liblibrary-cpp-enumbitset.a |11.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/slide_limiter/usage/liblibrary-slide_limiter-usage.a |11.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_service_auth.cpp |11.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/base64/libcpp-string_utils-base64.a |12.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/resource/liblibrary-cpp-resource.a |12.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |12.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_ping.cpp |12.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/deprecated/json/libmonlib-deprecated-json.a |12.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/abstract/libengines-scheme-abstract.a |12.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_service_export.cpp |12.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/random_provider/liblibrary-cpp-random_provider.a |12.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/csv/libcpp-string_utils-csv.a |13.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/libydb-library-schlab.a |13.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/ydb_discovery/libydb_cli_command_ydb_discovery.a |13.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/mime/types/libcpp-mime-types.a |13.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/hyperscan/libcpp-regex-hyperscan.a |13.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/liblibs-aws-sdk-cpp-aws-cpp-sdk-core.a |13.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/tcmalloc/libcpp-malloc-tcmalloc.a |13.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_service_monitoring.cpp |13.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_service_operation.cpp |13.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/sorted_vector/libcpp-containers-sorted_vector.a |13.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_service_import.cpp |13.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/pcre/libcpp-regex-pcre.a |13.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/iam_private/libsrc-client-iam_private.a |13.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/actor/libmessagebus_actor.a |13.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/protos/liblibs-row_dispatcher-protos.a |13.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_storage_config.cpp |13.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_service_scripting.cpp |13.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/protos/out/out_sequenceshard.cpp |13.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_service_scheme.cpp |14.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/object_counter.cpp |14.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_tools.cpp |14.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_tools_infer.cpp |14.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/test_connection/test_monitoring.cpp |14.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/histogram.cpp |14.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/test_connection/counters.cpp |14.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/client.cpp |14.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_workload.cpp |14.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_service_topic.cpp |14.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/test_connection/probes.cpp |14.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/agent.cpp |14.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/protos/out/out_long_tx_service.cpp |14.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/protos/out/out_cms.cpp |14.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_service_table.cpp |15.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/states.cpp |15.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/svn_version/libpy3library-python-svn_version.global.a |15.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/resource/libpy3library-python-resource.global.a |15.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/svn_version/libpy3library-python-svn_version.a |15.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/func/libpy3library-python-func.global.a |16.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/rsa/py3/libpy3python-rsa-py3.global.a |16.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/python/pure-eval/libpy3contrib-python-pure-eval.global.a |16.2%| PREPARE $(YMAKE_PYTHON3-4256832079) |16.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1/py3/libpy3python-pyasn1-py3.global.a |16.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.a |16.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ptyprocess/py3/libpy3python-ptyprocess-py3.global.a |16.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/protobuf/py3/libpy3python-protobuf-py3.global.a |16.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/py/py3/libpy3python-py-py3.global.a |16.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.text/libpy3contrib-python-jaraco.text.global.a |16.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1-modules/py3/libpy3python-pyasn1-modules-py3.global.a |17.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jedi/py3/libpy3python-jedi-py3.global.a |17.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/requests-oauthlib/libpy3contrib-python-requests-oauthlib.global.a |18.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyrsistent/py3/libpy3python-pyrsistent-py3.global.a |18.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/libpy3essentials-public-types.global.a |18.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/python-dateutil/py3/libpy3python-python-dateutil-py3.global.a |18.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libpy3core-file_storage-proto.global.a |18.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pycparser/py3/libpy3python-pycparser-py3.global.a |18.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/query_actor/query_actor.cpp |18.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytest/py3/libpy3python-pytest-py3.global.a |19.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/certifi/libpy3library-python-certifi.global.a |20.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/libffi/libcontrib-restricted-libffi.a |20.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/requests/py3/libpy3python-requests-py3.global.a |20.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.global.a |20.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/fs/libpy3library-python-fs.global.a |20.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/jemalloc/libcpp-malloc-jemalloc.a |20.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/llhttp/libcontrib-restricted-llhttp.a |20.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml/py3/libpy3python-ruamel.yaml-py3.global.a |21.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/find_root/libpy3library-python-find_root.global.a |21.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/slide_limiter/usage/events.cpp |21.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libpy3api-protos-annotations.global.a |21.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/filelock/libpy3library-python-filelock.global.a |21.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/cores/libpy3library-python-cores.global.a |21.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/iniconfig/libpy3contrib-python-iniconfig.global.a |21.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipdb/py3/libpy3python-ipdb-py3.global.a |21.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/importlib-resources/libpy3contrib-python-importlib-resources.global.a |20.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |20.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/allure-pytest/libpy3contrib-python-allure-pytest.global.a |21.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/slide_limiter/usage/abstract.cpp |21.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.context/libpy3contrib-python-jaraco.context.global.a |21.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/idna/py3/libpy3python-idna-py3.global.a |21.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/grpcio/py3/libpy3python-grpcio-py3.global.a |21.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/reservoir_sampling/libpy3library-python-reservoir_sampling.global.a |21.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/allure-python-commons/libpy3contrib-python-allure-python-commons.global.a |21.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libpy3columnshard-common-protos.global.a |22.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/registry/libpython-symbols-registry.a |22.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libpy3scheme-defaults-protos.global.a |22.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/python/libpy3cpython-symbols-python.global.a |22.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/filter/libpy3python-testing-filter.global.a |22.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/kubernetes/libpy3contrib-python-kubernetes.global.a |22.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/import_test/libpy3python-testing-import_test.global.a |22.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/frozenlist/libpy3contrib-python-frozenlist.global.a |22.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libpy3core-scheme-protos.global.a |22.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/yatest_common/libpy3python-testing-yatest_common.global.a |22.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/frozenlist/libpy3contrib-python-frozenlist.a |22.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyYAML/py3/libpy3python-PyYAML-py3.global.a |22.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/test_connection/test_data_streams.cpp |22.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/google-auth/py3/libpy3python-google-auth-py3.global.a |22.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/libpy3libs-config-protos.global.a |22.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libpy3core-config-protos.global.a |22.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libpy3core-protos-schemeshard.global.a |23.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/pytest/plugins/libpy3python-pytest-plugins.global.a |23.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipython/py3/libpy3python-ipython-py3.global.a |23.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/import_tracing/lib/libpy3python-import_tracing-lib.global.a |23.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/import_tracing/constructor/libpy3python-import_tracing-constructor.global.a |23.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/test_connection/test_connection.cpp |23.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/yatest_lib/libpy3python-testing-yatest_lib.global.a |23.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_startup.cpp |23.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libpy3columnshard-engines-protos.global.a |23.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/windows/libpy3library-python-windows.global.a |23.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/pytest/libpy3library-python-pytest.global.a |23.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/strings/libpy3library-python-strings.global.a |24.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/strings/libpy3library-python-strings.a 
|24.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/libpy3library-python-runtime_py3.a |24.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/libpy3library-python-runtime_py3.global.a |24.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/main/libpython-runtime_py3-main.a |24.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protoc/libpy3protobuf-builtin_proto-protos_from_protoc.global.a |24.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/typing-extensions/py3/libpy3python-typing-extensions-py3.global.a |24.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/signals/owner.cpp |24.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Lib/libpy3tools-python3-Lib.global.a |25.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Modules/_sqlite/libpy3python3-Modules-_sqlite.a |25.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/libc/libpython-symbols-libc.global.a |25.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/formats/libyt_proto-yt-formats.a |25.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/module/libpy3python-symbols-module.a |25.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/module/libpy3python-symbols-module.global.a |26.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_init_schema.cpp |26.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/undumpable/libyt-library-undumpable.a |26.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/ytprof/api/liblibrary-ytprof-api.a |26.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_monitoring.cpp |26.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/libyt_proto-yt-core.a |26.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tz_types/libyt-library-tz_types.a |26.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tvm/libyt-library-tvm.a |26.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tracing/libyt-library-tracing.a |27.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/signals/libyt-library-signals.a |27.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/re2/libyt-library-re2.a |27.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/quantile_digest/libyt-library-quantile_digest.a |27.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Modules/_sqlite/libpy3python3-Modules-_sqlite.global.a |27.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/skiff_ext/libyt-library-skiff_ext.a |27.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.global.a |27.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/stack-data/libpy3contrib-python-stack-data.global.a |28.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tenacity/py3/libpy3python-tenacity-py3.global.a |28.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.a |28.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/typeguard/libpy3contrib-python-typeguard.global.a |26.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/lib2/py/libpy3python3-lib2-py.global.a |26.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/numpy/random/libpy3py3-numpy-random.global.a |26.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/traitlets/py3/libpy3python-traitlets-py3.global.a |26.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/six/py3/libpy3python-six-py3.global.a |26.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jmespath/py3/libpy3python-jmespath-py3.global.a |26.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_store_metrics.cpp |26.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/wcwidth/py3/libpy3python-wcwidth-py3.global.a |26.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libf2c/libcontrib-libs-libf2c.a |26.9%| [AR] 
{BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protobuf/libpy3protobuf-builtin_proto-protos_from_protobuf.global.a |26.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/wheel/libpy3contrib-python-wheel.global.a |27.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/client/libyt-yt-client.a |27.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/websocket-client/libpy3contrib-python-websocket-client.global.a |27.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/urllib3/py3/libpy3python-urllib3-py3.global.a |27.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/yarl/libpy3contrib-python-yarl.global.a |27.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cryptography/py3/libpy3python-cryptography-py3.global.a |27.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/s3transfer/py3/libpy3python-s3transfer-py3.global.a |27.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/erasure/libyt-library-erasure.a |27.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/protos/out/out.cpp |27.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.global.a |27.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/decimal/libyt-library-decimal.a |28.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/setuptools/py3/libpy3python-setuptools-py3.global.a |28.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/proto/libytflow-integration-proto.a |28.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytz/py3/libpy3python-pytz-py3.global.a |28.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ydb/py3/libpy3python-ydb-py3.global.a |28.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.a |28.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/libyt-library-profiling.a |28.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/arrow/libyt-client-arrow.a |28.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/skiff/libyt-lib-skiff.a |28.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/key_filter/libyt-lib-key_filter.a |28.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/lambda_builder/libyt-lib-lambda_builder.a |28.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/build/libyt-yt-build.a |28.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/column_converters/libyt-library-column_converters.a |28.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/hash/libyt-lib-hash.a |28.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/query_tracker_client/libyt-client-query_tracker_client.a |28.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/arrow/fbs/libclient-arrow-fbs.a |29.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/libpy3python-numpy-py3.global.a |28.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/job/libproviders-yt-job.a |29.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/qplayer/libyt-gateway-qplayer.a |29.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr4/libv1-proto_parser-antlr4.a |29.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/numpy/random/libpy3py3-numpy-random.a |29.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/exception_policy/libudf-service-exception_policy.global.a |29.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4/libv1-lexer-antlr4.a |29.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/botocore/py3/libpy3python-botocore-py3.global.a |29.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/decimal/libessentials-public-decimal.a |29.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/misc/isa_crc64/libisa-l_crc_yt_patch.a |30.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/public/udf/libessentials-public-udf.a |30.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/pg/expr_nodes/libproviders-pg-expr_nodes.a |30.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/structured_token/libproviders-common-structured_token.a |30.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/config/libessentials-providers-config.a |30.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/expr/libcommon-schema-expr.a |30.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/metrics/protos/libcommon-metrics-protos.a |31.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/antlr3/libparser-proto_ast-antlr3.a |31.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/formats/libyt-client-formats.a |31.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/https/libyt-core-https.a |31.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/libessentials-parser-pg_catalog.global.a |31.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/acl.cpp |31.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/provider/libproviders-common-provider.a |31.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/slide_limiter/service/service.cpp |31.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_id_or_alias.cpp |31.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/mkql/libproviders-common-mkql.a |31.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/numeric/libyt-library-numeric.a |31.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/libyt_proto-yt-client.a |31.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/procfs/libyt-library-procfs.a |31.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/auth/libyt-library-auth.a |31.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/libessentials-parser-pg_catalog.a |31.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |31.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/noop_timestamp_provider.cpp |31.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/libyt-yt-core.global.a |31.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_upload_options.cpp |31.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/base32/libcpp-string_utils-base32.a |32.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/helpers.cpp |32.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_cache.cpp |32.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/libproviders-common-proto.a |32.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache.cpp |32.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/timestamp_provider_base.cpp |32.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_io_options.cpp |32.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/yarl/libpy3contrib-python-yarl.a |32.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_batch.cpp |32.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/http/libyt-core-http.a |32.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_helpers.cpp |32.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_base.cpp |32.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar_statistics.cpp |32.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/chunk_stripe_statistics.cpp |32.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/merge_table_schemas.cpp |32.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/column_sort_schema.cpp |32.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/client/table_client/column_rename_descriptor.cpp |32.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/helpers.cpp |32.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/check_schema_compatibility.cpp |32.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/queue_rowset.cpp |33.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/query_client/query_statistics.cpp |33.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/ut_common.cpp |33.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/common.cpp |33.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/libpy3python-numpy-py3.a |33.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |33.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/method_helpers.cpp |33.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/partition_reader.cpp |33.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v0/libproto_ast-gen-v0.a |33.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/workload.cpp |33.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/config.cpp |33.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/producer_client.cpp |33.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/node_directory.cpp |33.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/slide_limiter/usage/config.cpp |34.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/transaction.cpp |34.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_settings.cpp |34.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/shuffle_client.cpp |34.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_client.cpp |34.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_stream.cpp |34.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/security_client.cpp |34.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |34.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/sqlite3/libcontrib-libs-sqlite3.a |34.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_writer.cpp |34.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/libcontrib-tools-python3.a |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/normalization/libcpp-unicode-normalization.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/proto/libjsonpath-rewrapper-proto.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/hyperscan/libjsonpath-rewrapper-hyperscan.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_ansi_antlr4/libproto_ast-gen-v1_ansi_antlr4.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/libminikql-jsonpath-rewrapper.a |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/config.cpp |35.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/libessentials-minikql-jsonpath.a |35.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/datetime/libessentials-minikql-datetime.a |35.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |35.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/parser/libminikql-jsonpath-parser.a |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rowset.cpp |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/arrow/libessentials-minikql-arrow.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/codegen/llvm16/libminikql-codegen-llvm16.a |35.2%| [AR] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/libydb-services-ydb.a |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/query_tracker_client.cpp |35.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/dom/libessentials-minikql-dom.a |35.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/llvm16/libminikql-computation-llvm16.a |35.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ymq/libydb-services-ymq.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_lister/interface/libcore-url_lister-interface.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/user_data/libessentials-core-user_data.a |35.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_preprocessing/interface/libcore-url_preprocessing-interface.a |35.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/sql_types/libessentials-core-sql_types.a |35.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.global.a |35.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |35.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |35.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |35.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/lib/actors/libservices-lib-actors.a |35.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.a |35.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/kesus/libydb-services-kesus.a |35.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/fq/libydb-services-fq.a |35.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/interface/libqplayer-storage-interface.a |35.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |36.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/udf_resolver/libcore-qplayer-udf_resolver.a |36.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |36.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/pg_settings/libessentials-core-pg_settings.a |36.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes/libessentials-core-expr_nodes.a |36.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/histogram/libessentials-core-histogram.a |36.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_a65a4fae8912a32233240d3c51.o |36.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes_gen/libessentials-core-expr_nodes_gen.a |36.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/slide_limiter/usage/service.cpp |36.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/transform/libcore-dq_integration-transform.a |36.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/ast/serialize/libessentials-ast-serialize.a |36.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/cbo/libessentials-core-cbo.a |36.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v0_proto_split/libproto_ast-gen-v0_proto_split.a |36.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/libessentials-core-services.a |36.4%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |36.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/view/libydb-services-view.a |36.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/cluster_ordering/libservices-persqueue_cluster_discovery-cluster_ordering.a |36.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/monitoring/libydb-services-monitoring.a |36.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/extract_predicate/libessentials-core-extract_predicate.a |36.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/lib/libpy3tests-olap-lib.global.a |37.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/extractor/libext_index-metadata-extractor.global.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/stress/libpy3tests-library-stress.global.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/value/libsrc-client-value.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/protos/libapi-protos-persqueue-deprecated.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/status/libclient-types-status.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/rate_limiter/libydb-services-rate_limiter.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/operation/libclient-types-operation.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/string_utils/helpers/liblibrary-string_utils-helpers.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/fatal_error_handlers/libclient-types-fatal_error_handlers.a |37.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/codecs/libclient-topic-codecs.global.a |37.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/exceptions/libclient-types-exceptions.a |37.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/common/libimpl-ydb_internal-common.a |37.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/libsrc-client-federated_topic.a |37.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_endpoints/libclient-impl-ydb_endpoints.a |37.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/keyvalue/libydb-services-keyvalue.a |37.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/iam/libsrc-client-iam.a |37.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/db_driver_state/libimpl-ydb_internal-db_driver_state.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/ast/libyql-essentials-ast.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/extensions/solomon_stats/libclient-extensions-solomon_stats.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/extension_common/libsrc-client-extension_common.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/formats/libyt-library-formats.a |38.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ymq/utils.cpp |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/driver/libsrc-client-driver.a |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_scripting.cpp |38.6%| PREPARE $(LLD_ROOT-3808007503) |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/rate_limiter/libsrc-client-rate_limiter.a |38.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_object_storage.cpp |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/type_ann/libessentials-core-type_ann.a |38.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_worker.cpp |38.5%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/services/persqueue_v1/actors/codecs.cpp |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/export/libsrc-client-export.a |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/helpers.cpp |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/libessentials-minikql-computation.a |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_export.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_import.cpp |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/impl/libclient-federated_topic-impl.a |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_operation.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_scheme.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_query.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_logstore.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_clickhouse_internal.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_cluster_discovery/counters.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_debug.cpp |40.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/deprecated/kicli/liblib-deprecated-kicli.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/events/libproviders-solomon-events.a |40.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/worker_manager/interface/libdq-worker_manager-interface.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/transform/libyql-dq-transform.a |40.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/runtime/libyql-dq-runtime.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/libapi-service-protos.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/runtime/libproviders-dq-runtime.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/state/libyql-dq-state.a |40.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/fq/private_grpc.cpp |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/task_runner/libproviders-dq-task_runner.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/type_ann/libyql-dq-type_ann.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/task_runner_actor/libproviders-dq-task_runner_actor.a |40.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_table.cpp |41.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/actors/libproviders-generic-actors.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/libconnector-api-service.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/probes/liblibrary-schlab-probes.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/worker_manager/libproviders-dq-worker_manager.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/liblibrary-login-protos.a |41.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_impl.cpp |41.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/account_lockout/liblibrary-login-account_lockout.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/provider/libproviders-generic-provider.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/http_proxy/error/liblibrary-http_proxy-error.a |41.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/libydb-library-mkql_proto.a |40.8%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/ymq/actor/cloud_events/libymq-actor-cloud_events.a |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/libydb-library-aclib.a |42.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/actors/yql_generic_token_provider.cpp |42.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/actors/yql_generic_provider_factories.cpp |42.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a |41.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/actors/yql_generic_read_actor.cpp |41.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/base/libcore-ymq-base.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/ut_helpers/libcore-wrappers-ut_helpers.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/json/libcore-viewer-json.a |41.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |41.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_columns_resolve.cpp |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/pushdown/libproviders-generic-pushdown.a |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/proto/libproviders-generic-proto.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/expr_nodes/libproviders-generic-expr_nodes.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/compute/libdq-actors-compute.a |42.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |40.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/general_cache/service/libtx-general_cache-service.a |40.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/libydb-core-tx.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.global.a |41.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a |41.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |41.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/lib/auth/auth_helpers.cpp |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/common/libproviders-solomon-common.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/libcpp/libgeneric-connector-libcpp.a |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/actors_factory/libproviders-s3-actors_factory.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/gateway/native/libpq-gateway-native.a |41.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/pq/async_io/libproviders-pq-async_io.a |41.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |41.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/expr_nodes/libproviders-pq-expr_nodes.a |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/common/libproviders-pq-common.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/task_meta/libproviders-pq-task_meta.a |41.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/credentials/libproviders-s3-credentials.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/libproviders-pq-proto.a |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/actors/libproviders-solomon-actors.a |41.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/providers/s3/common/libproviders-s3-common.a |42.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/re2_udf.cpp |42.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/math_udf.cpp |42.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/json2_udf.cpp |42.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/digest_udf.cpp |42.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_identificators.cpp |42.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service.cpp |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/actors/libproviders-s3-actors.a |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/invoke_builtins/llvm16/libminikql-invoke_builtins-llvm16.a |44.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_dummy.cpp |44.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_cluster_discovery/grpc_service.cpp |44.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/opt/libyql-dq-opt.a |45.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/pq/async_io/dq_pq_write_actor.cpp |46.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ymq/grpc_service.cpp |47.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.cpp |47.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/generic/actors/yql_generic_lookup_actor.cpp |47.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.cpp |48.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/persqueue_utils.cpp |49.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/initializer.cpp |49.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/manager.cpp |49.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/access.cpp |49.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/secret.cpp |49.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/fetcher.cpp |49.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/fetcher.cpp |49.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.cpp |50.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/direct_read_actor.cpp |50.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/fq/ydb_over_fq.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/fq/grpc_service.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/update_offsets_in_transaction_actor.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/snapshot.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/object.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/checker_secret.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cansel_build_index.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/read_init_auth_actor.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/manager.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ymq/ymq_proxy.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/lib/actors/pq_schema_actor.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/services/persqueue_v1/topic.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/kesus/grpc_service.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/checker_access.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/read_session_actor.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/snapshot.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/behaviour.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/initializer.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/schema_actors.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_billing_helpers.cpp |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/services_initializer.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/partition_writer_cache_actor.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/distributed_commit_helper.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/partition_actor.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/grpc_pq_write.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_impl.cpp |51.2%| PREPARE $(OS_SDK_ROOT-sbr:243881345) |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/write_session_actor.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/commit_offset_actor.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_scan.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/grpc_pq_read.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_common.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_index.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/object.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__forget.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/grpc_pq_schema.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/partition_writer.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_fs.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/common/kqp_ut_common.cpp |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/libyt-yt-core.a |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__background_compaction.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__find_subdomain_path_id.cpp |50.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |50.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init_populator.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__data_erasure_manager.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__backup_collection_common.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__list_users.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__list.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_cdc_stream.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__clean_pathes.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_incremental_backup_collection.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_bsv.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_continuous_backup.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__conditional_erase.cpp |50.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp |50.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_fs.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log_fragment.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_continuous_backup.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__background_cleaning.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_table.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/pq/async_io/dq_pq_meta_extractor.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/pq/async_io/probes.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_table.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_solomon.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_sequence.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard_effective_acl.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__borrowed_compaction.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__notify.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/unicode_udf.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_data_source.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_index.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_system_names.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init_root.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__login.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_data_source.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_continuous_backup.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/string_udf.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_bsv.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_login_helper.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path_element.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/datetime2_udf.cpp |50.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp |50.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_build_index.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/user_attributes.cpp |50.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_xxport__helpers.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore_incremental_backup.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_types.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_types.h_serialized.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_blob_depot.cpp |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/serializations/libproviders-s3-serializations.a |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp |51.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/libstorage-optimizer-tiling.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/events/libproviders-s3-events.a |51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/schemeshard__fix_bad_paths.cpp |51.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/cli_config_base/libcore-driver_lib-cli_config_base.a |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp |51.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/metadata/libcore-client-metadata.a |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__login_finalize.cpp |50.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blockstore/core/libcore-blockstore-core.a |50.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__make_access_database_no_inheritable.cpp |50.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp |50.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_pq.cpp |50.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_backup_collection.cpp |50.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init_schema.cpp |50.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |50.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a |50.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/server/libcore-client-server.a |50.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_bsv.cpp |50.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cancel_tx.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_apply_build_index.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/ic_nodes_cache_service.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_sequence.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_change_path_state.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/http_ping.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_view.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgreader.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgimpl.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup_collection.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_consistent_copy_tables.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__op_traits.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmem.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_backup_collection.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_cdc_stream.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_assign_bsv.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sysview.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_lock.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_index.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogformat.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/compile_result.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_fs.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/compile_context.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_lock.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogneighbors.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgwriter.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_indexed_table.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_replication.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/db_key_resolver.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_resource_pool.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_bg_tasks__list.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_view.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_cdc_stream.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_table.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_configdummy.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import_scheme_query_executor.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sysview.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_sequence.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_resource_pool.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_self_pinger.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_rtmr.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_initiate_build_index.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_index.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_just_reject.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/minikql_compile/mkql_compile_service.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_tables.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_memory_changes.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__publish_to_scheme_board.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table_index.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log.cpp |51.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |50.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/libydb-core-cms.a |50.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_table.cpp |50.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/util/libcms-console-util.a |50.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_restore_backup_collection.cpp |50.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/program/libcore-tx-program.a |50.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/libcore-cms-console.a |50.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |50.5%| [AR] {BAZEL_DOWNLOAD, 
FAILED} $(B)/ydb/core/cms/console/validators/libcms-console-validators.a |50.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/jsonpath/libproto_ast-gen-jsonpath.a |50.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/libcore-config-init.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/control/lib/libcore-control-lib.a |50.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/discovery/libydb-core-discovery.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/debug/libydb-core-debug.a |50.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp |50.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor_composite/service/libtx-conveyor_composite-service.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/libydb-core-cms.global.a |50.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/docapi/libydb-core-docapi.a |49.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor_composite/usage/libtx-conveyor_composite-usage.a |50.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |50.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_db.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogrecovery.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_schema.cpp |51.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/control/libydb-core-control.a |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmultiput_actor.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/services.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/services.h_serialized.cpp |51.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/base_utils/libbase_utils.a |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogreader.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_backup.cpp |51.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdsk.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/audit_log.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_utils.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_overload_handler.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__create.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_syncloghttp.cpp |51.6%| PREPARE $(CLANG_FORMAT-3855767795) |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_tracker.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmovedpatch_actor.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_info_types.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_part.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/grpc_proxy_status.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_committer.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__forget.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_task.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxyobtain.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__cancel.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__forget.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_firstrun.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__cancel.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export_flow_proposals.cpp |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__get.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxywrite.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_access_database.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata_proxy.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/tiling.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_fill_node.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_domain_links.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_config_base/config_base.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_state.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__get.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog_private_events.cpp |51.7%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_recovery.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_shred.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_console.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_tx_request.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/common/columnshard.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__list.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_hive_create_tablet.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_shard_deleter.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_resolve_node.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_persqueue.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_compactionstate.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_scheme_initroot.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_path.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_svp_migration.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__unmark_restore_tables.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_propagator.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/console_audit.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_scheduler.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__list.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__get.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_types.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_test_shard_request.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/tx_processor.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__create.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/grpc_library_helper.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/control/immediate_control_board_actor.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/client/server/msgbus_server_drain_node.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_block_and_get.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_tablet_counters.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_http_server.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_tablet_state.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/conveyor_composite/service/counters.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_node_registration.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_blobstorage_config.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_mon_dbmainpage.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor_composite/usage/common.h_serialized.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_cms.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_loggedrec.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_monactors.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/node_checkers.h_serialized.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_actor.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/node_checkers.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/conveyor_composite/service/common.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfull.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/discovery/discovery.cpp |51.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/libydb-core-external_sources.a |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfullhandler.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_data_source.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_source_factory.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/validation_functions.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_ic_debug.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_source_builder.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_store_walle_task.cpp |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_load_state.cpp |51.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__init_scheme.cpp |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.global.a |51.6%| 
[AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite_serial/libarrow-accessor-composite_serial.a |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/grpc_server.cpp |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/object_storage/libcore-external_sources-object_storage.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/libydb-core-erasure.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/abstract/libarrow-accessor-abstract.a |51.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/version/libversion.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/filestore/core/libcore-filestore-core.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/object_storage/inference/libexternal_sources-object_storage-inference.a |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_row_reorderer.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/worker.cpp |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libcore-config-protos.a |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_process_notification.cpp |51.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/run/librun.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite/liblibrary-formats-arrow-accessor-composite.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/common/liblibrary-formats-arrow-accessor-common.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/expr_nodes/libproviders-s3-expr_nodes.a |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_consumer.cpp |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.a |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/base_utils/node_by_host.cpp |51.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/minikql/minikql_engine_host.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/cli_utils/melancholic_gopher.cpp |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/dictionary/libformats-arrow-dictionary.a |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_request.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/downtime.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_log_and_send.cpp |51.9%| PREPARE $(FLAKE8_PY3-715603131) |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_get_log_tail.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/util.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/info_collector.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_cache.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/workers_pool.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_dispatcher_proxy.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/init/init_noop.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_reject_notification.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__configure.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/erasure_checkers.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/cms/walle_list_tasks_adapter.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_config_subscription.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/api_adapters.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_update_downtimes.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/hulldb_bulksst_add.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__cleanup_subscriptions.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/sentinel.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/service.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_check_task_adapter.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cluster_info.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_init_scheme.cpp |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/tx_reader/libtx-columnshard-tx_reader.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/dictionary/libarrow-accessor-dictionary.global.a |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_output.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_config.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_dynamic_table_writer.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__add_config_subscription.cpp |52.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/public/libtx-coordinator-public.a |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/log_settings_configurator.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_permissions.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/config_helpers.cpp |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/general_cache/source/libtx-general_cache-source.a |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_last_provided_config.cpp |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/data_events/common/libtx-data_events-common.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/plain/libarrow-accessor-plain.global.a |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/base_utils/format_util.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_create_task_adapter.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_log_cleanup.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unordered_schemaful_reader.cpp |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/dictionary/libarrow-accessor-dictionary.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyJWT/py3/libpy3python-PyJWT-py3.global.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/protos/libtx-coordinator-protos.a |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_buffer.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/timestamped_schema_helpers.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/spec_patch.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_client.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_tenant.cpp |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/hash/libformats-arrow-hash.a |51.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/formats/arrow/common/libformats-arrow-common.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.global.a |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/logger.cpp |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/proto/libparser-pg_catalog-proto.a |51.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/limiter/grouped_memory/service/liblimiter-grouped_memory-service.a |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_pool_state.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/util/config_index.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_dispatcher.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/change_record_body_serializer.cpp |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/reader/libformats-arrow-reader.a |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_config_subscriptions.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/backup_restore_traits.h_serialized.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_api_handler.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/backup_restore_traits.cpp |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.a |51.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.a |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/feature_flags_configurator.cpp |51.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/libcore-formats-arrow.a |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_expired_notifications.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__alter_tenant.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/external_sources/object_storage.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_configs_manager.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__set_config.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_configs_provider.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__get_yaml_metadata.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/arrow_filter.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/arrow_batch_builder.cpp |51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__revert_pool_state.cpp |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/net_classifier_updater.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/jaeger_tracing_configurator.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_tenant_failed.cpp |51.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_configs_subscriber.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/engine/minikql/flat_local_tx_factory.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/usage/events.cpp |52.1%| PREPARE $(TEST_TOOL_HOST-sbr:9029509511) |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_confirmed_subdomain.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/cms/cms_tx_store_permissions.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__drop_yaml_config.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__get_yaml_config.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/key_conflicts.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/extstorage_usage_config.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/http.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_handshake.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/validator.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/incr_restore_helpers.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/export_iface.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/base_utils/format_info.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_computational_units.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/usage/config.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_remove_task_adapter.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/optimizer.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/range_ops.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__log_cleanup.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_tenant_done.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_tenant_pool_config.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/service_initializer.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/init/init.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_tenant_state.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__replace_config_subscriptions.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_configuration_info_collector.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_subdomain_key.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_task.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__replace_yaml_config.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__toggle_config_validator.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__get_log_tail.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/logger.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/usage/common.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/probes.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/process.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_update_config.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__create_tenant.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/modifications_validator.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__load_state.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/init/dummy.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/scope.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/cms/console/immediate_controls_configurator.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/version/version.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/scan_common.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_kqp_delete_rows.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/http.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/validator_bootstrap.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_tenants_manager.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/export_s3_buffer.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/category.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/events.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/registry.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/usage/service.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/core_validators.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/init.h_serialized.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/type_serialization.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/validator_nameservice.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_scan.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_kqp_effects.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_cdc_stream_unit.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_table_base.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/manager.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_locks_db.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_write_unit.cpp |52.3%| [CP] {default-linux-x86_64, relwithdebinfo} $(B)/yql/essentials/minikql/comp_nodes/llvm16/yql/essentials/minikql/computation/mkql_computation_node_codegen.h |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/backup_unit.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/auto_config_initializer.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_record.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_cms.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmd_config.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_actorsystem_perftest.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/kmeans_helper.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_failpoints.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_distributed_erase_tx_unit.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_incr_restore.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/key_validator.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_write_tx_in_rs_unit.cpp |52.3%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/config_helpers.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/load_and_wait_in_rs_unit.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_loans.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finish_propose_unit.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/follower_edge.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/alter_cdc_stream_unit.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_distributed_erase_tx_unit.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_admin.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_s3_download.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/remove_locks.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_initroot.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/make_scan_snapshot_unit.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_scheme_tx_unit.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/datashard_s3_upload.h_serialized.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/run.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_disk.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/general_cache/service/service.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/incr_restore_scan.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_debug.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/config_parser.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/complete_data_tx_unit.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/protect_scheme_echoes_unit.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_persistent_snapshot_unit.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/config.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_table_unit.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_user_table.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/load_tx_details_unit.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_data_tx_in_rs_unit.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_exchange_split.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/upload_stats.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__write.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_data_tx_unit.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/import_s3.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/remove_lock_change_records.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_config.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_node.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/make_snapshot_unit.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/execution_unit_kind.h_serialized.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/factories.cpp 
|52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/read_table_scan.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_distributed_erase_tx_out_rs_unit.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_scheme_tx_in_rs_unit.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_cluster_discovery.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_fakeinitshard.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_stress.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_s3_uploader.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/operation.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_distributed_erase_tx_in_rs_unit.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__engine_host.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/restore_unit.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/volatile_tx_mon.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/receive_snapshot_unit.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/move_index_unit.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/read_op_unit.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_and_send_out_rs_unit.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/stream_scan_common.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/main.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_write_unit.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__conditional_erase_rows.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/volatile_tx.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_bs.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finalize_plan_tx_unit.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/permutations.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/remove_schema_snapshots.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_and_send_write_out_rs_unit.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/converter.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_console.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/size_calcer.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/special_keys.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/sample_k.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/process_columns.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/plan_queue_unit.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/secondary_index.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_direct_erase.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_kqp.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/datashard/change_record_cdc_serializer.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_validate_config.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_cdc_stream.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_root.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_change_receiving.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/prefix_kmeans.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_commit_writes_tx_unit.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/memory_state_migration.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/change_exchange.h_serialized.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/change_exchange.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_cache_append.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/initiate_build_index_unit.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finalize_build_index_unit.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/conflicts_cache.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/wait_for_plan_unit.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/load_write_details_unit.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_genconfig.cpp |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/long_tx_service/public/libtx-long_tx_service-public.a |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/receive_snapshot_cleanup_unit.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_change_sender_activation.cpp |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/antlr4/libparser-proto_ast-antlr4.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiohttp/libpy3contrib-python-aiohttp.a |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/reshuffle_kmeans.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/erase_rows_condition.cpp |52.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/priorities/usage/libtx-priorities-usage.a |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_read_unit.cpp |52.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/priorities/service/libtx-priorities-service.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiosignal/libpy3contrib-python-aiosignal.global.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/common/libtx-replication-common.a |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__compaction.cpp |52.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/rows/libformats-arrow-rows.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.global.a |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/proto/libproviders-ydb-proto.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/gateway/libproviders-solomon-gateway.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/accessservice/libclient-nc_private-accessservice.a |52.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/jemalloc/libcontrib-libs-jemalloc.a |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/abstract.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/aggr_common.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/assign_internal.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/visitor.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/reserve.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/graph_optimization.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/header.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/stream_logic.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/aggr_common.h_serialized.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/functions.cpp |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finish_propose_write_unit.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/abstract.h_serialized.cpp |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__mon_reset_schema_version.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/assign_const.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/aggr_keys.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/filter.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/collection.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/execution.h_serialized.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_direct_transaction.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/projection.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_s3_upload_rows.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_s3_uploads.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/index.cpp |52.7%| PREPARE $(GDB) |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/save_load/libformats-arrow-save_load.a |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/original.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/graph_execute.cpp |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_kqp_data_tx_in_rs_unit.cpp |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/switch/libformats-arrow-switch.a |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_active_transaction.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/priorities/service/counters.cpp |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/provider/libproviders-solomon-provider.a |52.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/custom_registry.cpp |52.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/deprecated/client/liblib-deprecated-client.a |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/transformer/libformats-arrow-transformer.a |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/priorities/usage/events.cpp |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/splitter/libformats-arrow-splitter.a |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_change_sending.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/priorities/usage/abstract.cpp |52.9%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/tx/datashard/datashard_split_src.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/serializer/stream.cpp |52.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_pipeline.cpp |52.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_repl_offsets.cpp |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/proto/libproviders-solomon-proto.a |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/config.cpp |52.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_snapshot_tx_unit.cpp |52.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/serializer/parsing.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/serializer/utils.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache_detail.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/config.cpp |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/libydb-core-formats.a |52.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_kqp_scan_tx_unit.cpp |53.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.global.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/expr_nodes/libproviders-ydb-expr_nodes.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/libfq-libs-audit.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing_common/libfq-libs-checkpointing_common.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/events/liblibs-checkpoint_storage-events.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/solomon_accessor/grpc/libsolomon-solomon_accessor-grpc.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/proto/liblibs-checkpoint_storage-proto.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/solomon_accessor/client/libsolomon-solomon_accessor-client.a |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_s3_downloads.cpp |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actor_log/libyql-utils-actor_log.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/public/ydb_issue/libyql-public-ydb_issue.a |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_subdomain_path_id.cpp |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/ydb_dump/libcore-io_formats-ydb_dump.a |52.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/compute/ydb/liblibs-compute-ydb.a |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/counters.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/ids.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/process.cpp |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/cloud_audit/libfq-libs-cloud_audit.a |52.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_write_operation.cpp |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/libfq-libs-checkpoint_storage.a |52.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_distributed_erase.cpp |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/control_plane/libcompute-ydb-control_plane.a |53.0%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__get_state_tx.cpp |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_split_dst.cpp |52.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/compute/common/liblibs-compute-common.a |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_data_tx_unit.cpp |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/read_table_scan_unit.cpp |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/provider/libproviders-ydb-provider.a |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/task.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/common.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/status_channel.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__init.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/config.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_dep_tracker.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/control_plane_storage_counters.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_overload.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/probes.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_data_tx_unit.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/util.cpp |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_outreadset.cpp |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/move_table_unit.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/compute/common/pinger.cpp |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_scheme_tx_unit.cpp |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_write_unit.cpp |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/libfq-libs-config.a |53.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/service/allocation.h_serialized.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_client.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_common_upload.cpp |53.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/general_cache/usage/libtx-general_cache-usage.a |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__read_columns.cpp |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/lexer_common/libessentials-parser-lexer_common.a |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__plan_step.cpp |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actors/libyql-utils-actors.a |53.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/completed_operations_unit.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/recompute_kmeans.cpp |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/liblibs-config-protos.a |53.2%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_cdc_stream_unit.cpp |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_config/libfq-libs-control_plane_config.a |53.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/ydb_schema_query_actor.h_serialized.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_repl_apply.cpp |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/events/liblibs-control_plane_storage-events.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/events/liblibs-control_plane_proxy-events.a |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/control_plane_storage_requester_actor.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_common.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_schema_snapshots.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/general_cache/usage/events.cpp |53.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execution_unit.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/ydb_schema_query_actor.cpp |53.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/base/libpublic-lib-base.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/iam/libclient-yc_public-iam.a |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__stats.cpp |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/events/liblibs-audit-events.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/db_schema/libfq-libs-db_schema.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clapack/part2/liblibs-clapack-part2.a |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__data_cleanup.cpp |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/plan/libyql-utils-plan.a |53.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor/usage/libtx-conveyor-usage.a |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/response_tasks.cpp |53.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/libclient-yc_private-resourcemanager.a |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_persistent_snapshot_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/wire_protocol.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_server.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__monitoring.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/config.cpp |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/libclient-yc_private-iam.a |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__op_rows.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__migrate_schemeshard.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__schema_changed.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/probes.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/complete_write_unit.cpp |53.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/blobstorage/lwtrace_probes/libcore-blobstorage-lwtrace_probes.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Jinja2/py3/libpy3python-Jinja2-py3.global.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/gateway/libfq-libs-gateway.a |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__snapshot_txs.cpp |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/events/libfq-libs-events.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiohttp/libpy3contrib-python-aiohttp.global.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/operation/libclient-yc_private-operation.a |53.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |53.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/graph_params/proto/liblibs-graph_params-proto.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/hmac/libfq-libs-hmac.a |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/libclient-yc_private-accessservice.a |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_user_db.cpp |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Pygments/py3/libpy3python-Pygments-py3.global.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/common/libclient-yc_public-common.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/grpc/libfq-libs-grpc.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/metrics/libfq-libs-metrics.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/events/libclient-yc_public-events.a |53.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/servicecontrol/libclient-yc_private-servicecontrol.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/events/liblibs-quota_manager-events.a |53.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/lib/libcommon-unicode_base-lib.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/libfq-libs-quota_manager.a |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_commit_writes_tx_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/topic_reader.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__read_iterator.cpp |53.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/compute/ydb/synchronization_service/libcompute-ydb-synchronization_service.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/libmath_udf.global.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/private_client/libfq-libs-private_client.a |53.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/service/worker.h_serialized.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__store_table_path.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/ydb/status_tracker_actor.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_incremental_restore_src_unit.cpp |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/protos/libolap-bg_tasks-protos.a |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/local_kmeans.cpp |53.5%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/wait_for_stream_clearance_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__store_scan_state.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cleanup_uncommitted.cpp |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/libunicode_udf.global.a |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cleanup_borrowed.cpp |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/string/libstring_udf.global.a |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_volatile_snapshot_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__column_stats.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_scheme_tx_out_rs_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__propose_tx_base.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__s3_download_txs.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_distributed_erase_tx_unit.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/kikimr_services_initializers.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_table_unit.cpp |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1/libproto_ast-gen-v1.a |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector.cpp |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/libfq-libs-db_id_async_resolver_impl.a |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_transaction_actor_sql.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_trans_queue.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cleanup_tx.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/kafka_proxy/kafka.h_serialized.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_kqp_compute.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_direct_upload.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/direct_tx_unit.cpp |53.6%| PREPARE $(CLANG-1922233694) |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_consumer_protocol.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_records.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/ydb/ydb_connector_actor.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_messages_int.cpp |53.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ydb_stress_tool |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_volatile_snapshot_unit.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cancel_tx_proposal.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_messages.cpp |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/out/libapi-protos-out.a |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/ydb/resources_cleaner_actor.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/ydb/initializer_actor.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__progress_tx.cpp |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/proto/liblibs-quota_manager-proto.a |53.6%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libapi-protos-annotations.a |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/ydb/executer_actor.cpp |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/events/liblibs-rate_limiter-events.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/utils/liblibs-rate_limiter-utils.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/control_plane_service/liblibs-rate_limiter-control_plane_service.a |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__progress_resend_rs.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/session.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/quoter_service/liblibs-rate_limiter-quoter_service.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libssh2/libcontrib-libs-libssh2.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/liburing/libcontrib-libs-liburing.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/linuxvdso/libcontrib-libs-linuxvdso.a |53.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/ydb/actors_factory.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tenant.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_commit_writes_tx_unit.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IRReader/libllvm16-lib-IRReader.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_snapshots.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_snapshot_tx_unit.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/BinaryFormat/libllvm16-lib-BinaryFormat.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__readset.cpp |53.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a |53.7%| PREPARE $(CLANG-874354456) |53.7%| PREPARE $(PYTHON) |53.7%| PREPARE $(CLANG18-1866954364) |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/common_client/libsrc-client-common_client.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitcode/Writer/liblib-Bitcode-Writer.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libxml/libcontrib-libs-libxml.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/command_base/libydb_cli_command_base.a |53.8%| [CP] {default-linux-x86_64, relwithdebinfo} $(B)/common_test.context |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/common/run_actor_params.cpp |53.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/library/cpp/svnversion/svn_interface.c |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/request_validators.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector_base.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/range_helpers/libproviders-s3-range_helpers.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/CodeView/liblib-DebugInfo-CodeView.a |53.8%| [CF] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/build_info.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/tasks/libyql-dq-tasks.a |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_data_tx_out_rs_unit.cpp |53.8%| [CF] 
{default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/sandbox.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/MSF/liblib-DebugInfo-MSF.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/libapi-grpc-draft.a |53.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/build_info.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/AsmPrinter/liblib-CodeGen-AsmPrinter.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/Symbolize/liblib-DebugInfo-Symbolize.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/Orc/Shared/libExecutionEngine-Orc-Shared.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/MCJIT/liblib-ExecutionEngine-MCJIT.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/ydb/libfq-libs-ydb.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Demangle/libllvm16-lib-Demangle.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/libapi-grpc.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-s3/librestricted-aws-aws-c-s3.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/cdc_stream_heartbeat.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/libllvm16-lib-ExecutionEngine.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__object_storage_listing.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/PerfJITEvents/liblib-ExecutionEngine-PerfJITEvents.a |53.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IRPrinter/libllvm16-lib-IRPrinter.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/experimental/libpublic-lib-experimental.a |53.9%| [BI] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/buildinfo_data.h |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/protos/libcore-graph-protos.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/MCDisassembler/liblib-MC-MCDisassembler.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/DWARF/liblib-DebugInfo-DWARF.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/alter_table_unit.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/PDB/liblib-DebugInfo-PDB.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Linker/libllvm16-lib-Linker.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Frontend/OpenMP/liblib-Frontend-OpenMP.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/yql_parser/libydb_cli-common-yql_parser.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/service/libcore-graph-service.a |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/ydb/stopper_actor.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/RuntimeDyld/liblib-ExecutionEngine-RuntimeDyld.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/json_value/libpublic-lib-json_value.a |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector_cdc_stream.cpp |53.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/library/cpp/build_info/build_info_static.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitcode/Reader/liblib-Bitcode-Reader.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_index_notice_unit.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/lib/fq/libpublic-lib-fq.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/TargetInfo/libTarget-X86-TargetInfo.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Remarks/libllvm16-lib-Remarks.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/AsmParser/libTarget-X86-AsmParser.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/libcore-grpc_services-cancelation.a |54.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a |53.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/library/cpp/svnversion/svnversion.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/CFGuard/liblib-Transforms-CFGuard.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/TargetParser/libllvm16-lib-TargetParser.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/AggressiveInstCombine/liblib-Transforms-AggressiveInstCombine.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/Disassembler/libTarget-X86-Disassembler.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/TextAPI/libllvm16-lib-TextAPI.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/Orc/TargetProcess/libExecutionEngine-Orc-TargetProcess.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ProfileData/libllvm16-lib-ProfileData.a |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sharding/hash.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sharding/unboxed_reader.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/libllvm16-lib-MC.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/ObjCARC/liblib-Transforms-ObjCARC.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Coroutines/liblib-Transforms-Coroutines.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nayuki_md5/libcontrib-libs-nayuki_md5.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lz4/libcontrib-libs-lz4.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/MCTargetDesc/libTarget-X86-MCTargetDesc.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzma/libcontrib-libs-lzma.a |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/worker.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp2/libcontrib-libs-nghttp2.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp3/libcontrib-libs-nghttp3.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openldap/libraries/liblber/libopenldap-libraries-liblber.a |54.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/SelectionDAG/liblib-CodeGen-SelectionDAG.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openldap/libcontrib-libs-openldap.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ngtcp2/libcontrib-libs-ngtcp2.a |54.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/sandbox.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/pcre16/liblibs-pcre-pcre16.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/pcre32/liblibs-pcre-pcre32.a |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_async_index.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/libcontrib-libs-pcre.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Crypto/liblibs-poco-Crypto.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.global.a |54.1%| [AR] 
{BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/JSON/liblibs-poco-JSON.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/t1ha/libcontrib-libs-t1ha.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Util/liblibs-poco-Util.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/snappy/libcontrib-libs-snappy.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/sasl/libcontrib-libs-sasl.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/simdjson/libcontrib-libs-simdjson.a |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/internal/utils.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/libcontrib-libs-opentelemetry-proto.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Instrumentation/liblib-Transforms-Instrumentation.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/malloc_extension/liblibs-tcmalloc-malloc_extension.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/no_percpu_cache/liblibs-tcmalloc-no_percpu_cache.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Support/libllvm16-lib-Support.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/re2/libcontrib-libs-re2.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/protos/libgrpc_services-cancelation-protos.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/xxhash/libcontrib-libs-xxhash.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zlib/libcontrib-libs-zlib.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/util/libydb_cli-dump-util.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/yaml/libcontrib-libs-yaml.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Foundation/liblibs-poco-Foundation.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_input_producer.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_and_wait_dependencies_unit.cpp |53.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/health_check/libydb-core-health_check.a |53.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/InstCombine/liblib-Transforms-InstCombine.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/yaml-cpp/libcontrib-libs-yaml-cpp.a |54.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd06/libcontrib-libs-zstd06.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-event-stream/librestricted-aws-aws-c-event-stream.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-auth/librestricted-aws-aws-c-auth.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-cal/librestricted-aws-aws-c-cal.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd/libcontrib-libs-zstd.a |54.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-io/librestricted-aws-aws-c-io.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-http/librestricted-aws-aws-c-http.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/cell_maker/libcore-io_formats-cell_maker.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Analysis/libllvm16-lib-Analysis.a |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/counters/counters.cpp |54.2%| [AR] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/NetSSL_OpenSSL/liblibs-poco-NetSSL_OpenSSL.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openssl/libcontrib-libs-openssl.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/no_percpu_cache/liblibs-tcmalloc-no_percpu_cache.global.a |54.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/libcontrib-restricted-abseil-cpp.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/XML/liblibs-poco-XML.a |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/formats/arrow/serializer/abstract.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/ydb/ydb_run_actor.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/exceptions_mapping.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/s2n/librestricted-aws-s2n.a |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector_async_index.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/libcontrib-restricted-abseil-cpp-tstring.a |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/metrics_actor.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Utils/liblib-Transforms-Utils.a |54.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |54.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |54.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cffi/py3/libpy3python-cffi-py3.global.a |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/formats/arrow/program/execution.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/formats/arrow/arrow_helpers.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/ydb/result_writer_actor.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_quotas.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/table_writer.cpp |54.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_ping.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.a |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cleanup_in_rs.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/internal/rate_limiter_resources.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/in_memory_control_plane_storage.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/libapi-protos.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/IPO/liblib-Transforms-IPO.a |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_compute_database.cpp |54.3%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/json_change_record.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Scalar/liblib-Transforms-Scalar.a |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__kqp_scan.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/audit_dml_operations.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/priorities/service/manager.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/audit_log.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/limiter/grouped_memory/service/manager.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__compact_borrowed.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/grpc_helper.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/grpc_mon.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/grpc_publisher_service_actor.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/txn_actor_response_builder.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__s3_upload_txs.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/cdc_stream_scan.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/general_cache/usage/config.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/ydb/finalizer_actor.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/actor.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/ydb/synchronization_service/synchronization_service.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/MCParser/liblib-MC-MCParser.a |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_result_write.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor/usage/events.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_bindings.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/compute/common/utils.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_kqp_data_tx_unit.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/validators.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/constructor.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_get.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/priorities/usage/config.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/topic/libydb-cpp-sdk-client-topic.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/libllvm16-lib-CodeGen.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/yson_value/libpublic-lib-yson_value.a |54.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/jaeger_tracing/libydb-core-jaeger_tracing.a |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/src/common.h_serialized.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/rpc_calls.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/internal/nodes_health_check.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_proxy/control_plane_proxy.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/libfq-libs-protos.a |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/public/lib/deprecated/client/msgbus_client.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor/usage/config.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_write_out_rs_unit.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/base_table_writer.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/service.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/src/actors.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/limiter/grouped_memory/service/actor.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/sampling_throttling_control_internals.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/throttler.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/sampling_throttling_control.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/request_discriminator.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/query_utils.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_handshake_actor.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_metrics_actor.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_view.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/health/health.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_replication.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/base/msgbus.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_create_partitions_actor.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/general_cache/usage/service.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/formats/arrow/serializer/native.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_common/rpc_common_kqp_session.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/query/rpc_kqp_tx.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_find_coordinator_actor.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor/usage/abstract.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_create_topics_actor.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_auth_actor.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_transactional_producers_initializers.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/client/grpc_client.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor/usage/service.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/src/kqp_runner.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_rename_tables.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/resolve_local_db_table.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_rate_limiter_api.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_metrics.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/src/ydb_setup.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_kqp_data_tx_out_rs_unit.cpp |54.4%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/logs/log.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_remove_directory.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_backup.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_scheme_base.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_alter_coordination_node.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_clusters_updater_actor.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_metadata_actor.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_copy_table.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_fetch_actor.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/audit_logins.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_describe_configs_actor.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_list_offsets_actor.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/atomic/librestricted-boost-atomic.a |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/actorsys_test/testactorsys.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_topic_offsets_actor.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_coordination_node.cpp |54.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/container/librestricted-boost-container.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/chrono/librestricted-boost-chrono.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/impl_common/libboost-context-impl_common.a |54.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/exception/librestricted-boost-exception.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/coroutine/librestricted-boost-coroutine.a |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/operation_helpers.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/iostreams/librestricted-boost-iostreams.a |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_commit_actor.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/random/librestricted-boost-random.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/batch/libkqp-common-batch.a |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_transaction_actor.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/graph/librestricted-boost-graph.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/events.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_balancer_actor.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tablet.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/regex/librestricted-boost-regex.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_runtime.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/program_options/librestricted-boost-program_options.a |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_object_storage.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_produce_actor.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/kesus/tablet/quoter_resource_tree.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_fetch_actor.cpp |54.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/serialization/librestricted-boost-serialization.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/rate_accounting.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tablet_html.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tablet_db.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_config_get.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/events.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tablet_impl.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_dummy.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_init_schema.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_add.cpp |54.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/random.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_delete.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/proxy_actor.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_update.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_describe.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_acquire.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_create.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_data.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_delete.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_collect_operation.cpp |54.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ut/ydb-core-security-ut |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_describe.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_simple_db_flat.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_helpers.cpp |54.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/update.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_stored_state_data.cpp |54.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_consumer_groups_metadata_initializers.cpp |54.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/update.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_release.cpp |54.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_actor.cpp |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_api_versions_actor.cpp |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_read_session_actor.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_update.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_session_attach.cpp |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_create_coordination_node.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_session_destroy.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/schema.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_sessions_describe.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_session_detach.cpp |54.6%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/probes.cpp |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/local_rate_limiter.cpp |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/keyvalue/protos/libcore-keyvalue-protos.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/dragonbox/libdragonbox.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/cityhash-1.0.2/libcontrib-restricted-cityhash-1.0.2.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/thread/librestricted-boost-thread.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/2d_array/libcpp-containers-2d_array.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/archive/liblibrary-cpp-archive.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/compact_vector/libcpp-containers-compact_vector.a |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_stream_execute_scan_query.cpp |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/binsaver/liblibrary-cpp-binsaver.a |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_balance_actor_sql.cpp |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/comptrie/libcpp-containers-comptrie.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/fastlz/libblockcodecs-codecs-fastlz.global.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/adapters/issue/libcpp-adapters-issue.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/common_client/impl/libclient-common_client-impl.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/shard/protos/libgraph-shard-protos.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/buffer/libkqp-common-buffer.a |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/update.cpp |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lz4/libblockcodecs-codecs-lz4.global.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/snappy/libblockcodecs-codecs-snappy.global.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lzma/libblockcodecs-codecs-lzma.global.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zstd/libblockcodecs-codecs-zstd.global.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/core/libcpp-blockcodecs-core.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/liblibrary-cpp-blockcodecs.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cache/liblibrary-cpp-cache.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/thrift/libcontrib-restricted-thrift.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/case_insensitive_string/liblibrary-cpp-case_insensitive_string.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cgiparam/liblibrary-cpp-cgiparam.a |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_external_data_source.cpp |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/charset/liblibrary-cpp-charset.a |54.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/common/compilation/libkqp-common-compilation.a |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/keep_alive.cpp |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-crt-cpp/librestricted-aws-aws-crt-cpp.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/codecs/greedy_dict/libcpp-codecs-greedy_dict.a |54.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/colorizer/liblibrary-cpp-colorizer.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bit_io/liblibrary-cpp-bit_io.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/comptable/liblibrary-cpp-comptable.a |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_copy_tables.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/atomizer/libcpp-containers-atomizer.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/charset/lite/libcpp-charset-lite.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/brotli/libblockcodecs-codecs-brotli.global.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/config/liblibrary-cpp-config.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/legacy_zstd06/libblockcodecs-codecs-legacy_zstd06.global.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/intrusive_rb_tree/libcpp-containers-intrusive_rb_tree.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/paged_vector/libcpp-containers-paged_vector.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/enum_codegen/libcpp-deprecated-enum_codegen.a |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/common/simple/libkqp-common-simple.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zlib/libblockcodecs-codecs-zlib.global.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/kmp/libcpp-deprecated-kmp.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/diff/liblibrary-cpp-diff.a |54.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a |54.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/common/shutdown/libkqp-common-shutdown.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/googletest/googletest/librestricted-googletest-googletest.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/scheme_types/libpublic-lib-scheme_types.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/value/libpublic-lib-value.a |54.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/executer_actor/shards_resolver/libkqp-executer_actor-shards_resolver.a |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_connection.cpp |54.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a |54.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/common/events/libkqp-common-events.a |54.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/ref/libinternal-proxies-ref.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/avx2/libinternal-proxies-avx2.a |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/shutdown/state.cpp |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/sse2/libinternal-proxies-sse2.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/ssse3/libinternal-proxies-ssse3.a |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/shutdown/events.cpp |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/murmur/libcpp-digest-murmur.a |54.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_consumer_members_metadata_initializers.cpp |54.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a |54.6%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/grpc_services/grpc_endpoint_publish_actor.cpp |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/http_service.cpp |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/expr_nodes/libcore-kqp-expr_nodes.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/simple/kqp_event_ids.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/simple/reattach.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/simple/services.cpp |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/old_crc/libcpp-digest-old_crc.a |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/simple/temp_tables.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/mock/yql_mock.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/sharding.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/execute_data_query.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/query/rpc_execute_query.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/auth_factory.cpp |54.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/hash_slider.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_get_shard_locations.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/describe_table.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_export.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_path.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_drop_table.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_stat.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/table_settings.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_fq_internal.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_stream_execute_yql_script.cpp |55.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_bridge.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_script_executions.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_forget_operation.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_node_registration.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_alter_configs_actor.cpp |55.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kqp_helper.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/query/rpc_fetch_script_results.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql.cpp |55.0%| [CC] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/kqp_tx_info.h_serialized.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_make_directory.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_discovery.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_types.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_yql.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/kqp_yql.h_serialized.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/sse41/libinternal-proxies-sse41.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_user_request_context.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/grpc_service.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_import.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_read_table.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_prepare_data_query.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/libpy3providers-common-proto.global.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/events/workload_service.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/libyql-essentials-utils.a |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_external_table.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_kh_snapshots.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dot_product/liblibrary-cpp-dot_product.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/execprofile/liblibrary-cpp-execprofile.a |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_modify_permissions.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_config.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dwarf_backtrace/liblibrary-cpp-dwarf_backtrace.a |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/getopt/liblibrary-cpp-getopt.global.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_list_objects_in_s3_export.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/common/common.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/list_directory.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/pcdata/libcpp-html-pcdata.a |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/hdr/libcpp-histogram-hdr.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/intrusive_avl_tree/libcpp-containers-intrusive_avl_tree.a |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_begin_transaction.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_config_set.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/object.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/fetch/libcpp-http-fetch.a |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_table.cpp |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_drop_coordination_node.cpp |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/misc/libcpp-http-misc.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/server/libcpp-http-server.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/simple/helpers.cpp |55.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/int128/liblibrary-cpp-int128.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ipmath/liblibrary-cpp-ipmath.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/csv/converter/libarrow-csv-converter.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ipv6_address/liblibrary-cpp-ipv6_address.a |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_import_data.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/query/rpc_attach_session.cpp |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/host/libcore-kqp-host.a |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_self_check.cpp |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/rbo/libkqp-opt-rbo.a |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/grpc_request_proxy.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_kh_describe.cpp |55.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/liblibrary-formats-arrow.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_ping.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_store.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read_actor.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_read_columns.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/create_table.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_store.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_session_timeout.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/persqueue.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/create_store.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_service.cpp |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_monitoring.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/compilation/result.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_alter_table.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/compilation/events.cpp |55.2%| PREPARE $(WITH_JDK-sbr:7832760150) |55.2%| PREPARE $(WITH_JDK17-sbr:7832760150) |55.2%| PREPARE 
$(JDK_DEFAULT-472926544) |55.3%| PREPARE $(JDK17-472926544) |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/common/libcpp-json-common.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/iterator/liblibrary-cpp-iterator.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/fast_sax/libcpp-json-fast_sax.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_semaphore_timeout.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/yson/libcpp-json-yson.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/l2_distance/liblibrary-cpp-l2_distance.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/writer/libcpp-json-writer.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lfalloc/alloc_profiler/libcpp-lfalloc-alloc_profiler.a |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_execute_scheme_query.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/simple/query_id.cpp |55.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_init.cpp |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_cms.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/mock/liblibrary-folder_service-mock.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_execute_data_query.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/providers/stat/expr_nodes/libproviders-stat-expr_nodes.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_locks_helper.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/counters/kqp_counters.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/yaml/libcore-viewer-yaml.a |55.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/liblibrary-cpp-json.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/splitter/liblibrary-formats-arrow-splitter.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/modifier/liblibrary-formats-arrow-modifier.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lfalloc/dbg_info/libcpp-lfalloc-dbg_info.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/logger/global/libcpp-logger-global.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/transformer/liblibrary-formats-arrow-transformer.a |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/grpc_request_proxy_simple.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/analytics/liblwtrace-mon-analytics.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/logger/liblibrary-cpp-logger.global.a |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/global_plugins/libydb-library-global_plugins.a |55.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/lua/liblibrary-cpp-lua.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/operation_client.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_create_table.cpp |55.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/legacy_protobuf/protos/libencode-legacy_protobuf-protos.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/libcpp-monlib-encode.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/liblibrary-cpp-lwtrace.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/prometheus/libmonlib-encode-prometheus.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/text/libmonlib-encode-text.a |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_table.cpp |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/expr_nodes/libproviders-clickhouse-expr_nodes.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/proto/libproviders-clickhouse-proto.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/libcpp-lwtrace-mon.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/path_generator/libproviders-s3-path_generator.a |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_keep_alive.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/libproviders-s3-proto.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/libcpp-monlib-service.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/events/events.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_get_operation.cpp |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/metrics/libcpp-monlib-metrics.a |55.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/health_check/health_check.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/nodes_manager.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_log_store.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_login.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_load_rows.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_get_scale_recommendation.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/simple/settings.cpp |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_explain_yql_script.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write_actor.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/provider/libproviders-pq-provider.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_commit_transaction.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/shutdown/controller.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_kqp_base.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/jaeger_tracing/sampling_throttling_configurator.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/grpc_services/rpc_explain_data_query.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_execute_yql_script.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/events/query.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_keyvalue.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_fq.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/events/script_executions.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/provider/libproviders-s3-provider.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_read_rows.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/http_req.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/explain_data_query.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/proxy/proxy.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_intermediate.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/manager.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/query/rpc_execute_script.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_table_options.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request.cpp |55.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_actors.cpp |55.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_events.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_timeouts.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_state_collect.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_dynamic_config.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_planner_strategy.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/create_session.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/clusters_from_connections.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/nodes_health_check.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/proxy_private.cpp |55.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_peer_stats_calculator.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/task_result_write.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_cancel_operation.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_list_operations.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compile_service/kqp_compile_computation_pattern_service.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/rate_limiter_resources.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_index_record.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/task_ping.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_maintenance.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp.cpp |55.5%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/table_bindings_from_bindings.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_resolve.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_helpers.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/compute_actor/kqp_compute_state.h_serialized.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_common.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_index.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_storage_request.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/big_integer/libcpp-openssl-big_integer.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/on_disk/chunks/libcpp-on_disk-chunks.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/holders/libcpp-openssl-holders.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/crypto/libcpp-openssl-crypto.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/io/libcpp-openssl-io.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/init/libcpp-openssl-init.global.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/method/libcpp-openssl-method.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/indent_text/libcpp-string_utils-indent_text.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_state.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/levenshtein_diff/libcpp-string_utils-levenshtein_diff.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/counter_time_keeper/liblibrary-persqueue-counter_time_keeper.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ncloud/impl/liblibrary-ncloud-impl.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/protos/liblibrary-pdisk_io-protos.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_scheme_executer.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/protobuf/py3/libpy3python-protobuf-py3.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/io/libcpp-mapreduce-io.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/parse_size/libcpp-string_utils-parse_size.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/quote/libcpp-string_utils-quote.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/scan/libcpp-string_utils-scan.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/libydb-library-pdisk_io.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/url/libcpp-string_utils-url.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/ztstrbuf/libcpp-string_utils-ztstrbuf.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tdigest/liblibrary-cpp-tdigest.a |55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |55.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/terminate_handler/liblibrary-cpp-terminate_handler.global.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/libcpp-lwtrace-mon.global.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/common/libcpp-testing-common.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_extensions/libcpp-testing-gtest_extensions.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/protos/libcpp-lwtrace-protos.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_opt.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/hook/libcpp-testing-hook.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/unittest_main/libcpp-testing-unittest_main.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/atomic/libcpp-threading-atomic.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/tablesorter/libservice-pages-tablesorter.global.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/blocking_queue/libcpp-threading-blocking_queue.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/cancellation/libcpp-threading-cancellation.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/cron/libcpp-threading-cron.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/unittest/libcpp-testing-unittest.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/equeue/libcpp-threading-equeue.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/future/libcpp-threading-future.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/hot_swap/libcpp-threading-hot_swap.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/light_rw_lock/libcpp-threading-light_rw_lock.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/poor_man_openmp/libcpp-threading-poor_man_openmp.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/queue/libcpp-threading-queue.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/skip_list/libcpp-threading-skip_list.a |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_transform.cpp |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/task_scheduler/libcpp-threading-task_scheduler.a |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/object.cpp |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/proxy_service/proto/libkqp-proxy_service-proto.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/time_provider/liblibrary-cpp-time_provider.a |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/persqueue/topic_parser/counters.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor.cpp |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/type_info/tz/libcpp-type_info-tz.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/audit/libydb-core-audit.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_column.cpp |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_column.cpp |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_explain_prepared.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_data_executer.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/behaviour.cpp |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/proto/libcpp-unified_agent_client-proto.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/liblibrary-cpp-unified_agent_client.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/object_listers/libproviders-s3-object_listers.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/uri/liblibrary-cpp-uri.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/json/libcpp-yson-json.a |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp |55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a |55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/common/libkqp-workload_service-common.a |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_write_actor_settings.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_compute.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_arrow_memory_pool.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_program_builder.cpp |55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/view/behaviour.cpp |55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/topics/libcore-kqp-topics.a |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/fetcher.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_effects.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/manager.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_literal_executer.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_scan_executer.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/common/cpu_quota_manager.cpp |55.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/common/helpers.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_collector.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_event_impl.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/manager.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/snapshot.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/actors/error.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/add_column.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/initializer.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/behaviour.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_tx_manager.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_state.cpp |55.8%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/table/behaviour.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_statement_rewrite.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_read_table.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_events.cpp |55.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/cpu_clock/libcpp-yt-cpu_clock.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/node/libcpp-yson-node.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/liblibrary-cpp-yson.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/backtrace/libcpp-yt-backtrace.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/global/libcpp-yt-global.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/libcpp-yt-logging.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/error/libcpp-yt-error.a |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_index.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/libydb-library-ydb_issue.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/multidict/libpy3contrib-python-multidict.a |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/behaviour.cpp |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/plain_text_formatter/libyt-logging-plain_text_formatter.a |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo_rules.cpp |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/malloc/libcpp-yt-malloc.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/infer_schema/libyt-lib-infer_schema.a |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/checker.cpp |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/string/libcpp-yt-string.a |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/actors/rate_limiter.cpp |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/actors/pending_fetcher.cpp |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_tx.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/system/libcpp-yt-system.a |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo_transformer.cpp |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_sharding.cpp |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compile_service/kqp_compile_service.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/threading/libcpp-yt-threading.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/yson/libcpp-yt-yson.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ycloud/impl/liblibrary-ycloud-impl.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/yson_string/libcpp-yt-yson_string.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ytalloc/api/libcpp-ytalloc-api.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/tools/enum_parser/enum_serialization_runtime/libtools-enum_parser-enum_serialization_runtime.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/util/draft/libutil-draft.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/proto/libbackup-common-proto.a |55.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a |55.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/util/charset/libutil-charset.a |55.9%| [AR] 
{BAZEL_DOWNLOAD} $(B)/library/cpp/type_info/liblibrary-cpp-type_info.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/liblibrary-cpp-unified_agent_client.global.a |55.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_translate.cpp |56.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/controller/libcore-backup-controller.a |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_partitioned_executer.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/utils/scheme_helpers.cpp |56.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/libcore-backup-impl.a |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_gateway_proxy.cpp |55.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |56.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/chacha_512/libblobstorage-crypto-chacha_512.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/common/libcore-blobstorage-common.a |56.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/version/libversion_definition.a |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_runner.cpp |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/libcore-blobstorage-crypto.a |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_planner.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/html.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_validate.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_executer_impl.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/blobstorage_vdiskid.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_wide_read.cpp |55.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/blobstorage_syncstate.cpp |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/behaviour.cpp |55.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_host.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/actors/result_writer.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compile_service/kqp_compile_actor.cpp |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/local_pgwire/sql_parser.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/read_attributes_utils.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/local_pgwire/local_pgwire_util.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/kqp_metadata_loader.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin_compact.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/actors/task_get.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/scheduler/new/kqp_compute_scheduler_service.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_cbo.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_effects.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/persqueue/topic_parser/topic_parser.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/common/events.cpp |55.9%| PREPARE $(CLANG16-1380963495) |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_column_statistics_requester.cpp |55.9%| PREPARE $(FLAKE8_LINTER-sbr:6561765464) |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_write_constraint.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/local_rpc/helper.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_build_phy_query.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_type_ann.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_build_txs.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_ru_calc.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_delete_index.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/rbo/kqp_operator.cpp |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/mkql_dq/libproviders-yt-mkql_dq.a |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_join.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/long_timer.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/scheduler/new/tree/dynamic.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/read_http_reply_protocol.cpp |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libyql-dq-proto.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/name_service_client_protocol.cpp |55.1%| [ld] {default-linux-x86_64, relwithdebinfo} $(B)/tools/flake8_linter/flake8_linter |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/arrow/interface/libcommon-arrow-interface.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/rewrite_io_utils.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/private.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/common/antlr4/libparser-common-antlr4.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges_predext.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/backtrace/cursors/libunwind/libbacktrace-cursors-libunwind.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/db_id_async_resolver/libproviders-common-db_id_async_resolver.a |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.grpc.pb.cc |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.grpc.pb.cc |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_index.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.grpc.pb.cc |55.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.grpc.pb.cc |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/provider/libproviders-clickhouse-provider.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/gateway/kqp_gateway.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.pb.h_serialized.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.grpc.pb.cc |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_scan_data_meta.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.grpc.pb.cc |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.grpc.pb.cc |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/rbo/kqp_convert_to_physical.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/database_basic_sausage_metainfo.pb.cc |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.pb.cc |55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/database_basic_sausage_metainfo.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.pb.cc |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_scan_data.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.pb.cc |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.grpc.pb.cc |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.pb.cc |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_sequencer_factory.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann_pg.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.pb.cc |55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/query_data/kqp_query_data.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/view/manager.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.pb.cc |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.pb.cc |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.grpc.pb.cc |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_indexes.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.grpc.pb.cc |55.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.pb.cc |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/libydb-core-protos.a |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.pb.cc |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.pb.cc |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.grpc.pb.cc |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.pb.cc |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.pb.cc |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_session_info.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.pb.cc |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/health.grpc.pb.cc |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.grpc.pb.cc |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.pb.cc |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole.cpp |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_databases_cache.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/runtime/scheduler/new/tree/snapshot.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.pb.cc |55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.pb.cc |55.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_statistics_transformer.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_read_iterator_common.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/scheduler/new/kqp_schedulable_actor.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_hash_func_propagate_transformer.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.grpc.pb.cc |56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_factory.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.grpc.pb.cc |56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/proxy_service/kqp_proxy_service.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_phy_check.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_results.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_constant_folding_transformer.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.pb.h_serialized.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/backup/impl/table_writer.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/utils/metadata_helpers.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_query_plan.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/query_data/kqp_predictor.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/tx_mediator_timecast.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_helpers.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/audit/audit_log_impl.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/actors/pool_handlers_actors.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.pb.cc |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.pb.cc >> test_dump_restore.py::flake8 [GOOD] |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.grpc.pb.cc |56.4%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_exec.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/send_data_protocol.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/health.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_effects.cpp >> test_split_merge.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> docker_wrapper_test.py::flake8 [GOOD] |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_indexes.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_provider.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.grpc.pb.cc >> test.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> helpers.py::flake8 [GOOD] >> test_ctas.py::flake8 [GOOD] >> test_yt_reading.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_stats_mode.py::flake8 [GOOD] >> base.py::flake8 [GOOD] >> test_tpch_import.py::flake8 [GOOD] |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/topics/kqp_topics.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.pb.cc |56.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/flake8 >> test_dump_restore.py::flake8 [GOOD] |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.pb.cc >> tablet_scheme_tests.py::flake8 [GOOD] |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_opt.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/blob_depot_config.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.pb.cc |56.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/flake8 >> test_split_merge.py::flake8 [GOOD] |56.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part5/flake8 >> test.py::flake8 [GOOD] |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.grpc.pb.cc |56.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/postgres_integrations/go-libpq/flake8 >> docker_wrapper_test.py::flake8 [GOOD] |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.pb.cc >> test_sql_streaming.py::flake8 [GOOD] >> test_vector_index.py::flake8 [GOOD] >> test_vector_index_negative.py::flake8 [GOOD] |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.grpc.pb.cc |56.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/plans/flake8 >> test_stats_mode.py::flake8 [GOOD] |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_phy_finalize.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/manager.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.pb.cc |56.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_import/flake8 >> test_yt_reading.py::flake8 [GOOD] |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_settings.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_backup.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_backup.pb.cc |56.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/s3_import/flake8 >> test_tpch_import.py::flake8 [GOOD] |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_chain.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.pb.cc >> test_encryption.py::flake8 [GOOD] |56.7%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/feature_flags.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.grpc.pb.cc |56.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_tests/flake8 >> tablet_scheme_tests.py::flake8 [GOOD] |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.grpc.pb.cc |56.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/streaming_optimize/flake8 >> test_sql_streaming.py::flake8 [GOOD] |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.grpc.pb.cc >> test_log_scenario.py::flake8 [GOOD] >> upgrade_to_internal_path_id.py::flake8 [GOOD] >> zip_bomb.py::flake8 [GOOD] |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_kql.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/actors/cpu_load_actors.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_phase.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.grpc.pb.cc |56.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/flake8 >> test_vector_index_negative.py::flake8 [GOOD] |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/connect_socket_protocol.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.pb.cc >> test_dml.py::flake8 [GOOD] |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sequenceshard.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/kqp_ic_gateway.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sequenceshard.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/counters_testshard.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.grpc.pb.cc >> test_cms_erasure.py::flake8 [GOOD] >> test_cms_restart.py::flake8 [GOOD] >> test_cms_state_storage.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_opt_build.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/rm_service/kqp_resource_estimation.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.pb.cc |56.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/flake8 >> zip_bomb.py::flake8 [GOOD] |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/base/blobstorage_events.cpp |56.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/encryption/flake8 >> test_encryption.py::flake8 [GOOD] |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.grpc.pb.cc >> test_fifo_messaging.py::flake8 [GOOD] >> test_generic_messaging.py::flake8 [GOOD] >> test_polling.py::flake8 [GOOD] >> test_batch_operations.py::flake8 [GOOD] >> test_compatibility.py::flake8 [GOOD] >> test_data_type.py::flake8 [GOOD] >> test_example.py::flake8 [GOOD] >> test_export_s3.py::flake8 [GOOD] >> test_followers.py::flake8 [GOOD] >> test_rolling.py::flake8 [GOOD] >> test_statistics.py::flake8 [GOOD] >> test_stress.py::flake8 [GOOD] >> test_vector_index.py::flake8 [GOOD] >> udf/test_datetime2.py::flake8 [GOOD] >> udf/test_digest.py::flake8 [GOOD] >> udf/test_digest_regression.py::flake8 [GOOD] |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.pb.h_serialized.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert_index.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.grpc.pb.cc |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/incrhuge/libcore-blobstorage-incrhuge.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/blobstorage_pdisk_config.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_effects.cpp |56.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/flake8 >> test_dml.py::flake8 [GOOD] |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bridge.grpc.pb.cc |56.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |56.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/local_pgwire/local_pgwire.cpp |56.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a |56.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_defaults.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.pb.cc |56.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/actors/scheme_actors.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bridge.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.pb.cc |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |56.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/flake8 >> utils.py::flake8 [GOOD] |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |56.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/flake8 >> test_polling.py::flake8 [GOOD] |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/groupinfo/libcore-blobstorage-groupinfo.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/token_accessor/grpc/libcommon-token_accessor-grpc.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yt_download/libyt-lib-yt_download.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/other/libcore-blobstorage-other.a |56.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/compatibility/flake8 >> udf/test_digest_regression.py::flake8 [GOOD] |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/token_accessor/client/libcommon-token_accessor-client.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/libcore-backup-common.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/grpc/libdq-api-grpc.a |56.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/libcore-config-validation.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_datasource.cpp >> test.py::flake8 [GOOD] >> test_quoting.py::flake8 [GOOD] |56.1%| [AR] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/mock/libblobstorage-pdisk-mock.a |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/libydb-core-mind.a |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/util/libyutil.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/testing.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/tx_datashard.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_public.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_mon.cpp >> test.py::flake8 [GOOD] |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/provider/libproviders-dq-provider.a |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert.cpp |56.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/flake8 >> test_quoting.py::flake8 [GOOD] |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/space_monitor.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_state.h_serialized.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_delayed_cost_loop.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_handle_class.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/rm_service/kqp_resource_info_exchanger.cpp |56.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/tests/flake8 >> test.py::flake8 [GOOD] |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_drivemodel_db.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_log_cache.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_internal_interface.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/run_script_actor/kqp_run_script_actor.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/group_stat_aggregator.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/kqp_workload_service.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_algo.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/given_id_range.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update_index.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/rm_service/kqp_rm_service.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/actors/analyze_actor.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_returning.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/query_data/kqp_prepared_query.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sort.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_tasks_runner.cpp |56.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/solomon/flake8 >> test.py::flake8 [GOOD] |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/local_pgwire/local_pgwire_auth_actor.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin.cpp 
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/local_pgwire/pgwire_kqp_proxy.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/query_compiler/kqp_olap_compiler.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/backup/impl/local_partition_reader.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_uniq_helper.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_expr_nodes.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config.grpc.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_datasink.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_resolve.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/kqp.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_write_table.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_outofspace.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/mon_main.cpp >> test_base.py::flake8 [GOOD] >> test_postgres.py::flake8 [GOOD] >> test_sql_logic.py::flake8 [GOOD] >> test_stream_query.py::flake8 [GOOD] |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/kqp.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_essence.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/local_pgwire/local_pgwire_connection.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/query_compiler/kqp_query_compiler.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/tx_datashard.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/blocks.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/garbage_collection.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_query_blocks_transformer.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_write_actor.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_read_actor.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_histograms.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/actors/run_actor.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/rm_service/kqp_snapshot_manager.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_histogram_latency.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_delete.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/blob_depot.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_gc.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_hugeblobctx.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_upload.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_sequencer_actor.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_commit_blob_seq.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_write.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_actor.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_load.cpp |57.0%| [TS] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/suite_tests/flake8 >> test_stream_query.py::flake8 [GOOD] |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_transport.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/slot_indexes_pool.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/actors/scheme.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus.grpc.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_mongroups.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_load.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_uncertain.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_apply_config.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/drivedata_serializer.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_worker.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus.pb.cc |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config.grpc.pb.cc |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_trash.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_params.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_performance_params.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/group_metrics_exchange.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_decommit.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/scheduler/old/kqp_compute_scheduler.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_vdisk_guids.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/assimilator.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_common.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_output_stream.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console.grpc.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_metadata.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_init_schema.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config.pb.cc |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_mon.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc.grpc.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_cache.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_write.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_signal_event.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_costmodel.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_read.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_atomicblockcounter.cpp >> test_quota_exhaustion.py::flake8 [GOOD] |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_sectorrestorator.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_stat_aggr.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_status.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_http.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config.pb.cc |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_vdisk.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_recoverylogwriter.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/address_classification/counters.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/log_backend/log_backend_build.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_get.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/address_classification/net_classifier.h_serialized.cpp >> test.py::flake8 [GOOD] |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_osiris.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/memory_controller/memtable_collection.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_range.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/log_backend/json_envelope.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/channel_kind.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/coro_tx.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config.grpc.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/log_backend/log_backend.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_mon.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_defs.h_serialized.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery_scan.cpp |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_defrag.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_dblogcutter.cpp |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/metering/libydb-core-metering.a 
|57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_block.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_connectivity.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/libcore-blobstorage-vdisk.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/ingress/libblobstorage-vdisk-ingress.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisfinder.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/flake8 >> test_quota_exhaustion.py::flake8 [GOOD] |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part19/flake8 >> test.py::flake8 [GOOD] |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.global.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/counters/libproviders-dq-counters.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__update_node_location.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/protos/libblobstorage-vdisk-protos.a >> test_clean.py::flake8 [GOOD] >> test_clickbench.py::flake8 [GOOD] >> test_diff_processing.py::flake8 [GOOD] >> test_external.py::flake8 [GOOD] >> test_import_csv.py::flake8 [GOOD] >> test_tpch.py::flake8 [GOOD] >> test_upload.py::flake8 [GOOD] |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/lease_holder.cpp |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/blobstorage_hulloptlsn.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/helper/libproviders-dq-helper.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_collect_garbage.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery_read_log.cpp |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/mock/libblobstorage-dsproxy-mock.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/mkql/libproviders-dq-mkql.a |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_common.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_status.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_delete.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_scan.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisproxy.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/planner/libproviders-dq-planner.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/comm.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/nodewarden/distconf_cache.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_scatter_gather.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_events.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_dynamic.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/query.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_static_group.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_put.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf.cpp |56.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp |56.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/opt/libproviders-dq-opt.a |56.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tpc/medium/flake8 >> test_upload.py::flake8 [GOOD] |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/codecs/libcore-persqueue-codecs.a |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mon/libydb-core-mon.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal.grpc.pb.cc |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/sequencer.cpp >> test_partitioning.py::flake8 [GOOD] |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Passes/libllvm16-lib-Passes.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/discovery/libsrc-client-discovery.a |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pipe.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_state_storage.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis.cpp |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/resolved_value.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/proxy.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_storage_config.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_persistent_storage.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_binding.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_response.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group_resolver.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/blocks.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/mind/node_broker__update_epoch.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_pool.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__update_pool_status.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/metrics.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_log.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/blob_mapping_cache.cpp >> test_workload.py::flake8 [GOOD] |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pdisk.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/garbage.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/status.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/draft/libsrc-client-draft.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_patch.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/flake8 >> test_partitioning.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_alloc.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_node_enumeration.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/request.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_console.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_defs.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__update_slot_status.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__check_slot_status.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__update_config.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/s3.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__load_state.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__update_config_subscription.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__assign_free_slots.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_bridge.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__migrate_state.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_syslogreader.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/read.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__graceful_shutdown.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__extend_lease.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/agent.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/validation/validators.cpp >> test.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> scenario.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_case.py::flake8 [GOOD] |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/labels_maintainer.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__alter_tenant.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/mind/node_broker__update_config.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__init_scheme.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/dynamic_nameserver_mon.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisrunner.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__register_node.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/dynamic_nameserver.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/s3_backups/tests/flake8 >> test_workload.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/validation/column_shard_config_validator.cpp >> reconfig_state_storage_workload_test.py::flake8 [GOOD] >> test_board_workload.py::flake8 [GOOD] >> test_scheme_board_workload.py::flake8 [GOOD] >> test_state_storage_workload.py::flake8 [GOOD] >> test_large_import.py::flake8 [GOOD] |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/validation/auth_config_validator.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_osiris.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_scrub.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/address_classification/net_classifier.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idxsnap.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/local.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstvec.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_logreader.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_datasnap.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part17/flake8 >> test.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_event_filter.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_public.cpp >> test_update_script_tables.py::flake8 [GOOD] >> hive_matchers.py::flake8 [GOOD] >> test_create_tablets.py::flake8 [GOOD] >> test_drain.py::flake8 [GOOD] >> test_kill_tablets.py::flake8 [GOOD] >> test_s3.py::flake8 [GOOD] |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/recovery/hulldb_recovery.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/memory_controller/memory_controller.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_cost_tracker.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__load_state.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/join/flake8 >> test_case.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/configured_tablet_bootstrapper.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllogcutternotify.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hulldb_bulksstmngr.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstslice.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_generate.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullactor.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idx.cpp >> test.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__init_scheme.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/reconfig_state_storage_workload/tests/flake8 >> test_state_storage_workload.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_compactfreshappendix.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/flake8 >> test_kill_tablets.py::flake8 [GOOD] |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/s3_import/large/flake8 >> test_large_import.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_logreplay.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_discover.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_readbulksst.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllog.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_resource.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/balancer.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/flake8 >> test_update_script_tables.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |57.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part11/flake8 >> test.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_metrics_exchange.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/monitoring.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_fsm.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/mock/dsproxy_mock.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/blobstorage_hullcompdelete.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hull.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/node_report.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config_fit_pdisks.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/migrate.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/assimilation.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/olap_workload/tests/flake8 >> test_workload.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_extr.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_mon.cpp >> test.py::flake8 [GOOD] 
|57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/flake8 >> test_s3.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/drop_donor.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part0/flake8 >> test.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_mapper.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_validate.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_stathuge.cpp >> test.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_get_block.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_entryserialize.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_stattablet.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/bsc_audit.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/get_group.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/simple_queue/tests/flake8 >> test_workload.py::flake8 [GOOD] |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/console_interaction.cpp |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part12/flake8 >> test.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/domain_info.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_layout_checker.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_public.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/layout_helpers.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon/crossref.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_tablets_object.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/boot_queue.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_readbatch.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/grouper.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/disk_metrics.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/virtual_group.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/self_heal.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_readactor.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_barrier.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/init_scheme.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__register_node.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_range.cpp >> test.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__generate_data_ut.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/merge_split_common_table/fifo/flake8 >> test.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/drain.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/load_everything.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_statdb.cpp 
|57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/storage_group_info.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__configure_scale_recommender.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/follower_tablet_info.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_domains.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/node_info.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/fill.cpp >> test_vector_index.py::flake8 [GOOD] |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/flake8 >> test.py::flake8 [GOOD] |57.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a >> test_vector_index_large_levels_and_clusters.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_serializable.py::flake8 [GOOD] |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/interface/logging/libmapreduce-interface-logging.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_impl.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_statics.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/log/tests/flake8 >> test_workload.py::flake8 [GOOD] |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part9/flake8 >> test.py::flake8 [GOOD] |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/mon/liblibrary-schlab-mon.global.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/llvm16/libyt-comp_nodes-llvm16.a >> test_example.py::flake8 [GOOD] >> test_config_migration.py::flake8 [GOOD] >> test_config_with_metadata.py::flake8 [GOOD] >> test_configuration_version.py::flake8 [GOOD] >> test_distconf.py::flake8 [GOOD] >> test_distconf_generate_config.py::flake8 [GOOD] >> test_distconf_reassign_state_storage.py::flake8 [GOOD] >> test_generate_dynamic_config.py::flake8 [GOOD] >> test_postgres.py::flake8 [GOOD] >> test_parametrized_queries.py::flake8 [GOOD] >> test_alloc_default.py::flake8 [GOOD] >> test_dc_local.py::flake8 [GOOD] >> test_result_limits.py::flake8 [GOOD] >> test_scheduling.py::flake8 [GOOD] |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/liblibrary-workload-stock.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/leader_tablet_info.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__adopt_tablet.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_log.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tablet_info.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__reassign_groups_on_decommit.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/protos/liblibrary-schlab-protos.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__create_tablet.cpp |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part16/flake8 >> test.py::flake8 [GOOD] |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schemu/liblibrary-schlab-schemu.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/libpy3api-grpc.global.a |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/example/flake8 >> test_example.py::flake8 [GOOD] |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/large/flake8 >> test_vector_index_large_levels_and_clusters.py::flake8 [GOOD] |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/config/libcore-persqueue-config.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/mind/hive/tx__configure_subdomain.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/storage_balancer.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/purecalc/libcore-persqueue-purecalc.a |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/postgresql/flake8 >> test_postgres.py::flake8 [GOOD] |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/partition_key_range/libcore-persqueue-partition_key_range.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mon/mon.cpp |57.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/large_serializable/flake8 >> test_serializable.py::flake8 [GOOD] |57.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part13/flake8 >> test.py::flake8 [GOOD] |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__disconnect_node.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libydb-library-services.a |57.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/flake8 >> test_generate_dynamic_config.py::flake8 [GOOD] |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/libydb-core-persqueue.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/libproviders-yt-codec.a |56.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/mem_alloc/flake8 >> test_scheduling.py::flake8 [GOOD] |56.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/flake8 >> test_parametrized_queries.py::flake8 [GOOD] |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__release_tablets.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/libydb-core-public_http.global.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/libydb-core-pgproxy.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/config/libsrc-client-config.a |57.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/write_meta.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/coordination/libsrc-client-coordination.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__process_pending_operations.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__init_scheme.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/monitoring.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__delete_node.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tablet_move_info.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/percentile_counter.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/header.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__seize_tablets.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__block_storage_result.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__cut_tablet_history.cpp >> test.py::flake8 [GOOD] |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pq_database.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__delete_tablet.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__unlock_tablet.cpp |57.2%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/storage_pool_info.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__seize_tablets_reply.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__release_tablets_reply.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pq_rl_helpers.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/quota_tracker.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replbroker.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/datastreams/libsrc-client-datastreams.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replmonhandler.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__restart_tablet.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/type_codecs_defs.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/sourceid_info.h_serialized.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__lock_tablet.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__load_everything.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/utils.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_pdisk.cpp >> test_workload.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_unknown_data_source.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullrepljob.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__delete_tablet_result.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config_cmd.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__request_tablet_seq.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/restore_corrupted_blob_actor.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__response_tablet_seq.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__process_boot_queue.cpp |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part4/flake8 >> test.py::flake8 [GOOD] |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/cmds_storage_pool.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/microseconds_sliding_window.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_domain.cpp >> conftest.py::flake8 [GOOD] >> test_clickhouse.py::flake8 [GOOD] >> test_greenplum.py::flake8 [GOOD] >> test_join.py::flake8 [GOOD] >> test_mysql.py::flake8 [GOOD] >> test_postgresql.py::flake8 [GOOD] >> test_ydb.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_auth_system_views.py::flake8 [GOOD] >> test_create_users.py::flake8 [GOOD] >> test_create_users_strict_acl_checks.py::flake8 [GOOD] >> test_db_counters.py::flake8 [GOOD] >> test_dynamic_tenants.py::flake8 [GOOD] >> test_publish_into_schemeboard_with_common_ssring.py::flake8 [GOOD] >> test_storage_config.py::flake8 [GOOD] >> test_system_views.py::flake8 [GOOD] >> test_tenants.py::flake8 [GOOD] >> test_user_administration.py::flake8 [GOOD] >> test_users_groups_with_acl.py::flake8 [GOOD] |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__kill_node.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__request_tablet_owners.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/offload_actor.cpp |57.2%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/heartbeat.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__status.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/common_app.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/bsc.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/cmds_host_config.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/metering_sink.cpp |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/node_broker/tests/flake8 >> test_workload.py::flake8 [GOOD] |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/matplotlib-inline/libpy3contrib-python-matplotlib-inline.global.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_tablet_metrics.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/protobuf_printer/libydb-library-protobuf_printer.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/hash/liblibrary-formats-arrow-hash.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/chunks_limiter/libydb-library-chunks_limiter.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/libpy3library-folder_service-proto.global.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/fcontext_impl/libboost-context-fcontext_impl.a >> common.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_rename.py::flake8 [GOOD] >> test_async_replication.py::flake8 [GOOD] |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_dc_followers.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/locale/librestricted-boost-locale.a |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/common/flake8 >> test_unknown_data_source.py::flake8 [GOOD] |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/generic/analytics/flake8 >> test_ydb.py::flake8 [GOOD] |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__reassign_groups.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replproxy.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/disjoint_interval_tree/libcpp-containers-disjoint_interval_tree.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/common/libyql-dq-common.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__tablet_owners_reply.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/flake8 >> test_users_groups_with_acl.py::flake8 [GOOD] |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_json/libydb-library-yaml_json.a |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/public_http/libydb-core-public_http.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/protos/libcore-pgproxy-protos.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/spilling/libdq-actors-spilling.a |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/libydb-core-load_test.a |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/libydb-core-quoter.a |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/certificate_check/libcore-security-certificate_check.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_sst.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/protos/libcore-public_http-protos.a >> test_multinode_cluster.py::flake8 [GOOD] >> test_recompiles_requests.py::flake8 [GOOD] >> test_ttl.py::flake8 [GOOD] |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/raw_socket/libydb-core-raw_socket.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_unreadable.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/quoter/public/libcore-quoter-public.a |57.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/flake8 >> test_rename.py::flake8 [GOOD] |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/resource_pools/libydb-core-resource_pools.a |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/flake8 >> test_async_replication.py::flake8 [GOOD] |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libcore-scheme-protos.a >> overlapping_portions.py::flake8 [GOOD] |57.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/libydb-core-scheme.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme_types/libydb-core-scheme_types.a |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/load_test/config_examples.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_tablet_status.cpp |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/libydb-core-sys_view.a |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/load_test/percentile.h_serialized.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_tablet_groups.cpp |57.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/flake8 >> test_recompiles_requests.py::flake8 [GOOD] |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_request.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_process.cpp >> test.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test_pdisk_format_info.py::flake8 [GOOD] >> test_replication.py::flake8 [GOOD] >> test_self_heal.py::flake8 [GOOD] >> test_tablet_channel_migration.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_crud.py::flake8 [GOOD] |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/storage_stats_calculator.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/cmds_drive_status.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__stop_tablet.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config_fit_groups.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/load_test/ycsb/info_collector.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_pile.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/commit_config.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/blob.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/propose_group_key.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__sync_tablets.cpp |57.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/flake8 >> test_ttl.py::flake8 [GOOD] >> test_inserts.py::flake8 [GOOD] >> test_kv.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_sql.py::flake8 [GOOD] 
|57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/grpc_request_context_wrapper.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/select_groups.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/register_node.cpp |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/database/libcore-statistics-database.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_mon.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__resume_tablet.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/key.cpp |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/libydb-core-security.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__switch_drain.cpp |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ldap_auth_provider/libcore-security-ldap_auth_provider.a |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/certificate_check/cert_auth_processor.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/http_router.cpp |57.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/kv/tests/flake8 >> test_workload.py::flake8 [GOOD] >> test_liveness_wardens.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_auditlog.py::flake8 [GOOD] >> test_clickbench.py::flake8 [GOOD] >> test_external.py::flake8 [GOOD] >> test_import_csv.py::flake8 [GOOD] >> test_tpcds.py::flake8 [GOOD] >> test_tpch.py::flake8 [GOOD] >> test_upload.py::flake8 [GOOD] >> test_workload_oltp.py::flake8 [GOOD] >> test_workload_simple_queue.py::flake8 [GOOD] |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/oom/flake8 >> overlapping_portions.py::flake8 [GOOD] |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serializable/flake8 >> test.py::flake8 [GOOD] |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/debug_info.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/probes.cpp |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/mysql/flake8 >> test.py::flake8 [GOOD] |57.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/write_id.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/shred.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/cmds_box.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor.cpp |57.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/sql/flake8 >> test_kv.py::flake8 [GOOD] >> test_mixed.py::flake8 [GOOD] |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a >> test.py::flake8 [GOOD] >> test_alter_ops.py::flake8 [GOOD] >> test_copy_ops.py::flake8 [GOOD] >> test_scheme_shard_operations.py::flake8 [GOOD] |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_utils.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/scrub.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_snapshot.cpp |57.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/cdc/tests/flake8 >> test_workload.py::flake8 [GOOD] |57.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part1/flake8 >> 
test.py::flake8 [GOOD] |57.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/libcore-statistics-service.a |57.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/flake8 >> test_tablet_channel_migration.py::flake8 [GOOD] |57.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part7/flake8 >> test.py::flake8 [GOOD] |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/stat_processor.cpp |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a >> test_kqprun_recipe.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_commit.py::flake8 [GOOD] >> test_timeout.py::flake8 [GOOD] |57.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/wardens/flake8 >> test_liveness_wardens.py::flake8 [GOOD] |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part15/flake8 >> test.py::flake8 [GOOD] |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part10/flake8 >> test.py::flake8 [GOOD] |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/mixedpy/flake8 >> test_mixed.py::flake8 [GOOD] |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/library/compatibility/binaries/downloader/flake8 >> __main__.py::flake8 [GOOD] |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_lwtrace_probes.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/unisched.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/event.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_huge.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/request_controller_info.cpp |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/flake8 >> test_scheme_shard_operations.py::flake8 [GOOD] >> alter_compression.py::flake8 [GOOD] >> test_bridge.py::flake8 [GOOD] >> base.py::flake8 [GOOD] |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/load/flake8 >> test_workload_simple_queue.py::flake8 [GOOD] |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/flake8 >> test_auditlog.py::flake8 [GOOD] |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/flake8 >> test_sql.py::flake8 [GOOD] |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/update_seen_operational.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/tables/table_queries.cpp |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part18/flake8 >> test.py::flake8 [GOOD] |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/schema.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/dread_cache_service/caching_service.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/load_based_timeout.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/sys_view.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/queue.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp |57.5%| [TS] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/bridge/flake8 >> test_bridge.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime_with_service_name.py::flake8 [GOOD] >> select_positive_with_service_name.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_secondary_index.py::flake8 [GOOD] |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/aggregated_result.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/memory.cpp |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/merge_split_common_table/std/flake8 >> test.py::flake8 [GOOD] |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/pdisk_write.cpp |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/pq_read/test/flake8 >> test_timeout.py::flake8 [GOOD] |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/kesus_quoter_proxy.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/pdisk_read.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/update_last_seen_ready.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq_impl_app.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/common.cpp |57.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/kqprun/tests/flake8 >> test_kqprun_recipe.py::flake8 [GOOD] |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_scale_manager.cpp >> test.py::flake8 [GOOD] |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/group_write.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/archive.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_scale_request.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/pdisk_log.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/bulk_mkql_upsert.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_read.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/http_request.cpp |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/flake8 >> base.py::flake8 [GOOD] |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/update_group_latencies.cpp |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/proto/libsrc-client-proto.a >> test_disk.py::flake8 [GOOD] >> test_tablet.py::flake8 [GOOD] >> test_leader_start_inflight.py::flake8 [GOOD] |57.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/oracle/flake8 >> test.py::flake8 [GOOD] |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/thread_pool/libimpl-ydb_internal-thread_pool.a |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/metadata_initializers.cpp |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/logger/libimpl-ydb_internal-logger.a |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/partition_chooser_impl.cpp >> __main__.py::flake8 [GOOD] |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon_alloc/memory_info.cpp |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/flake8 >> 
test_secondary_index.py::flake8 [GOOD] |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/query/impl/libclient-query-impl.a |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon_alloc/profiler.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon_alloc/tcmalloc.cpp |57.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/flake8 >> test_tablet.py::flake8 [GOOD] |57.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part2/flake8 >> test.py::flake8 [GOOD] |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/make_request/libimpl-ydb_internal-make_request.a |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon_alloc/stats.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/source_id_encoding.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/processor/schema.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/database/database.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/fetch_request_actor.cpp |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/grpc_connections/libimpl-ydb_internal-grpc_connections.a |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/transaction.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/public_http/http_req.cpp |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer_app.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/test_load_read_iterator.cpp |57.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |57.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/flake8 >> test_leader_start_inflight.py::flake8 [GOOD] |57.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/kqp_session_common/libimpl-ydb_internal-kqp_session_common.a |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq_l2_cache.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/service/db_counters.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_monitoring.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/service/query_interval.cpp |57.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/kqprun/recipe/flake8 >> __main__.py::flake8 [GOOD] |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/cluster_tracker.cpp |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/retry/libimpl-ydb_internal-retry.a |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/show_create/create_view_formatter.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_write.cpp >> test_copy_table.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> helpers.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_query.py::flake8 [GOOD] >> test_s3.py::flake8 [GOOD] |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/load_test/service_actor.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/plain_status/libimpl-ydb_internal-plain_status.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/session_pool/libimpl-ydb_internal-session_pool.a |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet/libydb-core-tablet.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/protos/libcore-tablet_flat-protos.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq_impl.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libcore-protos-schemeshard.a |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/event_helpers.cpp |57.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a >> test_ttl.py::flake8 [GOOD] |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/certificate_check/cert_auth_utils.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_init.cpp |56.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/audit_helpers/libcore-testlib-audit_helpers.a |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer__balancing_app.cpp |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/show_create/formatters_common.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/params/libsrc-client-params.a |57.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/pipe_tracker.cpp |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/libydb-core-testlib.a >> conftest.py::flake8 [GOOD] >> test_join.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/flake8 >> test_ttl.py::flake8 [GOOD] |57.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/flake8 >> test_copy_table.py::flake8 [GOOD] |57.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_scan_iface.h_serialized.cpp |57.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part3/flake8 >> test.py::flake8 [GOOD] |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_part_group_iter_create.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/kqp_select.cpp |57.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/sql/lib/flake8 >> test_s3.py::flake8 [GOOD] |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/common/schema.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/mirrorer.cpp >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ownerinfo.cpp |57.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/generic/streaming/flake8 >> test_join.py::flake8 [GOOD] |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_fwd_misc.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_sausage_meta.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tablet_flat/flat_sausagecache.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_blob_encoder.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_quantum.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_mem_warm.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_exec_broker.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer__balancing.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/write_quoter.cpp >> test_bulkupserts_tpch.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_insert_delete_duplicate_records.py::flake8 [GOOD] >> test_insertinto_selectfrom.py::flake8 [GOOD] >> test_tiering.py::flake8 [GOOD] >> test_workload_manager.py::flake8 [GOOD] >> test_serverless.py::flake8 [GOOD] >> test_account_actions.py::flake8 [GOOD] >> test_acl.py::flake8 [GOOD] >> test_counters.py::flake8 [GOOD] >> test_format_without_version.py::flake8 [GOOD] >> test_garbage_collection.py::flake8 [GOOD] >> test_multiplexing_tables_format.py::flake8 [GOOD] >> test_ping.py::flake8 [GOOD] >> test_queue_attributes_validation.py::flake8 [GOOD] >> test_queue_counters.py::flake8 [GOOD] >> test_queue_tags.py::flake8 [GOOD] >> test_queues_managing.py::flake8 [GOOD] >> test_throttling.py::flake8 [GOOD] |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_comp_create.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters_app.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/labeled_counters_merger.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_comp.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/private/aggregated_counters.cpp |57.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/flake8 >> test.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_exec_seat.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_dbase_apply.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_comp_gen.h_serialized.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/writer.cpp >> test_common.py::flake8 [GOOD] >> test_yandex_audit.py::flake8 [GOOD] >> test_yandex_cloud_mode.py::flake8 [GOOD] >> test_yandex_cloud_queue_counters.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_comp_gen.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_exec_commit_mgr.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_exec_commit.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_txloglogic.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_database.cpp |57.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/flake8 >> test_throttling.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/list_all_topics_actor.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_borrowlogic.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_executor.pb.cc >> test_break.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_executor_compaction_logic.h_serialized.cpp |57.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/flake8 >> test_serverless.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tablet_flat/flat_executor_tx_env.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/events/events.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_snapshot.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |57.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/sql/large/flake8 >> test_workload_manager.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_counters.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_gclogic.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_page_iface.h_serialized.cpp |57.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part8/flake8 >> test.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_charge_create.cpp >> test_tpcds.py::flake8 [GOOD] >> test_tpch_spilling.py::flake8 [GOOD] >> test_cte.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_page_label.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/certificate_check/cert_check.cpp |57.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/flake8 >> test_yandex_cloud_queue_counters.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/user_info.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/login_page.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/public_http/http_service.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/vdisk_write.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_compaction.cpp |57.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/minidumps/flake8 >> test_break.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/sourceid.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_sourcemanager.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_part_loader.h_serialized.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_index_iter_create.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/actor_helpers.cpp |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider.cpp >> test_select.py::flake8 [GOOD] >> kikimr_config.py::flake8 [GOOD] >> base.py::flake8 [GOOD] >> data_correctness.py::flake8 [GOOD] >> data_migration_when_alter_ttl.py::flake8 [GOOD] >> tier_delete.py::flake8 [GOOD] >> ttl_delete_s3.py::flake8 [GOOD] >> ttl_unavailable_s3.py::flake8 [GOOD] >> unstable_connection.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_charge_range.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_dump.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_quoter.cpp >> conftest.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/account_read_quoter.cpp >> test_ydb_backup.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_row_versions.cpp >> test_ydb_flame_graph.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mon_alloc/monitor.cpp >> 
test_ydb_impex.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_slice.cpp >> test_ydb_recursive_remove.py::flake8 [GOOD] |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_bio_actor.cpp >> test_ydb_scheme.py::flake8 [GOOD] |57.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_row_eggs.h_serialized.cpp >> test_ydb_scripting.py::flake8 [GOOD] |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_outset.cpp >> test_ydb_sql.py::flake8 [GOOD] |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/kqp.cpp >> test_ydb_table.py::flake8 [GOOD] |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_overlay.cpp >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_cp_ic.py::flake8 [GOOD] >> test_dispatch.py::flake8 [GOOD] >> test_retry.py::flake8 [GOOD] >> test_retry_high_rate.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_insert_restarts.py::flake8 [GOOD] |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/service/ext_counters.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_tracing_signals.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_range_cache.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_store_hotdog.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/keyvalue_write.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_table_btree_index.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_table.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_table_btree_index_histogram.cpp |57.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/kqp/plan2svg/flake8 >> test_cte.py::flake8 [GOOD] |57.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/flake8 >> test_select.py::flake8 [GOOD] |57.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tpc/large/flake8 >> test_tpch_spilling.py::flake8 [GOOD] |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_misc.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/probes.cpp |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_linux.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_observer.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_committed.cpp >> column_table_helper.py::flake8 [GOOD] >> base.py::flake8 [GOOD] >> range_allocator.py::flake8 [GOOD] >> test_delete_all_after_inserts.py::flake8 [GOOD] >> s3_client.py::flake8 [GOOD] >> test_delete_by_explicit_row_id.py::flake8 [GOOD] >> thread_helper.py::flake8 [GOOD] >> time_histogram.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> ydb_client.py::flake8 [GOOD] |57.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/flake8 >> test_ydb_table.py::flake8 [GOOD] |57.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/flake8 >> test.py::flake8 [GOOD] |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_handle.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_part.cpp |57.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/library/ut/flake8 >> kikimr_config.py::flake8 [GOOD] |57.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/flake8 >> unstable_connection.py::flake8 [GOOD] |57.8%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipe_server.cpp |57.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/restarts/flake8 >> test_insert_restarts.py::flake8 [GOOD] |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_dbase_scheme.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_table.cpp |57.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/ydb/flake8 >> test.py::flake8 [GOOD] |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipe_client_cache.cpp >> test.py::flake8 [GOOD] >> test_schemeshard_limits.py::flake8 [GOOD] |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/yql_single_query.cpp |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_part_loader.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipecache.cpp |57.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/multi_plane/flake8 >> test_retry_high_rate.py::flake8 [GOOD] |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_stats/libclient-impl-ydb_stats.a >> test_crud.py::flake8 [GOOD] |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/transfer/libydb-core-transfer.a >> test_discovery.py::flake8 [GOOD] |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/balance_coverage/libcore-tx-balance_coverage.a >> test_execute_scheme.py::flake8 [GOOD] |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/value_helpers/libimpl-ydb_internal-value_helpers.a >> test_indexes.py::flake8 [GOOD] |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/session/libcolumnshard-bg_tasks-session.a >> test_insert.py::flake8 [GOOD] |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/events/libcolumnshard-bg_tasks-events.a >> test_isolation.py::flake8 [GOOD] |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/abstract/libcolumnshard-bg_tasks-abstract.a >> test_public_api.py::flake8 [GOOD] |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/include/libclient-persqueue_public-include.a >> test_read_table.py::flake8 [GOOD] >> test_session_grace_shutdown.py::flake8 [GOOD] >> test_session_pool.py::flake8 [GOOD] >> test_query_cache.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/operation/libsrc-client-operation.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/manager/libcolumnshard-bg_tasks-manager.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/import/libsrc-client-import.a |57.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/common/flake8 >> ydb_client.py::flake8 [GOOD] |57.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/delete/flake8 >> test_delete_by_explicit_row_id.py::flake8 [GOOD] |57.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |57.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/protos/libcolumnshard-bg_tasks-protos.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/transactions/libcolumnshard-bg_tasks-transactions.a |57.8%| [AR] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/common/libcolumnshard-blobs_action-common.a |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/kqp_upsert.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/processor.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/address.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_configure.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/blob_set.cpp >> test_restarts.py::flake8 [GOOD] >> test_actorsystem.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_http_api.py::flake8 [GOOD] |57.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/flake8 >> test_schemeshard_limits.py::flake8 [GOOD] |57.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part14/flake8 >> test.py::flake8 [GOOD] |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/read.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/scan.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_interval_metrics.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/common.cpp |57.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/show_create/view/tests/flake8 >> test_workload.py::flake8 [GOOD] |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_top_partitions.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/action.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/write.cpp |57.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/flake8 >> test_session_pool.py::flake8 [GOOD] |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/remove.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/login_shared_func.cpp |57.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part6/flake8 >> test.py::flake8 [GOOD] |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_counters_aggregator.cpp >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> select_positive_with_schema.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/query_stats/libclient-table-query_stats.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/counters/libcolumnshard-blobs_action-counters.a |57.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/query_cache/flake8 >> test_query_cache.py::flake8 [GOOD] |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/read.cpp |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/ss_tasks/libsrc-client-ss_tasks.a |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/result/libsrc-client-result.a |57.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/flake8 >> test_restarts.py::flake8 [GOOD] |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/subscriber.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/node_whiteboard.cpp |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tracing/libydb-core-tracing.a |57.9%| [TS] 
{default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/flake8 >> test_actorsystem.py::flake8 [GOOD] |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |57.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/oltp_workload/tests/flake8 >> test_workload.py::flake8 [GOOD] |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/resources/libsrc-client-resources.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/resources/libsrc-client-resources.global.a >> test_workload.py::flake8 [GOOD] |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/protos/libcolumnshard-blobs_action-protos.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/local/libcolumnshard-blobs_action-local.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_init_schema.cpp |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |57.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/flake8 >> test.py::flake8 [GOOD] |57.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/http_api/flake8 >> test_http_api.py::flake8 [GOOD] |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libcolumnshard-common-protos.a |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tracing/http.cpp |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tracing/trace_collection.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tracing/trace.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_ack_timeout.cpp |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/common/libtx-columnshard-common.a |57.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/transfer/tests/flake8 >> test_workload.py::flake8 [GOOD] |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/remove.cpp |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/scheme/libsrc-client-scheme.a |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/common.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/read.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_info.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/impl/libclient-persqueue_public-impl.a |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/common/tablet_id.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/common/reverse_accessor.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ticket_parser.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/common/limits.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/common/portion.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/common/snapshot.cpp >> __main__.py::flake8 [GOOD] >> parser.py::flake8 [GOOD] |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/libsrc-client-table.a |57.8%| 
[CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/portion.h_serialized.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/group_members.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_metrics.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/common/scalars.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/common/blob.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/partition_stats/top_partitions.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/common/volume.cpp |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/cache_policy/libcolumnshard-data_accessor-cache_policy.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/query_stats/query_stats.cpp |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/libclient-types-credentials.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/oauth2_token_exchange/libtypes-credentials-oauth2_token_exchange.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_init.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_resolve.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/processor_impl.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/portion_index.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/columnshard.cpp >> conftest.py::flake8 [GOOD] >> test_alter_compression.py::flake8 [GOOD] >> test_alter_tiering.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_read_update_write_load.py::flake8 [GOOD] >> test_scheme_load.py::flake8 [GOOD] >> test_simple.py::flake8 [GOOD] |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/writes_monitor.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/splitter.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/counters/scan.h_serialized.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/impl/libclient-table-impl.a |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_schedule_traversal.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/column_tables.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/db_counters.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/background_controller.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/counters/columnshard.h_serialized.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/duplicate_filtering.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_collect.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/service/sysview_service.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/common_data.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/req_tracer.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tablet_flat/flat_boot_misc.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/portions.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/error_collector.cpp |58.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/docs/generator/flake8 >> parser.py::flake8 [GOOD] >> allure_utils.py::flake8 [GOOD] >> remote_execution.py::flake8 [GOOD] >> results_processor.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> ydb_cli.py::flake8 [GOOD] >> ydb_cluster.py::flake8 [GOOD] |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/common/libclient-topic-common.a |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/context/libdata_sharing-common-context.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |57.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/flake8 >> test_simple.py::flake8 [GOOD] |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/libsrc-client-topic.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/groups.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/composite.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_load_blob_queue.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/abstract.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/test_load_actor.cpp |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/permissions.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/aggregator.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/login/libtypes-credentials-login.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_data_cleanup_logic.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_finish_trasersal.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/query_stats/query_metrics.cpp |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_boot_lease.cpp |58.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/lib/flake8 >> ydb_cluster.py::flake8 [GOOD] |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_init.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.global.a |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_interval_summary.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/sys_view/processor/tx_aggregate.cpp |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.a |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/resource_pool_classifiers/resource_pool_classifiers.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/status/libdata_sharing-initiator-status.a |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/protos/liblibrary-operation_id-protos.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_aggr_stat_response.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/partition_stats/partition_stats.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/issue/libsrc-library-issue.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/decimal/libsrc-library-decimal.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/grpc/client/libsdk-library-grpc-client-v3.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_response.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_response_tablet_distribution.cpp |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/libsrc-client-types.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze.cpp |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/libcolumnshard-data_sharing-protos.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/impl/libclient-topic-impl.a |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_pipe_client.cpp |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |58.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/sdk/cpp/src/library/operation_id/libsrc-library-operation_id.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/jwt/libsrc-library-jwt.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/users.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/common/result.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/service.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/pg_tables/pg_tables.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_init_schema.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_responsiveness_pinger.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/settings.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/tablet_flat_executed.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/basics/runtime.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_navigate.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/compaction_info.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/nodes/nodes.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/node_tablet_monitor.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/common/path_id.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_db_mon.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/indexation.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/libchanges-compaction-dictionary.global.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_request.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_bootlogic.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/basics/appdata.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/counters/libengines-changes-counters.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/groups.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/uuid/libsrc-library-uuid.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/persqueue/topic_parser_public/libsdk-library-persqueue-topic_parser_public-v3.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/persqueue/obfuscate/libsdk-library-persqueue-obfuscate-v3.a |57.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libcolumnshard-engines-protos.a |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/sys_view/auth/owners.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/service_impl.cpp |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_configure.cpp |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_delete.cpp |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/protos/libservices-bg_tasks-protos.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/abstract/libservices-bg_tasks-abstract.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/resource_pools/resource_pools.cpp |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/grpc/libapi-grpc-persqueue-deprecated.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/auth/libydb-services-auth.a |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/remove.cpp |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a |57.5%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |57.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/datastreams/libydb-services-datastreams.a |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_schemeshard_stats.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bridge/libydb-services-bridge.a |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/libreader-simple_reader-duplicates.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/config/libydb-services-config.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/backup/libydb-services-backup.a |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/tablet_flat_executor.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/index_chunk.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/cms/libydb-services-cms.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/actors/wait_events.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/scan.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/show_create/show_create.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/blobs_manager.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tracing/tablet_info.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/engine_logs.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/vslots.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_read_validate.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_rebuildhistory.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_delivery_problem.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tablet_flat_dummy.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/sessions/sessions.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_deadline.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_list_renderer.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/actors/block_events.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/predicate/predicate.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/tablets/tablets.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/basics/services.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_reset.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/fake_coordinator.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/audit_helpers/audit_helper.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_datashard_scan_response.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_impl.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/aggregator_impl.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/tx_initialize.cpp |58.1%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/transfer.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/events/delete_blobs.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc_actor.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/actors/test_runtime.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/pdisks.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_monitoring_proxy.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_resolver.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/test_tablet.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/tx_init_scheme.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/shared_sausagecache.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/show_create/create_table_formatter.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/test_shard_context.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_writelog.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tablet_helpers.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/storage_pools.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_state.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/state_server_interface.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/basics/helpers.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_mon.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/storages_manager/manager.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/status.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/task.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/write.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storage.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_reader/fetching_executor.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/common.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/blob_manager.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/transfer/transfer_writer.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/storage.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_sys.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/base_with_blobs.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/filter.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc_actor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/defs.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_reader/contexts.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storages_manager.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/test_shard_mon.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/common.cpp |58.1%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/cs_helper.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/portions/portion_info.h_serialized.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/tx_load_everything.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/manager.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_delete.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/common_helper.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/constructor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/datastreams/next_token.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_indexed.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/storage.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/counters_manager.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/constructor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/datastreams/shard_iterator.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/events.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/manager.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/read_coordinator.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_findlatest.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/blob_manager_db.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_actor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/queue.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/stats.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/cache_policy/policy.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/manager.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_reader/actor.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_write.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/request.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_locks/manager/manager.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/constructor.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/modification/tasks/modification.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write_index.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_reader/contexts.h_serialized.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/write.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_data_from_source.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tablet/tablet_req_blockbs.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_locks/locks/snapshot.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/common/libengines-scheme-common.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/move_portions.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/collector.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/column/libengines-scheme-column.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_draft.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/extractor/libext_index-metadata-extractor.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/controller/controller.cpp |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_locks/locks/list.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/common/libscheme-defaults-common.a |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ext_index/common/events.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/discovery/libydb-services-discovery.a |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/control.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/modification/events/change_owning.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/adapter.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libscheme-defaults-protos.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/merger.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/dynamic_config/libydb-services-dynamic_config.a |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/tiering/libengines-scheme-tiering.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_remove_blobs.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/abstract.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.cpp >> conftest.py::flake8 [GOOD] >> test_2_selects_limit.py::flake8 [GOOD] >> test_3_selects.py::flake8 [GOOD] >> test_bad_syntax.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_big_state.py::flake8 [GOOD] >> test_continue_mode.py::flake8 [GOOD] >> test_cpu_quota.py::flake8 [GOOD] >> 
test_delete_read_rules_after_abort_by_system.py::flake8 [GOOD] >> test_disposition.py::flake8 [GOOD] >> test_eval.py::flake8 [GOOD] >> test_invalid_consumer.py::flake8 [GOOD] >> test_kill_pq_bill.py::flake8 [GOOD] >> test_mem_alloc.py::flake8 [GOOD] >> test_metrics_cleanup.py::flake8 [GOOD] >> test_pq_read_write.py::flake8 [GOOD] >> test_public_metrics.py::flake8 [GOOD] >> test_read_rules_deletion.py::flake8 [GOOD] >> test_recovery.py::flake8 [GOOD] >> test_recovery_match_recognize.py::flake8 [GOOD] >> test_recovery_mz.py::flake8 [GOOD] >> test_restart_query.py::flake8 [GOOD] >> test_row_dispatcher.py::flake8 [GOOD] >> test_select_1.py::flake8 [GOOD] >> test_select_limit.py::flake8 [GOOD] >> test_select_limit_db_id.py::flake8 [GOOD] >> test_select_timings.py::flake8 [GOOD] >> test_stop.py::flake8 [GOOD] >> test_yds_bindings.py::flake8 [GOOD] >> test_yq_streaming.py::flake8 [GOOD] |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/actor.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_reader/fetcher.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_meta.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/cursor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/common.h_serialized.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/checker.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/common.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/remove_portions.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/transfer.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/abstract_scheme.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/header.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/session/destination.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/merged_column.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/general_compaction.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/tier_info.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/modification/transactions/tx_change_blobs_owning.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/schema_diff.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/source.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_reader/fetching_steps.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |58.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yds/flake8 >> test_yq_streaming.py::flake8 [GOOD] |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/manager/shared_blobs.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/constructor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/counters/libstorage-actualizer-counters.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/common/libstorage-actualizer-common.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_from_source.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/abstract.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/lib/sharding/libservices-lib-sharding.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/libstorage-actualizer-abstract.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/logic.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/common/context.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/events.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/abstract/kqp_common.h_serialized.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/loading/stages.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/parsing.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/request_features.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/manager/sessions.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/decoder.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/context.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/local_discovery/libydb-services-local_discovery.a |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/iterator.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse41/libfarmhash-arch-sse41.a |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/libydb-core-grpc_streaming.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/construction/context.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetch_steps.cpp |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/chunks/column.cpp |58.3%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/logic.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_start_from_initiator.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.cpp |58.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.global.a |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_ack_from_initiator.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/builder.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_context.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/iam/libpy3client-yc_public-iam.global.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/maintenance/libydb-services-maintenance.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/filler.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/common/libpy3client-yc_public-common.global.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/libindexes-portions-extractor.a |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/libindexes-portions-extractor.global.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/merger.cpp |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |58.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/message_seqno.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/control.cpp |58.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/exception/libcpp-yt-exception.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-checksums/librestricted-aws-aws-checksums.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/compacted.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/libydb-services-metadata.a |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/crcutil/libcontrib-libs-crcutil.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/easy_parse/libcpp-json-easy_parse.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/double-conversion/libcontrib-libs-double-conversion.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/merge_subset.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/logic.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-compression/librestricted-aws-aws-c-compression.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/codecs/liblibrary-cpp-codecs.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/googletest/googlemock/librestricted-googletest-googlemock.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-common/librestricted-aws-aws-c-common.a |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/preparation_controller.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/modification_controller.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/remap.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/granule_view.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/fetch_database.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_tables.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/predicate/filter.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/initializer/events.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/counters.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/portions/constructor_portion.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/predicate/range.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/changes.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/predicate/container.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/common/timeout.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/scanner.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_portions.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/column_engine_logs.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/plain_read_data.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/common/session/common.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/column_record.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_portion_chunk.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/meta.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/sub_columns_fetching.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/full_scan_sorted.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/logic.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/merge.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/constructors.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/constructor.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/interval.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/context.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_cursor.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/iterator.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/read_metadata.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/meta.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/db_wrapper.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/ttl.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/services/metadata/common/ss_dialog.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/counters.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/counters.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/ydb_value_operator.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/restore_controller.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/context.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/table_record.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetching.cpp |58.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/expat/libcontrib-libs-expat.a |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/policy.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetched_data.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/written.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse42_aesni/libfarmhash-arch-sse42_aesni.a |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx.cpp |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/ssse3/liblibs-base64-ssse3.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cctz/libcontrib-libs-cctz.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/builtins/liblibs-cxxsupp-builtins.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/plain32/liblibs-base64-plain32.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/arrow_kernels/registry/libcore-arrow_kernels-registry.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/arrow_kernels/request/libcore-arrow_kernels-request.a |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxrt/liblibs-cxxsupp-libcxxrt.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cctz/tzdata/liblibs-cctz-tzdata.global.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/credentials/libessentials-core-credentials.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/common/libbrotli-c-common.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/c-ares/libcontrib-libs-c-ares.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxabi-parts/liblibs-cxxsupp-libcxxabi-parts.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Net/liblibs-poco-Net.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxx/liblibs-cxxsupp-libcxx.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/liblib-Target-X86.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/dec/libbrotli-c-dec.a |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/plain64/liblibs-base64-plain64.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr3_cpp_runtime/libcontrib-libs-antlr3_cpp_runtime.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/libessentials-core-dq_integration.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/neon64/liblibs-base64-neon64.a |58.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/base64/neon32/liblibs-base64-neon32.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/enc/libbrotli-c-enc.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/replication/libydb-services-replication.a |58.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/not_sorted.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common/conveyor_task.cpp |58.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/orc-format/liblibs-apache-orc-format.a |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/with_appended.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/request/common.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/download/libcore-file_storage-download.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/facade/libessentials-core-facade.a |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/write_with_blobs.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetched_data.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/read_metadata.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/grpc_service.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/defs/libcore-file_storage-defs.a |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common/result.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_accessor.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/read_with_blobs.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/backtrace/libcontrib-libs-backtrace.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/common/libpy3tests-olap-common.global.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/avx2/liblibs-base64-avx2.a |58.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_1dba5118ef0a485f3bf803be50.o |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse42/libfarmhash-arch-sse42.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/orc/liblibs-apache-orc.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/avro/liblibs-apache-avro.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/http_download/libcore-file_storage-http_download.a |58.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_2d296dfaf373f7f15e6312517a.o |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hdr_histogram/libcontrib-libs-hdr_histogram.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/deprecated/yajl/libcontrib-deprecated-yajl.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/libcontrib-libs-farmhash.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/certs/libcerts.global.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/result.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/fastlz/libcontrib-libs-fastlz.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/http_download/proto/libfile_storage-http_download-proto.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/fmt/libcontrib-libs-fmt.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/address_sorting/libgrpc-third_party-address_sorting.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr4_cpp_runtime/libcontrib-libs-antlr4_cpp_runtime.a |58.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_6e536fb2c379a4ebe79c499de8.o |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/libessentials-core-issue.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/limit_sorted.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/upb/libgrpc-third_party-upb.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/flatbuffers/libcontrib-libs-flatbuffers.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libcore-file_storage-proto.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/libessentials-core-file_storage.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/manager.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/libessentials-core-issue.global.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libc_compat/libcontrib-libs-libc_compat.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/put_records_actor.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libcore-issue-protos.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libbz2/libcontrib-libs-libbz2.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libaio/static/liblibs-libaio-static.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/langver/libessentials-core-langver.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_thread/liblibs-libevent-event_thread.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_openssl/liblibs-libevent-event_openssl.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_extra/liblibs-libevent-event_extra.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/test_meta/libpy3tests-library-test_meta.global.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/minsketch/libessentials-core-minsketch.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/column_engine.cpp |58.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_938861be99a6cedecb22904193.o |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/statistics/libproviders-s3-statistics.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libfyaml/libcontrib-libs-libfyaml.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/platformdirs/libpy3contrib-python-platformdirs.global.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_corei7/liblibs-hyperscan-runtime_corei7.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/events/libdq-actors-events.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/datastreams_proxy.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/schema/libyt-lib-schema.a |58.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/hyperscan/runtime_avx2/liblibs-hyperscan-runtime_avx2.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/tablet/libydb-services-tablet.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/expr_nodes/libproviders-ytflow-expr_nodes.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_core2/liblibs-hyperscan-runtime_core2.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetched_data.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing/libfq-libs-checkpointing.a |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/session_info.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/controller/replication.h_serialized.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_base.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/queue_transaction_mixin.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/limit.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/address_helpers.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/query/libsrc-client-query.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/sys_params.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/chaos_lease.cpp |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/bridge/libblobstorage-dsproxy-bridge.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/liblibs-aws-sdk-cpp-aws-cpp-sdk-s3.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/stream_consumer_remover.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/common/service.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/stream_remover.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/add_index.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/serializability/libpy3tests-library-serializability.global.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/lib/libpy3tools-ydb_serializable-lib.global.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_table.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_with_stream.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/const.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/libcontrib-libs-googleapis-common-protos.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_transfer.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_discoverer.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/tenant_resolver.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_reader.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection_impl.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_reader.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/common_opt/libessentials-core-common_opt.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/peephole_opt/libessentials-core-peephole_opt.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp 
|58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_writer.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common/description.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/data_accessor.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/public.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/set/libcpp-unicode-set.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_writer.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_base.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/deleting.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_reader.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/accessor/secret_id.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/portion_info.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/add_data.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/event_util.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_reader.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/lag_provider.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_mount_cache.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/scheme.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/target_cluster_injecting_channel.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/private_events.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/merge.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/resolver.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/replication.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_writer.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/skynet.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/wire_row_stream.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/sticky_transaction_pool.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/abstract.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/table_exists.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_client.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/common/libessentials-parser-common.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/meta.cpp |58.8%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction_impl.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/constructor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/test_helper/program_constructor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/versioned_index.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/test_helper/kernels_wrapper.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/schema_version.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/iterator.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/granule/storage.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_partition_reader.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/timestamp_provider.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/snapshot_scheme.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/libyql-essentials-core.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_cache.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/chunk_replica.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/libcontrib-libs-hyperscan.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/events.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/data_statistics.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/helpers.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/helpers.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/column_features.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/ready_event_reader_base.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_serialization.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/public.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/cm_client/libproviders-pq-cm_client.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/read_limit.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/compressors/libproviders-s3-compressors.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_type_compatibility.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/constructor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/time_text.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/infinite_entity.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/config.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_yson_token.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hydra/version.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_common.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/packet.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/cypress_client/public.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/index_info.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/general_cache/service/manager.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/client/complex_types/uuid_text.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy_schemereq.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/config.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/requests.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/merge_complex_types.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/selector/liblcbuckets-constructor-selector.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/selector/liblcbuckets-planner-selector.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/snapshot_scheme.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/selector/liblcbuckets-constructor-selector.global.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/election/public.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/file_client/config.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/yson_format_conversion.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/common/libcolumnshard-export-common.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hive/timestamp_map.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/constructor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/metadata.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/liblcbuckets-planner-level.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.global.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/libcolumnshard-export-protos.a >> conftest.py::flake8 [GOOD] >> s3_helpers.py::flake8 [GOOD] >> test_bindings_0.py::flake8 [GOOD] >> test_bindings_1.py::flake8 [GOOD] >> test_compressions.py::flake8 [GOOD] >> test_early_finish.py::flake8 [GOOD] >> test_explicit_partitioning_0.py::flake8 [GOOD] >> test_explicit_partitioning_1.py::flake8 [GOOD] >> test_format_setting.py::flake8 [GOOD] >> test_formats.py::flake8 [GOOD] >> test_inflight.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_public_metrics.py::flake8 [GOOD] >> test_push_down.py::flake8 [GOOD] >> test_s3_0.py::flake8 [GOOD] >> test_s3_1.py::flake8 [GOOD] >> test_size_limit.py::flake8 [GOOD] >> test_statistics.py::flake8 [GOOD] >> test_streaming_join.py::flake8 [GOOD] >> test_test_connection.py::flake8 [GOOD] >> test_validation.py::flake8 [GOOD] >> test_ydb_over_fq.py::flake8 [GOOD] >> test_yq_v2.py::flake8 [GOOD] |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/public.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/public.cpp |58.8%| [CC] 
{BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/config.cpp |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/write_controller.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/blob_constructor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/put_status.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/fetcher.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/counters.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/kqp_common.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/helpers.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/abstract/libsession-storage-abstract.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/export/session/cursor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/initializer.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/protocol.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_subscribe.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/abstract.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/abstract.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/initialization.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/activation.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/executor.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/object.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/tier/libsession-storage-tier.global.a |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/config.cpp |58.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |58.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/helpers.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/helpers.cpp |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a 
|58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/stream_creator.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/local_discovery/grpc_service.cpp |58.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/flake8 >> test_yq_v2.py::flake8 [GOOD] |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/chunks/data.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/constructor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blob.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/common/config.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/objects_cache.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/granule/stages.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/generic_manager.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/accessor_init.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/tiering.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/index/index.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/manager.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/constructor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/meta.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/filtered_scheme.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/granule/portions_index.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_simple.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/snapshot.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/alter_impl.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/meta.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator__schema_upgrade.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/optimizer.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/alter.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/storage/granule/granule.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/filtered_scheme.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/behaviour.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator__configure.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/constructor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/dst_remover.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/abstract.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/fetcher.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/object.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/registration.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/defs.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator_impl.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/common.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/dst_alterer.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/common.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/fetcher.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/request/request_actor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/constructor.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/public.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/common/libcolumnshard-operations-common.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/resources/libtx-columnshard-resources.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/selector/abstract/selector.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator__schema.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator__init.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_data_ack_to_source.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/counters.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/restore.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/container.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_source_cursor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/modification.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/general_cache/service/counters.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_refresh.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/execute_queue.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/service.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/config.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_to_source.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/splitter/abstract/libcolumnshard-splitter-abstract.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/events/libsubscriber-abstract-events.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/consumer_client.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/subscriber/libsubscriber-abstract-subscriber.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/meta.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/events/tables_erased/libsubscriber-events-tables_erased.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/settings.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/chunk_meta.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/column_info.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/blob_info.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/validator.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/events/tx_completed/libsubscriber-events-tx_completed.a |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/generator.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/public.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/signature.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/abstract.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |59.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/columnshard/transactions/locks/interaction.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/public.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/request/request_actor_cb.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/access_behaviour.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/request/config.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/scheme_describe.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_finish_ack_to_source.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/common.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/read_finished.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/write.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_assign_stream_name.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/query_client/query_builder.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/version/version_definition.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/blob_reader.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/adapters.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/protos/libcolumnshard-transactions-protos.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/shard_impl.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_write_source_cursor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_cache.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_get_metrics.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/secret_behaviour.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound_compressor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/helpers.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_change_backend.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_create_replication.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/comparator.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/public.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/name_table.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/composite_compare.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_table_client.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/logical_type.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/io_tags.cpp 
|59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/arrow/liblibs-apache-arrow.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/control.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pluggy/py3/libpy3python-pluggy-py3.global.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/helpers.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/assert/libcpp-yt-assert.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/helpers.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/arrow/scheme/libio_formats-arrow-scheme.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_heartbeat.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/config/libproviders-dq-config.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-mqtt/librestricted-aws-aws-c-mqtt.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/selector/backup/selector.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/pushdown/libproviders-common-pushdown.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_alter_dst_result.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_init_schema.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/remote_timestamp_provider.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/program/registry.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/re2/libjsonpath-rewrapper-re2.global.a |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/config.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_describe_replication.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/config.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/common/libfq-libs-common.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_row.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/provider/exec/libdq-provider-exec.a |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_value.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_resolve_secret_result.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/group.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/dst_creator.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_reader.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/change_exchange/libydb-core-change_exchange.a |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/coordinator/coordinator_hooks.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_discovery_targets_result.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/service.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/tx/replication/controller/tx_assign_tx_id.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blob_cache.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/task.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/secret_resolver.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/bridge/bridge.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/serialize.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_row.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyHamcrest/py3/libpy3python-PyHamcrest-py3.global.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/value_consumer.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/plain/libarrow-accessor-plain.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/shard_reader.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_init.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/dynamic_table_transaction_mixin.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/batching_timestamp_provider.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_drop_dst_result.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/helpers.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_codegen_cpp.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_drop_replication.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/pg/libessentials-sql-pg.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyYAML/py3/libpy3python-PyYAML-py3.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/public.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/controller.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/expr_nodes/libproviders-solomon-expr_nodes.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_create_dst_result.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/tablet_queue.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_drop_stream_result.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_table_session.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/cli_base/libcli_base.a |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/general_cache/usage/abstract.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_wrapper/interface/libparser-pg_wrapper-interface.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/events/events.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor/service/libtx-conveyor-service.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/optimizer.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/engine/libydb-core-engine.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/program/builder.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/conveyor/service/worker.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/nodes_manager.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/libyql-essentials-minikql.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/logging.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/range_treap.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/locks_db.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/pipe.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_transaction.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/time_counters.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/long_tx_service.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/lwtrace_probes.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/etc_client.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/allocation.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/compacted_blob_constructor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/access_control.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/watermark_runtime_data.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_antlr4/libproto_ast-gen-v1_antlr4.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_worker_error.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ydb_proxy/local_proxy/libreplication-ydb_proxy-local_proxy.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_create_stream_result.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/common/libtx-schemeshard-common.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/write_actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/topic_message.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/events.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_proxy.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_alter_replication.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/internal_client.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/libpy3contrib-libs-googleapis-common-protos.global.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/background_controller.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/abstract.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/zero_level.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/two_part_description.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/controllers.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/scheme_board/subscriber.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/zero_level.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__configure.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/scheme_cache/scheme_cache.h_serialized.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_ansi/libproto_ast-gen-v1_ansi.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/helper.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/events/libolap-bg_tasks-events.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/shard_writer.cpp |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__last_step_subscriptions.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/protos/libcore-viewer-protos.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/constructor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_impl.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v2_chunks.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/activation/libproviders-common-activation.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/libyql-essentials-protos.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/ut_common/datashard_ut_common.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/oss/ydb_sdk_import/libpy3tests-oss-ydb_sdk_import.global.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_wrapper/libessentials-parser-pg_wrapper.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/libpy3library-mkql_proto-protos.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cachetools/py3/libpy3python-cachetools-py3.global.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/actor/export_actor.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/asttokens/libpy3contrib-python-asttokens.global.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/actor/write.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/arrow_resolve/libproviders-common-arrow_resolve.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_core/liblibs-libevent-event_core.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libidn/static/liblibs-libidn-static.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/attrs/py3/libpy3python-attrs-py3.global.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__acquire_read_step.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__scan.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/codec/arrow/libcommon-codec-arrow.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/broken_blobs.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/libunwind/libcontrib-libs-libunwind.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/linuxvdso/original/liblibs-linuxvdso-original.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_impl.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__init.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__read_step_subscriptions.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/codec/libproviders-common-codec.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/batch_builder/merger.cpp |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/comp_nodes/libproviders-common-comp_nodes.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/AsmParser/libllvm16-lib-AsmParser.a |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/program/program.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__write.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.collections/libpy3contrib-python-jaraco.collections.global.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cffi/py3/libpy3python-cffi-py3.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_v0_meta.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/indexed_blob_constructor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_state.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/config/libproviders-common-config.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/leaked_blobs.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/dq/libproviders-common-dq.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__restore_params.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/granule/normalizer.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__notify_tx_completion.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/abstract/abstract.h_serialized.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/recipes/common/libpy3library-recipes-common.global.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/GlobalISel/liblib-CodeGen-GlobalISel.a |59.3%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/gateways_utils/libproviders-common-gateways_utils.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/session.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.functools/py3/libpy3python-jaraco.functools-py3.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/gateway/libproviders-common-gateway.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_deprecated_snapshot.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/schema_version/version.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_info.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__stop_guard.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/rich.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libpy3ydb-library-services.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/pg/provider/libproviders-pg-provider.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_version_info.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_index_columns.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_config/events/liblibs-control_plane_config-events.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_cache/scheme_cache.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/tablet/gc_counters.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/udf_resolve/libproviders-common-udf_resolve.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/charset-normalizer/libpy3contrib-python-charset-normalizer.global.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-sdkutils/librestricted-aws-aws-c-sdkutils.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/common_level.cpp |59.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cryptography/py3/libpy3python-cryptography-py3.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__check.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/libproviders-common-schema.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/loading/stages.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__schema.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceproxy/public/libtx-sequenceproxy-public.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/python/enable_v3_new_behavior/libpy3sdk-python-enable_v3_new_behavior.global.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_caching/libydb-core-grpc_caching.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__init.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/chunks.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/operations/events.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceshard/public/libtx-sequenceshard-public.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceproxy/libcore-tx-sequenceproxy.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/durationpy/libpy3contrib-python-durationpy.global.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/boto3/py3/libpy3python-boto3-py3.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/libllvm16-lib-Target.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/mkql/libcommon-schema-mkql.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/parser/libcommon-schema-parser.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/decorator/py3/libpy3python-decorator-py3.global.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tiering/abstract/libtx-tiering-abstract.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a |59.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/skiff/libcommon-schema-skiff.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/files/libydb_cli-dump-files.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitstream/Reader/liblib-Bitstream-Reader.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/one_layer.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/tier/identifier.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tracing/service/libtx-tracing-service.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/libolap-bg_tasks-adapter.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/transform/libproviders-common-transform.a |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator_client/client.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/tier/s3_uri.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceshard/libcore-tx-sequenceshard.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/lib/libcommon-math-lib.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tracing/usage/libtx-tracing-usage.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Object/libllvm16-lib-Object.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/executing/libpy3contrib-python-executing.global.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/libpy3library-ydb_issue-proto.global.a |59.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |59.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/libpublic-issue-protos.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__propose_cancel.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/util/evlog/libcore-util-evlog.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IR/libllvm16-lib-IR.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/normalizer/portion/portion.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/libessentials-public-issue.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_compute.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_arrow_helpers.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/utf8proc/libcontrib-libs-utf8proc.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/long_tx_service_impl.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_actor.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/provider/libproviders-result-provider.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_async_input.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_async_output.cpp |59.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/locks/locks.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/copy_blob_ids_to_v2.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__schema_upgrade.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/common.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/global.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/langver/libessentials-public-langver.a |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/mon.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/counters/proxy_counters.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_proto_split/libproto_ast-gen-v1_proto_split.a |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_input_channel.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/future/py3/libpy3python-future-py3.global.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/tablet/write_queue.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_proxy/read_table_impl.h_serialized.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/tablet/broken_txs.cpp |59.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/libydb-core-util.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/address_classifier.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/tables_manager.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/backoff.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_unused_tables_template.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/primary.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/concurrent_rw_hash.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_heap.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/cpuinfo.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/cache.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/libydb-core-viewer.global.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/aws.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/random.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v1_chunks.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/grpcio/py3/libpy3python-grpcio-py3.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/normalizer/portion/clean.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/libcontrib-libs-grpc.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/write.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/console.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/format.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__write_index.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hazard.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fast_tls.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/text.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/stlog.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ui64id.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__monitoring.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Vectorize/liblib-Transforms-Vectorize.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ulid.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/gen_step.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fragmented_buffer.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/page_map.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/source_location.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hyperlog_counter.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_tasks_counters.cpp |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/libydb-core-viewer.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/libpy3api-protos.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/arrow/libpublic-udf-arrow.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/clients/libpy3tests-library-clients.global.a |59.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/runtime/dq_tasks_runner.h_serialized.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/libessentials-public-types.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/support/libpublic-udf-support.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/libyql-essentials-sql.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/messagebus/libcpp-monlib-messagebus.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_transport.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/pg_dummy/libessentials-sql-pg_dummy.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_tasks_runner.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lua/libcontrib-libs-lua.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/tables/normalizer.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/wb_merge.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/dq/runtime/dq_output_channel.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/wb_filter.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_output_consumer.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_schema.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/wb_aggregate.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/coordinator/mediator_queue.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/storage/s3/storage.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/commit_impl.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/libydb-core-wrappers.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/settings/libessentials-sql-settings.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/parser_detail.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/resource_subscriber/actor.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/dynumber/libessentials-types-dynumber.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yson_helpers/libyt-lib-yson_helpers.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/http_proxy/authorization/liblibrary-http_proxy-authorization.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/parso/py3/libpy3python-parso-py3.global.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/md5/libcpp-digest-md5.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/abstract/abstract.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/libydb-library-login.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/libcore-ymq-proto.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/resource_subscriber/task.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_actualization.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/common/libymq-queues-common.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_empty.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clapack/part1/liblibs-clapack-part1.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/write_data.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/simple/libcpp-http-simple.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_private_events.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/fifo/libymq-queues-fifo.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/protos/liblibrary-aclib-protos.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/std/libymq-queues-std.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/accessor/libydb-library-accessor.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_subdomain_path_id.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/slice_builder/builder.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/constructor.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/inflight_request_tracker.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson_pull/libyson_pull.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/protos/viewer.pb.cc |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/actor_type/liblibrary-actors-actor_type.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/format/libsql-v1-format.global.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/uriparser/libcontrib-restricted-uriparser.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/format/libsql-v1-format.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/harmonizer/libactors-core-harmonizer.a |59.6%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr3/libv1-lexer-antlr3.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/dnscachelib/liblibrary-actors-dnscachelib.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/libsql-v1-lexer.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr3_ansi/libv1-lexer-antlr3_ansi.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pexpect/py3/libpy3python-pexpect-py3.global.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_parquet/libydb-library-arrow_parquet.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/helpers/liblibrary-actors-helpers.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_ansi/libv1-lexer-antlr4_ansi.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/dnsresolver/liblibrary-actors-dnsresolver.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/compproto/liblibrary-cpp-compproto.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/special_cleaner.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/abstract.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr4_ansi/libv1-proto_parser-antlr4_ansi.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/prof/liblibrary-actors-prof.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/wardens/libpy3tests-library-wardens.global.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/indexes/schema.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/memory_log/liblibrary-actors-memory_log.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/log_backend/liblibrary-actors-log_backend.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__plan_step.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr3/libv1-proto_parser-antlr3.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/util/liblibrary-actors-util.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/testlib/common/libactors-testlib-common.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/expr_nodes/libproviders-result-expr_nodes.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/mock/libactors-interconnect-mock.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/http/liblibrary-actors-http.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/k8s_api/libpy3tools-cfg-k8s_api.global.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/secondary.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/testlib/liblibrary-actors-testlib.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/icu/libcontrib-libs-icu.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/libsql-v1-proto_parser.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/Columns/liblibrary-arrow_clickhouse-Columns.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/sharing.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/liblibrary-actors-protos.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/bitseq/libcpp-containers-bitseq.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/libpy3ydb-tools-cfg.global.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/binary_json/libessentials-types-binary_json.a |59.7%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/liblibrary-actors-core.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/Common/liblibrary-arrow_clickhouse-Common.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/bzip/libblockcodecs-codecs-bzip.global.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/tablet/ext_tx_base.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/uuid/libessentials-types-uuid.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/DataStreams/liblibrary-arrow_clickhouse-DataStreams.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/liblibrary-actors-interconnect.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/ring_buffer/libcpp-containers-ring_buffer.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/proto/libutils-log-proto.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr3_ansi/libv1-proto_parser-antlr3_ansi.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/wilson/liblibrary-actors-wilson.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_kernels/libydb-library-arrow_kernels.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/libcpp-digest-argonish.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v0/libessentials-sql-v0.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dns/liblibrary-cpp-dns.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/failure_injector/libessentials-utils-failure_injector.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/lower_case/libcpp-digest-lower_case.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/expr_traits/libyt-lib-expr_traits.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/backtrace/libessentials-utils-backtrace.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/libpy3providers-s3-proto.global.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/journal_client.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_view.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/result_format/libessentials-public-result_format.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/column_families/schema.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/ydbd_slice/libpy3ydbd_slice.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/libpy3yql-essentials-protos.global.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/validate_logical_type.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libpy3yql-dq-proto.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/liblibrary-folder_service-proto.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/libydb-library-folder_service.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/http/libcpp-mapreduce-http.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/libpy3ydb-tests-library.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/simple_builder/liblibrary-formats-arrow-simple_builder.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/absl_flat_hash/libcpp-containers-absl_flat_hash.a |59.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_add_sharding_info.cpp |59.7%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/libessentials-utils-log.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/libydb-library-arrow_clickhouse.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/protos/liblibrary-db_pool-protos.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/prompt-toolkit/py3/libpy3python-prompt-toolkit-py3.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/libydb-library-db_pool.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/locks/read_start.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v0/lexer/libsql-v0-lexer.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/escape/libcpp-html-escape.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libpy3core-issue-protos.global.a |59.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/options.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/locks_db.cpp |59.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/limiter/grouped_memory/usage/liblimiter-grouped_memory-usage.a |59.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libpy3dq-actors-protos.global.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__progress_tx.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/libpy3api-service-protos.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/split/libcpp-deprecated-split.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/control.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/logger/liblibrary-cpp-logger.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/getopt/small/libcpp-getopt-small.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/libpy3public-issue-protos.global.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/chaos_lease_base.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/fetch/libessentials-utils-fetch.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/cache.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/validation/liblibrary-formats-arrow-validation.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/walle/libpy3tools-cfg-walle.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/disjoint_sets/liblibrary-cpp-disjoint_sets.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/opt/physical/predicate_collector.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/logger/libydb-library-logger.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/common/libcpp-mapreduce-common.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/time_cast/time_cast.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/oss/canonical/libpy3tests-oss-canonical.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/exception/libcpp-monlib-exception.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/usage/abstract.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/usage/events.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/actors/libgrpc-server-actors.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/usage/stage_features.cpp |59.8%| 
[AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/api/libcpp-malloc-api.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/program/resolver.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/fyamlcpp/libydb-library-fyamlcpp.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema_serialization_helpers.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/scalar/liblibrary-formats-arrow-scalar.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/liblibrary-grpc-server.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lcs/liblibrary-cpp-lcs.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/hooks/abstract/abstract.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/thread_local/libcpp-threading-thread_local.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/interface/libytflow-integration-interface.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/http_client/libcpp-mapreduce-http_client.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/url_mapper/libyt-lib-url_mapper.a |59.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/resources/libservice-pages-resources.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/conclusion/libydb-library-conclusion.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/libmonlib-service-pages.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/priorities/service/service.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/library/user_job_statistics/libmapreduce-library-user_job_statistics.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/resource_subscriber/events.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schine/liblibrary-schlab-schine.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/libpy3api-grpc-draft.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/relaxed_escaper/libcpp-string_utils-relaxed_escaper.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/libpy3library-formats-arrow-protos.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/libydb-core-kqp.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/llvm16/libcodec-codegen-llvm16.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/threading/libessentials-utils-threading.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/llvm16/libcodec-codegen-llvm16.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator_client/actor_client.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pretty_types_print/protobuf/liblibrary-pretty_types_print-protobuf.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/libpy3library-login-protos.global.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/libpy3library-actors-protos.global.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/mon/liblibrary-schlab-mon.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/deprecated/read_batch_converter/libpersqueue-deprecated-read_batch_converter.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/security/libydb-library-security.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/schemereq.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/expr_nodes/libproviders-yt-expr_nodes.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/signal_backtrace/libydb-library-signal_backtrace.a |59.8%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/abstract/liblibrary-workload-abstract.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/indexes/update.cpp |59.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/http/libcore-ymq-http.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/password_checker/liblibrary-login-password_checker.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/liblibrary-workload-kv.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jsonschema/py3/libpy3python-jsonschema-py3.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/misc/libcpp-yt-misc.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/xml/document/libcpp-xml-document.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/liblibrary-workload-kv.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/config_clusters/libyt-lib-config_clusters.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/lib/libyt-gateway-lib.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/memory_tracker.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/io/libcpp-http-io.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/xml/init/libcpp-xml-init.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/client/libcpp-mapreduce-client.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/liblibrary-workload-stock.global.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_base/cli_kicli.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/graph_reorder/libyt-lib-graph_reorder.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/tasks_list.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/interface/libcpp-mapreduce-interface.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schoot/liblibrary-schlab-schoot.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/libessentials-sql-v1.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/libyql-dq-actors.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/public/liblibrary-yaml_config-public.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/liblibrary-formats-arrow-protos.a |59.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/libcore-base-generated.a |59.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/common/libproviders-dq-common.a |59.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/common/libproviders-yt-common.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/http/xml_builder.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/http/parser.rl6.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cblas/libcontrib-libs-cblas.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/batch_slice.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/liblibrary-ydb_issue-proto.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/mkql_helpers/libyt-lib-mkql_helpers.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/snapshotreq.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/memory/libcpp-yt-memory.a |59.9%| [AR] 
{BAZEL_DOWNLOAD} $(B)/library/cpp/terminate_handler/liblibrary-cpp-terminate_handler.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/helpers.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_tracker.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/log/libyt-lib-log.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/cache/liblibrary-login-cache.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/libydb-library-ydb_issue.global.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/init_yt_api/libyt-lib-init_yt_api.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/events/libcore-wrappers-events.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/input_transforms/libdq-actors-input_transforms.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/common/libdq-actors-common.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/spack/libmonlib-encode-spack.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/task_runner/libdq-actors-task_runner.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/upload_rows_counters.cpp |59.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/comp_nodes/libyql-dq-comp_nodes.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/table_creator/table_creator.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_discovery.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/row_spec/libyt-lib-row_spec.a |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/crc32c/libcpp-digest-crc32c.a |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/packaging/py3/libpy3python-packaging-py3.global.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/metrics/libproviders-common-metrics.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/interface/libproviders-dq-interface.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libdq-actors-protos.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/switch/liblibrary-formats-arrow-switch.a |59.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/persistent_queue.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/oauthlib/libpy3contrib-python-oauthlib.global.a |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_program_builder.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_block_hash_join.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/yql_common_dq_factory.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_operator_common.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_aggregate.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_operator_serdes.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/converter.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/ttl/update.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/ttl/schema.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/liblibrary-mkql_proto-protos.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/multidict/libpy3contrib-python-multidict.global.a |59.7%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_combine.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/normalizer.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/tier/object.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/naming_conventions/libydb-library-naming_conventions.a |59.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/libyaml-config-protos.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/arrow/libproviders-common-arrow.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/public.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/http_gateway/libproviders-common-http_gateway.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/resolvereq.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/helpers.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/proto/libproviders-yt-proto.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/native/libyt-gateway-native.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/opt/libproviders-yt-opt.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/libproviders-dq-actors.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/opaque_path_description.cpp |59.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/replica.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/backup.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/libdq-api-protos.a |59.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/libydb-core-base.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/events.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/upload_rows_counters.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_committer.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor/service/service.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/commitreq.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/describe.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/subscriber.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/subdomain.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/manager.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/storage_pools.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/schema.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/tablet_status_checker.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/traceid.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/column_families/update.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/tx_processing.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/read_table_impl.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__statistics.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_root.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/tx_proxy/datareq.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/tablet.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/services_assert.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_db.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/local_user_token.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/kmeans_clusters.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/actor_activity_names.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__plan_step.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/read_data_protocol.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/populator.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_whoami.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/tx_controller.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/hash_modulo.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/propose_tx.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/granule/clean_granule.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_finish_async.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/load_test.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_rollback_transaction.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/adapter.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/priorities/usage/service.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/update.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/options/update.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_reader.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/auth.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/blobstorage_grouptype.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/init/init.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/blobstorage.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/monitoring.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/backtrace.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/group_stat.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/domain.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/feature_flags_service.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/event_filter.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/counters.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/fetcher.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/row_version.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/pool_stats_collector.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/path.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/manager/manager.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/memory_controller_iface.h_serialized.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/logoblob.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/localdb.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/context.cpp |60.0%| [CC] 
{BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/blobstorage_config.pb.cc |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/upload_rows_common_impl.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/storage_helpers.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/labeled_db_counters.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/more-itertools/py3/libpy3python-more-itertools-py3.global.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_vdisk.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/common/common.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/hash_intervals.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank.cpp |59.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/res_pull/libyt-lib-res_pull.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/table/table.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_impl.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/discovery_actor.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/topic_write_scenario.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/table_index.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/schema/schema.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_9818d2b70aad7db98a0f9c044c.o |60.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_cluster.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/upload_rows.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/schema/update.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/simple.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_wb_req.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/manager.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/columns/schema.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/dq/llvm16/libcomp_nodes-dq-llvm16.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/util/proto/libprotobuf-util-proto.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_no_pg_wrapper/liblibs-row_dispatcher-purecalc_no_pg_wrapper.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/layout/layout.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/packedtypes/liblibrary-cpp-packedtypes.a |60.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_45b6981aed17dda33d43217f52.o |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_pq.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/database_resolver_mock.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/defaults.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/object.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/public/lib/ydb_cli/commands/ydb_debug.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/www/libcpp-messagebus-www.global.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_browse.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/object.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/options/schema.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/limiter/grouped_memory/usage/config.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/rpc_long_tx.cpp |59.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_latency.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/update.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zc_memory_input/libcpp-streams-zc_memory_input.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/columns/update.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugedefs.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_19422d2b60428207055b4ed843.o |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_bd8a6d25e26a719f80141d0711.o |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/limiter/grouped_memory/usage/service.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/update.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/www/libcpp-messagebus-www.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_root_common.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/ydb_cli/commands/benchmark_utils.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_response.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/locks/dependencies.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/topic_operations_scenario.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_query.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/failure_injection.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_rewriter.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/handoff_map.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/update.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_dynamic_config.cpp |60.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/json/libmonlib-encode-json.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_precompute.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/chunk_queue/libcpp-threading-chunk_queue.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/private/labeled_db_counters.cpp |60.1%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/store/store.cpp |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/liblibs-row_dispatcher-format_handler.a |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/json/libcpp-protobuf-json.a |60.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/ydb_ping.h_serialized.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/config/libcpp-messagebus-config.a |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_workload_tpcc.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/update.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer_request.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_build_stage.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/ydb_latency.h_serialized.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Werkzeug/py3/libpy3python-Werkzeug-py3.global.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_stage_float_up.cpp |60.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_profile.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_0664e2ab2eb37ae9f02538e483.o |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_0035b673555f394234ae284e25.o |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/bzip2/libcpp-streams-bzip2.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_parser.cpp |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/filters/librow_dispatcher-format_handler-filters.a |60.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_pdisk.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_mon.cpp |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/events/liblibs-row_dispatcher-events.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp |60.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/read_rule/libfq-libs-read_rule.a |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/signer/libfq-libs-signer.a |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_51562f83ff52d1ceaac0c36a08.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_d2e759e2d0ff1243166a3bc7d9.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_fe9c8c25e6c570097a9d0c06f9.o |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hulldefs.cpp |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/avx2/libhighwayhash-arch-avx2.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_encrypt.cpp |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/libcontrib-libs-highwayhash.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/vdisk/balance/utils.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/sse41/libhighwayhash-arch-sse41.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/console_dumper.cpp |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/common/librow_dispatcher-format_handler-common.a |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zstd/libcpp-streams-zstd.a |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/buffered/libmonlib-encode-buffered.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/resource_broker.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_helpers.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_4b767dce2ddf7a5424aef828d6.o |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_workload_import.cpp |60.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/retry/libpy3library-python-retry.global.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp |60.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ut_helpers/libtx-replication-ut_helpers.a |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/fq_runner/libpy3tests-tools-fq_runner.global.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_storage.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_event_filter.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/data_plane_helpers.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ut_helpers/test_table.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ut_helpers/test_topic.cpp |60.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/scheduler/libcpp-messagebus-scheduler.a |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_admin.cpp |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_5333c1912ecbac0f64ff97551f.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_monitoring.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_aebf7c73fcaf6a54715cc177c8.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_operation.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugerecovery.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/http/types.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/topic_read_scenario.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/http/xml.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_block.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer_topic_data.cpp |60.1%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/http_client.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_query_state.cpp |60.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/arrow/cpp/src/arrow/python/libpy3src-arrow-python.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_pipe_req.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_replica.cpp |60.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/http/http.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_helpers.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_scheme.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_stat.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_agg.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_viewer.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_worker_common.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/wilson_tracing_control.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/node_service/kqp_node_service.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_query_stats.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_48884f6b745ced4d3e78997cb1.o |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/util/libcpp-protobuf-util.a |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lzma/libcpp-streams-lzma.a |60.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/sender.cpp |60.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/runlib/utils.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_proxy.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tenant_runtime.cpp |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/topic_readwrite_scenario.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/board_lookup.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/sync.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_yql.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/read_rule/read_rule_creator.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/tablet_killer.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/tools/datastreams_helpers/libpy3tests-tools-datastreams_helpers.global.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/interop/libcpp-protobuf-interop.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_session_actor.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/runlib/application.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_check_integrity_get.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/read_rule/read_rule_deleter.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sighandler/liblibrary-cpp-sighandler.a |60.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/objcopy_3fdb568d483b57acc8e627f8c2.o |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sliding_window/liblibrary-cpp-sliding_window.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/board_replica.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/board_publish.cpp |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/balance_coverage/balance_coverage_builder_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_source.cpp |60.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/pq_read |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/py3/libpy3python-moto-py3.global.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/ttl/validator.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/bootstrapper.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemonactor.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/bridge.cpp |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/responses/py3/libpy3python-responses-py3.global.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/parsers/librow_dispatcher-format_handler-parsers.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/group_sessions.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_compilation/liblibs-row_dispatcher-purecalc_compilation.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/config.pb.cc |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_guardian.cpp |60.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_temp_tables_manager.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/liblibrary-cpp-messagebus.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/runlib/kikimr_setup.cpp |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/get_value.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/serialize_deserialize.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/skiff/liblibrary-cpp-skiff.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/appdata.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/oldmodule/libcpp-messagebus-oldmodule.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_worker_actor.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tx_helpers.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/xmltodict/py3/libpy3python-xmltodict-py3.global.a |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/shared_resources/interface/liblibs-shared_resources-interface.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request_reporting.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/dynamic_counters/libcpp-monlib-dynamic_counters.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_selector.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Flask-Cors/py3/libpy3python-Flask-Cors-py3.global.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Flask/py3/libpy3python-Flask-py3.global.a |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_defs.cpp |60.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_scan_data_ut.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/shared_resources/libfq-libs-shared_resources.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/stack_vector/libcpp-containers-stack_vector.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/storage_stats.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/json/proto/libprotobuf-json-proto.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/deleter.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/ydb_cli/commands/ydb_node_config.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/recipe/libpy3python-testing-recipe.global.a |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/tasks_packer/libfq-libs-tasks_packer.a |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/ls_checks.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/export_reboots_common.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyarrow/libpy3contrib-python-pyarrow.global.a |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/click/py3/libpy3python-click-py3.global.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_many.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/unistat/libmonlib-encode-unistat.a |60.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/python/colorama/py3/libpy3python-colorama-py3.global.a |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/result_formatter/libfq-libs-result_formatter.a |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/stack_array/libcpp-containers-stack_array.a |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sse/liblibrary-cpp-sse.a |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/test_connection/events/liblibs-test_connection-events.a |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/protobuf/libmessagebus_protobuf.a |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/liblibrary-cpp-retry.a |60.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/combinatory/libut-olap-combinatory.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/connector_client_mock.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_bad_blobid.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_large.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/itsdangerous/py3/libpy3python-itsdangerous-py3.global.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/helpers.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_common.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_huge.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/src/fq_runner.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/monlib/libpy3library-python-monlib.global.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ut_helpers/mock_service.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/vdisk_mock.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/scheduler/old/kqp_compute_scheduler_ut.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_localrecovery.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/src/common.cpp |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/monlib/libpy3library-python-monlib.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/proto/liblibs-control_plane_storage-proto.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/proto/libtools-stress_tool-proto.a |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemon.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_load.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/lib/libydb_device_test.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyarrow/libpy3contrib-python-pyarrow.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/brotli/libcpp-streams-brotli.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/monitoring/libcpp-messagebus-monitoring.a |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.cpp |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/libpy3tests-tools-ydb_serializable.global.a |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ut_data_erasure_reboots.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_simplebs.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/ut_vdisk/lib/test_synclog.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_outofspace.cpp |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/oauth/libclient-yc_private-oauth.a |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_repl.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/failing_mtpq.cpp |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/packers/liblibrary-cpp-packers.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/libpy3tools-lib-cmds.global.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/libpy3ydb_recipe.global.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/src/actors.cpp |60.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/objcopy_c55121179eeb3b5753498290c4.o |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/protos/libcpp-retry-protos.a |60.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/libfq-libs-row_dispatcher.a |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/prepare.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/actors_factory.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/leader_election.cpp |60.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/test_import/libtest_import_udf.so |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_defrag.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_dbstat.cpp |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/coordinator.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_gc.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_brokendevice.cpp |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/probes.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/hooks/testing/controller.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |60.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/topic/utils/libintegration-topic-utils.a |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/bridge/objcopy_0adb3ed6d98cbd98d13d8a3085.o |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/bridge/objcopy_4b2ec656f7e85bc05586d7e6fc.o |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/bridge/objcopy_de8e7bde61396640f718e89d07.o |60.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/common/libcommon.a |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/secondary_index/objcopy_6b62c1db41e3ebd0278a84dced.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/secondary_index/objcopy_b83d9052e0bc89877bbe223294.o |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/lib/libpy3tests-datashard-lib.global.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/dataset.cpp |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/secondary_index/objcopy_716263ce181e67161f84180281.o |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/progress_indication.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/aws.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/progress_bar.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/public/lib/ydb_cli/common/profile_manager.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/abstract.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/interactive.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/formats.h_serialized.cpp |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/examples.cpp |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/command.cpp |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/cert_format_converter.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/common.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/format.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/print_utils.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/command_utils.cpp |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_b16f09f52f66256b435b6170a6.o |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/parameter_stream.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/normalize_path.cpp |60.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/interruptible.cpp |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_71d9ff7b2b2ae9abc3a65e5512.o |60.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/parameters.h_serialized.cpp |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_004939e3ca9b55c7f49ac8d93c.o |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/ydb_cli/commands/ydb_benchmark.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/pretty_table.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/pg_dump_parser.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/print_operation.cpp |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_ae5b9f6e7a00f305f01a3dde87.o |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/csv_parser.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/client_command_options.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/plan2svg.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/sys.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/scheme_printers.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/parameters.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/recursive_remove.cpp |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/lib/libpy3tests-sql-lib.global.a |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/tabbed_table.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/query_stats.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/recursive_list.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/retry_func.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/root.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/yt.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/waiting_bar.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/public/lib/ydb_cli/common/ydb_updater.cpp |60.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |60.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/coordinator_ut.cpp |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/scheme/liblibrary-cpp-scheme.a |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/gateway/dummy/libpq-gateway-dummy.a |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/src/fq_setup.cpp |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/test_client.cpp |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/sdk_core_access/libydb_sdk_core_access.a |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/objcopy_1406195445f45d950dda89fcd8.o |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_bridge.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/hooks/testing/ro_controller.cpp |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_8685c3ae88e5169a5acffc7bc4.o |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_ff581f3cff717ab223922f0cd8.o |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_d191482d8b66f1c03ea8df56d3.o |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr4-c3/libcontrib-libs-antlr4-c3.a |60.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |60.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/bridge/ydb-tests-functional-bridge |60.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/daf02fd86bb7e2296f1437ae1f_raw.auxcpp |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_643fa2679e88d9b2d33558b050.o |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_fe15eb83a42d9d70d347bbba65.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_53073eb93c76466fca8f474c5f.o |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/fixtures/libpy3tests-library-fixtures.global.a |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_8120ef49e7e653ed0601604313.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_d3af02c7d57ea2cbbe5d381baa.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_f93c60b04a0499f2ec6880591a.o |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/ut_common/ut_common.cpp |60.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_debug_v1.{pb.h ... grpc.pb.h} |60.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/services_common.pb.{h, cc} |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/common/pq_ut_common.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_2492aafb6862566a2398c9f27e.o |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.{pb.h ... 
grpc.pb.h} |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_e66920085df69f6f7e41547063.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_3df021aac8504049c53286aea0.o |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/workload/libpy3stress-simple_queue-workload.global.a |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.{pb.h ... grpc.pb.h} |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_view.pb.{h, cc} |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/client/linux/libsrc-client-linux.a |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/topic_session.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/leader_election_ut.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/test_server.cpp |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query.pb.{h, cc} |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/common/libpy3tests-stress-common.global.a |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_d0e1cde98d2ab34e72d18aae9c.o |60.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.{pb.h ... grpc.pb.h} |60.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/ut_utils.cpp |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/workload/libpy3stress-node_broker-workload.global.a |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_953328e5c3275a286b65dc3b1d.o |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/liblibs-breakpad-src.a |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_d2d4e3343da9b011ee6a983244.o |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_60a4829fdc305e3a74a7ddcb41.o |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_faketablet.cpp |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_76cd981cf66123b7633d25b898.o |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_22b5b8dd6ea05f4194f60e6181.o |60.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/a1cd5217b8fdb95aa9fc955f31_raw.auxcpp |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/workload/libpy3stress-transfer-workload.global.a |60.3%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/sensitive.pb.{h, cc} |60.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/b96345c0a74633add1330c0726_raw.auxcpp |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_table_v1.{pb.h ... grpc.pb.h} |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_topic_v1.{pb.h ... 
grpc.pb.h} |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_dynamic_config.pb.{h, cc} |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |60.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |60.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |60.3%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/expr_nodes/yql_s3_expr_nodes.{gen.h ... defs.inl.h} |60.3%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config__intpy3___pb2.py.p5ju.yapyc3 |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/yql_testlib/yql_testlib.cpp |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/croaring/libcontrib-libs-croaring.a |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.{pb.h ... grpc.pb.h} |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units__intpy3___pb2.py{ ... i} |60.4%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console__intpy3___pb2.py{ ... i} |60.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/s3_recipe/s3_recipe |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units__intpy3___pb2.py.p5ju.yapyc3 |60.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/5c5fdf614c3039a8dba94a4f38_raw.auxcpp |60.3%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_903d4758faea71f1363e296b3f.o |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_c77713875cf17988efd8fc0fb3.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_359d47616c1036f0865eb1e662.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_c52ec5ba5ab0b788efaa5ed704.o |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/clickhouse.pb.{h, cc} |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config__intpy3___pb2.py{ ... i} |60.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ftxui/libcontrib-libs-ftxui.a |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_5accfe00d45fb7ebcc30e116b2.o |60.4%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/rescompiler/rescompiler |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_b783a1a2aacb855daa1e55fad6.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_da2669c2228a88c83cd32d45da.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_93665db601a12d4842de4565e2.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_ce0222bab1634be9f9a52f715d.o |60.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.{pb.h ... 
grpc.pb.h} |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_ec94bbf9004678001f4c8195e3.o |60.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part16/ydb-tests-fq-yt-kqp_yt_file-part16 |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.5%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc__intpy3___pb2.py{ ... i} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old__intpy3___pb2.py{ ... i} |60.3%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config__intpy3___pb2.py{ ... i} |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old__intpy3___pb2.py.p5ju.yapyc3 |60.4%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_table.pb.{h, cc} |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_065e9244d685c2b8f0ab66e414.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_461999da7ba13deab5689c18ec.o |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/workload/libpy3stress-oltp_workload-workload.global.a |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/row_dispatcher.cpp |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/minidumps/objcopy_e740d8bfaebae830aaeb4ace59.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/minidumps/objcopy_be727953c626d90e9f80dacc0b.o |60.5%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc__intpy3___pb2.py.p5ju.yapyc3 |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_49a1ca9559288648fba9cf7b65.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/minidumps/objcopy_077abccc5552b4ff2e53b07653.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_17cef60c2dd0eb7ea46181ba87.o |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/workload/type/libpy3oltp_workload-workload-type.global.a |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_0446f521b26a2e8128f94ac50f.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_367e2bc5d83faa0907a06d2976.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_1aeeb50f676472f975830c135d.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_bcbbd2d8f2367d5f3ed5199234.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_f05ead59375a9db120b95dd730.o |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/dq_task_params.pb.{h, cc} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_scheme_v1.{pb.h ... grpc.pb.h} |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/aggregation.cpp |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/protos/flat_table_part.pb.{h, cc} |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/ut/common/kqp_workload_service_ut_common.cpp |60.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/fq_v1.{pb.h ... 
grpc.pb.h} |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_d709b1895f91108d9f51b703ea.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_ec9bc627b6d56d1a941c2b7e4f.o |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/common/autoscaling_ut_common.cpp |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_a5874452d3dbd6f6e49cd08be6.o |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/minidumps/ydb-tests-functional-minidumps |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_a38b1580810a6e4b419da99dcf.o |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_b9fd5c62781ec3b78d111a0ba7.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_9ea5b1fb7a4f8e1b0b8d7cf345.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_dfbd751fc64901b06ded4354c8.o |60.4%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/bin/moto_server |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/sqs/libpy3tests-library-sqs.global.a |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_7f9e816a97aaeee837ac316091.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_5d73baff4bb68923ddbe5f4fcd.o |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_242486256e1af973cd1d5376d1.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_2aa1916d45dca98014edb3d732.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_504b845d57f1a23561e970de61.o |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/fc437004cc347d978fb8cbf231_raw.auxcpp |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_2efdf95387a81f55cf9c81071a.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_1574e8a5a6c530c7bfd6378c4d.o |60.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/libpy3functional-sqs-merge_split_common_table.global.a |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_59eb97971e5f83d3296e6c33b5.o |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/dummy.{pb.h ... grpc.pb.h} |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_82d6d29ac7be3798b7e748facc.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_b1ab101896e634020e0c6ffeaf.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_60e08504076128d310212c6460.o |60.5%| [LD] {BAZEL_DOWNLOAD} $(B)/library/recipes/docker_compose/docker_compose |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_589d529f9477963cf67237781c.o |60.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/workload/libpy3show_create-view-workload.global.a |60.5%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/plugins/grpc_python/grpc_python |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.{pb.h ... 
grpc.pb.h} |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_5acd2383ed2cd599cfd64f7c8a.o |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/row_dispatcher_service.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/stress_tool/device_test_tool.cpp |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/proto/device_perf_test.{pb.h ... grpc.pb.h} |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |60.5%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/py3cc/py3cc |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.{pb.h ... grpc.pb.h} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/servicecontrol/access_service.{pb.h ... grpc.pb.h} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.{pb.h ... grpc.pb.h} |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_27c0687ceeb7ce4ff5e4cea90a.o |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.{pb.h ... grpc.pb.h} |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_7eab954373d77ffb1fab95ca0d.o |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_d68e1e5b762e412afe6a534487.o |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/monitoring/mon_proto.pb.{h, cc} |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tbb/libcontrib-libs-tbb.a |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_265d7fd505d52534f38ea6fb7f.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_40226ff8497733c6e798ee3940.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_a52eb3c900a84eaad86a211549.o |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/common.pb.{h, cc} |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/local.cpp |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_08a4b5d38a76e21591db0c3424.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_e2637cea0f2e4db109b364a246.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/objcopy_1f78e7638ae0f2e308bd7331f9.o |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |60.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/workload/libpy3stress-reconfig_state_storage_workload-workload.global.a |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/objcopy_988cc467d4da79de606ebf50ee.o |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/objcopy_f4efacd00293c5fe09c3f84a62.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_f4b44a5d280d0f27f5ffd278e8.o |60.6%| [PB] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/issue_severity.pb.{h, cc} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pinger.pb.{h, cc} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.{pb.h ... grpc.pb.h} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/health_config.pb.{h, cc} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_dynamic_config_v1.{pb.h ... grpc.pb.h} |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_12d01741952bd4afa836364d84.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_15e284a8ecb30c90903e842e70.o |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_executor.pb.{h, cc} |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_cee1e02beaf827051149b5ca30.o |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query_stats.pb.{h, cc} |60.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/12c5e25ae32e1ab2f8f95f91f5_raw.auxcpp |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/protos/events.pb.{h, cc} |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pending_fetcher.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_common.pb.{h, cc} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_import.pb.{h, cc} |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/large_serializable/objcopy_24cfda7d41447be7f781827fb8.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/large_serializable/objcopy_aab724be52dad3663d415db204.o |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/stress-reconfig_state_storage_workload-tests |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/large_serializable/objcopy_bab46dc0e0bb01200e952d765c.o |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.{pb.h ... grpc.pb.h} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/credentials.pb.{h, cc} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.{pb.h ... 
grpc.pb.h} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_tasks.pb.{h, cc} |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/cursor.pb.{h, cc} |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/select.cpp |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_388aef0b6ac03d4f661ae7a30e.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_f8b2cbafb1fed0e25bf9683c2d.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_52d3e6a0651990fc997ab40ba2.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_dc1e8788b8287c02880cfe2814.o |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/fq.pb.{h, cc} |60.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/http_api_client/libpy3fq-libs-http_api_client.global.a |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_ce073e3cc612363936bdd04210.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_6cfba3dbee97ec121b2f346459.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_c43ce24509a50b033fa4050a33.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_e31620202d3ba8df14ff2a18e1.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_912038ceef7de48e0e15c25307.o |60.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/c664ef6ca80e747b410e1da324_raw.auxcpp |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_0c451aebc6dafbdf0d9da2ab02.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_703c8e1d9a9a2b271b8b995a29.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_52e86d5ee8fadefdbb415ca379.o |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/lib/libpy3functional-tpc-lib.global.a |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_bac05c8b5a79735451f58d9322.o |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/helpers/libpy3olap-scenario-helpers.global.a |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_64bde13108f9284b2e9f0bbb7a.o |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/lib/libpy3olap-load-lib.global.a |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_03f75cad4510fd9d018635026c.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_2194854d9f8cbb3e0ba798b861.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_3bb523a1011c0a7019f2684a90.o |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_cd57da3671b96739ee73293fb1.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_e8c94c485e81b4b2899f52f594.o |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/workload/libpy3stress-s3_backups-workload.global.a |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/file_storage.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/protos/events.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scripting.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_proxy.pb.{h, cc} |60.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/restricted/patched/replxx/librestricted-patched-replxx.a |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/proto/source.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_bridge.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.{pb.h ... grpc.pb.h} |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |60.6%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/python/mypy-protobuf/bin/protoc-gen-mypy/protoc-gen-mypy |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/actualization.cpp |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/protos/retry_options.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_backup_v1.{pb.h ... grpc.pb.h} |60.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/709f125727d9ea4165df516509_raw.auxcpp |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_auth_v1.{pb.h ... grpc.pb.h} |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_36807918bd7a86c1ea37310c9c.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_656baae3c1e24959f5bcc457d7.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_0ab925f82bbba07bf3b749dc3c.o |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_5992d4831c5055a481712a2a80.o |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/large_serializable/ydb-tests-functional-large_serializable |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.{pb.h ... grpc.pb.h} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/protos/lwtrace.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/portion_info.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.{pb.h ... grpc.pb.h} |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |60.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/e72d7c890a4e6d107b4b82a390_raw.auxcpp |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/db_pool.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.{pb.h ... grpc.pb.h} |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_6508d12aaafde6f0a60fe8fff3.o |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_cd9abca883cad9b25e20bf2f08.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_afdf6d60c4f76ae91a235d460b.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_bd84885c5c24478d181ba9e493.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_277b7e8f79021687bec95be8db.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_0359848ae21601186c5b0d9873.o |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/sfh/libcpp-digest-sfh.a |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/494cbbb0dc8b7c9860714b57e7_raw.auxcpp |60.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/90ae9161b2f57d308762e42ba4_raw.auxcpp |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.{pb.h ... 
grpc.pb.h} |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bucket_quoter/liblibrary-cpp-bucket_quoter.a |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/writer.cpp |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_854d6cc7a0cc5cdd793cfc1e6d.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_a926d3332cb769ac3e6c9e6e37.o |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/pq_async_io/mock_pq_gateway.cpp |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_2f7ac0f750374152d13c6bfbcf.o |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.{pb.h ... grpc.pb.h} |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/objcopy_422ca1effff14e5a08952658d0.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_1a1e300767b552f4c13c3295d0.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_7bfd03a31f5e230607792f10cc.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/objcopy_8ac5034640eee44b1cd5fa5253.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_4f92526e13553482736b942b2c.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_7eade8c49389813f8c36b72b5b.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/objcopy_5f161468ff5322b803d4d0dc79.o |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/executor.cpp |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_3209cda00462f2963f3cbbc912.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_df0cb3f315162a3110ee243ecd.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_a0543c2dc30365e9b2ad3d0ca6.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_e0331f455507fe5ac3b71d0537.o |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/shard/protos/counters_shard.pb.{h, cc} |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/task_controller.pb.{h, cc} |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/variator.cpp |60.6%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/494cbbb0dc8b7c9860714b57e7_raw.auxcpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/large/objcopy_04f2935f3ada8eb9d01ebaba6b.o |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.{pb.h ... grpc.pb.h} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.{pb.h ... grpc.pb.h} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.{pb.h ... 
grpc.pb.h} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/iam_token_service.{pb.h ... grpc.pb.h} |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/large/objcopy_6af7a7ce8a1ee5e67d75a2978a.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/suite_tests/objcopy_9be2dadc45d1a9fdc157172661.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/suite_tests/objcopy_b701dac104d6ebd83e6489821f.o |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/large/objcopy_28f172e1aa977d907bdfa0a81b.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/suite_tests/objcopy_73ddf87b96fcbfc4f715436dc4.o |60.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ce697fc3b324cb6152c4d7223d_raw.auxcpp |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/protos/data.pb.{h, cc} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/yql_mount.pb.{h, cc} |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs__intpy3___pb2.py.p5ju.yapyc3 |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/compaction.cpp |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/suite_tests/ydb-tests-functional-suite_tests |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.{pb.h ... grpc.pb.h} |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/rpc/status.{pb.h ... grpc.pb.h} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage__intpy3___pb2.py{ ... i} |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage__intpy3___pb2.py.p5ju.yapyc3 |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/dqs.pb.{h, cc} |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ticket_parser_ut.cpp |60.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/82d705ad49a94ea30cd4594c50_raw.auxcpp |60.7%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/topic_session_ut.cpp |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/blob_range.pb.{h, cc} |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/typed_local.cpp |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/vector_index/large/ydb-tests-datashard-vector_index-large |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_main/libcpp-testing-gtest_main.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest/libcpp-testing-gtest.a |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_fd8d9957a06c9923c501e36fd9.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_e4166f3d104a6751b45e7e712f.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_b9aaa278b10ed44e5645b3ef2f.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_8491a772a9425d10f304e6f0e9.o |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.{pb.h ... 
grpc.pb.h} |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/common/libpy3functional-postgresql-common.global.a |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_6e0da74b1512d0ffe19c5dc500.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_816e2dba53f55d924139cdb3c5.o |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/config.pb.{h, cc} |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/query_executor.cpp |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/pq_async_io/ut_helpers.cpp |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/bulk_upsert.cpp |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/abstract.cpp |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_2cc418e8604751e5b8f9029a81.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_e872ffee323253a62fe108f2f4.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_ac3c83156eb65915b12091966a.o |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.{pb.h ... grpc.pb.h} |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/objcopy_6d8369510b03c08a300f2e2657.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/objcopy_7d0deb4120fbddf720c11b5358.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/objcopy_e1e64d508ce59834ec0a40f731.o |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.{pb.h ... grpc.pb.h} |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/interconnect.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/user_account_service.{pb.h ... grpc.pb.h} |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/delete/objcopy_609c2613d8f9c513602350c6a8.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/delete/objcopy_e6184a39b8332c221c5cda3c2f.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/delete/objcopy_ffc5f76f7501b8251738448541.o |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/yql_types.pb.{h, cc} |60.7%| [PR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/include/llvm/IR/Attributes.inc{, .d} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_discovery_v1.{pb.h ... 
grpc.pb.h} |60.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_178e64ce5db822fc6aa8b3e608.o |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/xz/libcpp-streams-xz.a |60.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/ydb-tests-olap |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/delete/ydb-tests-olap-delete |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_0a1f127d9343562caddfbacf79.o |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/libcontrib-libs-tcmalloc.a |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_f9b0feecd0e36f08cbf5c53562.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_b866963286293af0b6f2139fed.o |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/legacy_protobuf/protos/metric_meta.pb.{h, cc} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/compute.pb.{h, cc} |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |60.7%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/tx_datashard.{pb.h ... grpc.pb.h} |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_datastreams_v1.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/protos/fq.pb.{h, cc} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.{pb.h ... grpc.pb.h} |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/libcontrib-libs-tcmalloc.global.a |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/validation.pb.{h, cc} |60.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/11a9f7a6b79b2ef13b0ccdb626_raw.auxcpp |60.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/data_events/write_data.cpp |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |60.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/user_settings_reader.cpp |60.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.{pb.h ... grpc.pb.h} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.{pb.h ... grpc.pb.h} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/user_account.{pb.h ... 
grpc.pb.h} |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/topic_sdk_test_setup.cpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_3ea8aa67e7c24c4f0e3b0406b9.o |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/tools/protobuf_plugin/config_proto_plugin |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/plugins/grpc_cpp/grpc_cpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_51b071d7746089933668451b33.o |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/plugins/cpp_styleguide/cpp_styleguide |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/lz4/libstreams-lz-lz4.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/libcpp-streams-lz.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_by_signature/libstreams-factory-open_by_signature.a |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/medium/objcopy_71b7c7df3e7853e6e7cd11e484.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/medium/objcopy_1583476a2a074be936cf5a393e.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/medium/objcopy_cc203073bb2a03b31e52a78f24.o |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/snappy/libstreams-lz-snappy.a |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_445797246443360525d31550d1.o |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_common/libstreams-factory-open_common.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/local_executor/libcpp-threading-local_executor.a |60.7%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/kqp.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/storage.pb.{h, cc} |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/proxy_service.cpp |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_events.pb.{h, cc} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_logstore_v1.{pb.h ... grpc.pb.h} |60.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/c29e775d4210e0bfea21a038e3_raw.auxcpp |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/vector_index/medium/ydb-tests-datashard-vector_index-medium |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_c65a9d5efe13dc05c1466090ba.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_b8aa61f402be805d2e3e9e75a2.o |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/iam_token_service_subject.{pb.h ... grpc.pb.h} |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_d23500649301df2a8de48ba70d.o |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/receive_message.cpp |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_operation_v1.{pb.h ... grpc.pb.h} |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_permissions.cpp |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.{pb.h ... grpc.pb.h} |60.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/f9b9904f5f29323034ee90b36a_raw.auxcpp |60.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/top_keeper/libcpp-containers-top_keeper.a |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.{pb.h ... 
grpc.pb.h} |60.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.{pb.h ... grpc.pb.h} |60.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/base/query_id.h_serialized.cpp |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.{pb.h ... grpc.pb.h} |60.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/attributes_md5.cpp |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/protos/pgproxy.pb.{h, cc} |60.5%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator_impl.cpp |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_data_source/ut_external_data_source.cpp |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/untag_queue.cpp |60.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/aae788a890ddcb1702c659c8aa_raw.auxcpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_86ad37399122e504f3e6d8378d.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_9a3dabea847c21e0b4fa4cda26.o |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/commands/libcommands.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/flavours/libpy3tests-library-flavours.global.a |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_e317764e105a7e9e48b67a7b7e.o |60.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/main.cpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_5a4a401f33f46c70417a65f584.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_951c70889c9404d1662da27090.o |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator__reserve.cpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_cca8dcd66462c9ca3c57fcb78e.o |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/hyperloglog/liblibrary-cpp-hyperloglog.a |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/objcopy_774cbd1f10ee287899289ecb3f.o |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/protos/libhistogram-adaptive-protos.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/libcpp-histogram-adaptive.a |60.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/aba998449c2518e3272d8e87fb_raw.auxcpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_b34c6a8a5501db208eebc5d8e4.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_e32003454342267c2263935765.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_1339ee5ef04af3a5a49d43a6c9.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_6b8c453743f8fd2c5380af70c6.o |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/queue_schema.cpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_e68ca1a2fa9943132c020ae028.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_9be8b6745d0fa150928bab4206.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_fdd48fc620c42f480ae38b77f5.o |60.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/38dcacd12926621ca72e30ce1b_raw.auxcpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_e3bb1c534d69f237b55dd8dfe7.o |60.7%| [PY] 
{BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_b08299d456f3448b368e814cb8.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_dae5a42f53b4f98bf1b9fd8118.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_4fdbe64ce62f955927d10364b5.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_c5a20cdd9533abc10e82efdd1a.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_8e19d47784789c55156c57f816.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_7a185a4b35de7733fde931d298.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_64cecb639c5f85fbf868097a08.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_93dc3386250916dfae1ecb9b13.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_3d6916930a438b51675ef6dda7.o |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/type_info.{pb.h ... grpc.pb.h} |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/google/benchmark/librestricted-google-benchmark.a |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/data.pb.{h, cc} |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_inference/libydb-library-arrow_inference.a |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.{pb.h ... grpc.pb.h} |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpcds-dbgen/libbenchmarks-gen-tpcds-dbgen.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/queries/tpch/libbenchmarks-queries-tpch.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/queries/tpcds/libbenchmarks-queries-tpcds.global.a |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.{pb.h ... grpc.pb.h} |60.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpch-dbgen/libbenchmarks-gen-tpch-dbgen.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpcds-dbgen/libbenchmarks-gen-tpcds-dbgen.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/csv/table/libarrow-csv-table.a |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_config_v1.{pb.h ... grpc.pb.h} |60.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/auth_mocks.cpp |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/backup/libkikimr_backup.a |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_scripting_v1.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/connector.pb.{h, cc} |60.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/b04a8a4d880a6ab0d69f34e52c_raw.auxcpp |60.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/snapshot.pb.{h, cc} |60.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_export_v1.{pb.h ... 
grpc.pb.h} |60.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_transport.pb.{h, cc} |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/execute.cpp |60.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/activation.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ymq.pb.{h, cc} |60.6%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/upload_rows_counters.h_serialized.cpp |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator__scheme.cpp |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/benchmark_base/liblibrary-workload-benchmark_base.a |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/clickbench/liblibrary-workload-clickbench.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/mixed/liblibrary-workload-mixed.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/query/liblibrary-workload-query.global.a |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.{pb.h ... grpc.pb.h} |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/mixed/liblibrary-workload-mixed.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/vector/liblibrary-workload-vector.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/log/liblibrary-workload-log.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpc_base/liblibrary-workload-tpc_base.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpc_base/liblibrary-workload-tpc_base.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpch/liblibrary-workload-tpch.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/clickbench/liblibrary-workload-clickbench.a |60.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/libydb_cli-commands-interactive.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/libcommands-interactive-highlight.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/complete/libcommands-interactive-complete.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/transfer_workload/libtransfer_workload.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/stat_visualization/libpublic-lib-stat_visualization.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/topic_workload/libtopic_workload.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/color/libinteractive-highlight-color.a |60.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpch/liblibrary-workload-tpch.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcds/liblibrary-workload-tpcds.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/pire/libcpp-regex-pire.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcds/liblibrary-workload-tpcds.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/topic/libtopic.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/liblib-ydb_cli-dump.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/query/liblibrary-workload-query.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/import/liblib-ydb_cli-import.a |60.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/workload/log/liblibrary-workload-log.global.a |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/cms/libsrc-client-cms.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/vector/liblibrary-workload-vector.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/debug/libsrc-client-debug.a |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_44fac4fe441507735704a000ad.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_7648c2519d02b8456f762efc4b.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_7c328c2741f9dd7697a2e0e8b1.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_31d605682329607481eb568ed0.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_994fcbd53c4e2174c302bdb5ab.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_245adf3e28f56e6467e034d9f2.o |60.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/abstract/abstract.h_serialized.cpp |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/data_events/columnshard_splitter.cpp |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_cluster_discovery.pb.{h, cc} |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/monitoring/libsrc-client-monitoring.a |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/dq_io.pb.{h, cc} |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/events_writer.cpp |60.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/a2dcc326c51839151e5bfd92cd_raw.auxcpp |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.{pb.h ... grpc.pb.h} |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_formats.pb.{h, cc} |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/result.cpp |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcc/liblibrary-workload-tpcc.a |60.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/ssa.pb.{h, cc} |60.6%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus.{pb.h ... grpc.pb.h} |60.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/message_delay_stats.cpp |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.{pb.h ... 
grpc.pb.h} |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/timezone_conversion/liblibrary-cpp-timezone_conversion.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/punycode/libcpp-unicode-punycode.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tld/liblibrary-cpp-tld.a |60.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/ydb-dump.cpp |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |60.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_3bdea7737a87c43bfaa0aaf4c3.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_1ab2a5a6dd84a6c9ff5d5c50b0.o |60.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/workload-transfer-topic-to-table.cpp |60.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/supported_codecs_fixture.cpp |60.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/workload-topic.cpp |60.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/supported_codecs.cpp |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_6887bde1dc99f5c5c2f0922842.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_7211c23d9494c46f0f60063e9e.o |60.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_48a08121f0a68da2f2666b0341.o |60.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_791e2f78c18891d943ecce5e41.o |60.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/run_ydb.cpp |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/get_queue_url.cpp |60.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.{pb.h ... grpc.pb.h} |60.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bridge.{pb.h ... 
grpc.pb.h} |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/actor.cpp |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/antlr_ast/gen/v1_ansi_antlr4/libantlr_ast-gen-v1_ansi_antlr4.a |60.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydbd/export.cpp |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/antlr4/libv1-complete-antlr4.a |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/ranking/libname-service-ranking.global.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/core/libv1-complete-core.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/libsql-v1-complete.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/local/libcomplete-analysis-local.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/reflect/libsql-v1-reflect.global.a |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_pure_ansi/libv1-lexer-antlr4_pure_ansi.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/reflect/libsql-v1-reflect.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/antlr_ast/gen/v1_antlr4/libantlr_ast-gen-v1_antlr4.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/regex/libv1-lexer-regex.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_pure/libv1-lexer-antlr4_pure.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/global/libcomplete-analysis-global.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/highlight/libsql-v1-highlight.global.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/ranking/libname-service-ranking.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cache/local/libname-cache-local.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/cached/libobject-simple-cached.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cache/libcomplete-name-cache.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/highlight/libsql-v1-highlight.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/libcomplete-name-object.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/libname-object-simple.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/binding/libname-service-binding.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/impatient/libname-service-impatient.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/column/libname-service-column.a |60.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydb/ydb |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/libcomplete-name-service.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/text/libv1-complete-text.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/static/libobject-simple-static.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/syntax/libv1-complete-syntax.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/union/libname-service-union.a |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp |60.6%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/schema/libname-service-schema.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/static/libname-service-static.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/static/libname-service-static.a |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/sentinel_ut.cpp |60.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/common.cpp |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/libcore-external_sources-hive_metastore.a |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut_common.cpp |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tornado/tornado-4/libpy3python-tornado-tornado-4.global.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gbenchmark/libcpp-testing-gbenchmark.a |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tornado/tornado-4/libpy3python-tornado-tornado-4.a |60.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/main.cpp |60.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_client_ut.cpp |60.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_proxy.cpp |60.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pgwire.cpp |60.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_connection.cpp |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/executor.cpp |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut_common.cpp |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/schema.cpp |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/error.cpp |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_2b682e146a665bfa19210b0fd9.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_e0aef87c4bf15cfdc957f4bdd1.o |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/example/ydb-tests-example |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_65ac58c27d43a55d0ea4eda626.o |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/compatibility/libpy3tests-library-compatibility.global.a |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_b7f5600f224f7d7aa608ada59e.o |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_c623700776b43ee95ec93c56f9.o |60.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |60.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_9beede1c5ddb1a5202bb8125bf.o |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/hive_metastore_native/libexternal_sources-hive_metastore-hive_metastore_native.a |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/ydb/ut/parse_command_line.cpp |60.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql_compile_ut.cpp |60.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp |60.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |60.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/backup/impl/local_partition_reader_ut.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/view/view_ut.cpp |60.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_serialization.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/ydbd/main.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |60.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_kafka_functions.cpp |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/ut_helpers/liblibs-quota_manager-ut_helpers.a |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |60.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/util_ut.cpp |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/queue_id_ut.cpp |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/params_ut.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/prctl/libpy3library-python-prctl.global.a |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/kafka_test_client.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |60.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/prctl/libpy3library-python-prctl.a |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/utils/types/libpy3tests-utils-types.global.a |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/rows/libtest-libs-rows.a |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/make_config.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/export_s3_buffer_ut.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/immediate_controls_configurator_ut.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/ut_produce_actor.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/modifications_validator_ut.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/feature_flags_configurator_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_comp_gen.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/s3_recipe_helper/liblibrary-testlib-s3_recipe_helper.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/health_check/health_check_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_charge.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_s3fifo_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_recompute_kmeans.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/stub/libudf-service-stub.global.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/breakpad/libydb-library-breakpad.global.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/keys/libydb-library-keys.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/yc_search_ut/test_events_writer.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/solomon/actors/ut/dq_solomon_write_actor_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_row_versions_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/columnshard/engines/ut/helper.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/datetime/libdatetime_udf.global.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/roaring/libroaring.global.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/client/ydb_topic/include/libclient-ydb_topic-include.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/knn/libknn_udf.global.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_followers.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/ut_sequenceshard.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/ut_helpers.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/query_actor/query_actor_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/compression_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/ydb_convert_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/format_handler_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_parser_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/proto/libkqprun-src-proto.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/compatibility/binaries/downloader/libpy3compatibility-binaries-downloader.global.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_find_split_key.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_cxx_database_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/pgwire/pgwire |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |59.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/compatibility/binaries/downloader/downloader |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ncloud/impl/access_service_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/tests/liblibrary-persqueue-tests.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_gclogic_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/range_treap_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/mkql_simple_file/libproviders-common-mkql_simple_file.a |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_filestore_reboots/ut_filestore_reboots.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/ut/ut_script.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_description_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_clock_pro_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_part_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_nodes.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_handle_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_switchable_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_bloom.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_redo.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/ut/ut_program.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_iter_charge.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_forward.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots 
|59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator_ext_blobs.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_scheme.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_proto.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/lib/libcommon-compress_base-lib.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction_multi.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_decimal.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/libcompress_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.global.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.global.a |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_iface.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/objcopy_484246668d943fbae3b476ec7d.o |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_memtable.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_self.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.global.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_sausage.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_pages.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part_multi.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice_loader.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_iterator.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/ut/objcopy_9f29b589555ed64086e5eadccf.o |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_screen.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_versions.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_stat.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_range_cache_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/file/libyt-gateway-file.a |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_filter_ut.cpp |59.8%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/inside_ydb_ut/inside_ydb_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore/ut_incremental_restore.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/json_change_record_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/kqprun |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_failure_injection/ut_failure_injection.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp |59.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sdk/cpp/sdk_credprovider/ydb-tests-functional-sdk-cpp-sdk_credprovider |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write_ut.cpp >> downloader::import_test [GOOD] |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/type_info/libpy3python-yt-type_info.global.a |59.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part12/ydb-tests-fq-yt-kqp_yt_file-part12 |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_rename_table_column.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_data_cleanup.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/metering_sink_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/microseconds_sliding_window_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/libpy3yt-python-yt.global.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partitiongraph_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/type_codecs_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/quota_tracker_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/internals_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/edaf602b2011baa1519a223d63_raw.auxcpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/utils_ut.cpp |59.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/library/compatibility/binaries/downloader/import_test >> downloader::import_test [GOOD] |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_b91160bcee04ad1f57e80af064.o |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/yson/libpy3python-yt-yson.global.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/ut/objcopy_1d0482d354dc270d18e7123281.o |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/cache_eviction_ut.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_1326afc143d720f2af434cd836.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_1007df29dec27b0b7a1587d49f.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/utils/libpy3fq-generic-utils.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/table/libtest-libs-table.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/utils/libpy3connector-tests-utils.global.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/simplejson/py3/libpy3python-simplejson-py3.global.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_3db6af291678d4ac330517956a.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_database_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/transfer |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/lib/libcommon-url_base-lib.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.global.a |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/static/libcommon-stat-static.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json/libjson_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/libip_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/lib/libcommon-ip_base-lib.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.global.a |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_1555e67a3dd43a3e7f09bf8eee.o |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/service_mocks/ldap_mock/libtestlib-service_mocks-ldap_mock.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncquorum_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/simplejson/py3/libpy3python-simplejson-py3.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/static/libcommon-topfreq-static.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_488333b1ebd4c1d6d8ec5bcb8f.o |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_mapper_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_tables_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ut_ycsb.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_81b7879c9cfa37bdcf437f5ff4.o |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_actors_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_write_actor_ut.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_5865a174a6c25ca1a2d6386702.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_c02c3d9f840d02af9fad858a55.o |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/dc048c91e67372877fc6ad2dfc_raw.auxcpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/top/libtop_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/helpers/libclient-oauth2_token_exchange-helpers.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ut_helpers/libpublic-lib-ut_helpers.a |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_bfa810e70cd1de18c5d4a18a62.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_00c87b13e2f685811a9825079d.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_5db899a01c2ec6f53648af6840.o |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_ut_pool.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_replication_v1.{pb.h ... 
grpc.pb.h} |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |59.8%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/config.pb.{h, cc} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_maintenance.pb.{h, cc} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/common/validation.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_ut_local.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/access_service.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/24b7f665b4aa031005ffced39b_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |59.8%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_operation.pb.{h, cc} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_ymq_v1.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_coordination_v1.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/9270b323afac6e83cea5bf4aa4_raw.auxcpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_config.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/protos/tx_event.pb.{h, cc} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/test_connection.pb.{h, cc} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/api/http.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/login.pb.{h, cc} |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |59.8%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/9270b323afac6e83cea5bf4aa4_raw.auxcpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.{pb.h ... 
grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/grouper_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydbd/ydbd |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_keyvalue_v1.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata__intpy3___pb2.py.p5ju.yapyc3 |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation__intpy3___pb2.py{ ... i} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation__intpy3___pb2.py.p5ju.yapyc3 |59.9%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py.p5ju.yapyc3 |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.9%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py{ ... i} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata__intpy3___pb2.py{ ... i} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/list_all_topics_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/backup/impl/table_writer_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/benchmark/b_part.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/ut/graph_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/issue_message.pb.{h, cc} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/proto/dq_solomon_shard.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/7c1f1e5f5235f3f91f737c08e3_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/common/v1/common.{pb.h ... 
grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_proxy.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/backup_ut/list_objects_in_s3_export_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scheme/kqp_secrets_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/grpc/fq_private_v1.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/8dc9a4d43a36492d7f07bb1764_raw.auxcpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/1fe825be1cf65e9693ebce9341_raw.auxcpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/accessservice/access_service.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/nodes_manager.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/initiator.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_stats.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/backup_ut/encrypted_backup_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/ydb_convert.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_monitoring_v1.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_maintenance_v1.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/api/annotations.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/partition_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/sha256.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_coordination.pb.{h, cc} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/backup_ut/backup_path_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.{pb.h ... 
grpc.pb.h} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |59.9%| [PR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/expr_nodes/yql_yt_expr_nodes.{gen.h ... defs.inl.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.{pb.h ... grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_import_v1.{pb.h ... grpc.pb.h} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/fetch_request_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/dq_effects.pb.{h, cc} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/links.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/protos/blobs.pb.{h, cc} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/ut/ydb-core-client-ut |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/resource.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_backup.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/servicecontrol/resource.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/protos/events.pb.{h, cc} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/b128a176091d6f4205660cfcb9_raw.auxcpp |60.0%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... 
grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/selector.pb.{h, cc} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_ut_tenants.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/protos/events.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/events.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/sourceid_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/dcebaf9a0d338442b39b25171a_raw.auxcpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_status_codes.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/9ce1d76b66c1825c22ae6402fe_raw.auxcpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.{pb.h ... grpc.pb.h} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/service_account.{pb.h ... grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.{pb.h ... grpc.pb.h} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore |60.0%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/b128a176091d6f4205660cfcb9_raw.auxcpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |60.0%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/pg/expr_nodes/yql_pg_expr_nodes.{gen.h ... defs.inl.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/marker.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/tpch/lib/libtests-tpch-lib.global.a |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/tpch/lib/libtests-tpch-lib.a |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.{pb.h ... grpc.pb.h} |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key__intpy3___pb2.py.p5ju.yapyc3 |60.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/209d59416ce59145c1e1d96c2f_raw.auxcpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter_ut.cpp |60.0%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/provider/yql_kikimr_expr_nodes.{gen.h ... 
defs.inl.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical__intpy3___pb2.py.p5ju.yapyc3 |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp |60.0%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/kqp__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats__intpy3___pb2.py{ ... i} |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical__intpy3___pb2.py{ ... i} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key__intpy3___pb2.py{ ... i} |60.0%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/kqp__intpy3___pb2.py.p5ju.yapyc3 |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |60.0%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config.{pb.h ... grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/protos/aclib.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/operation/operation.{pb.h ... grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/quotas_manager.pb.{h, cc} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/user_info_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/kqp__intpy3___pb2.py{ ... i} |60.0%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes/yql_expr_nodes.{gen.h ... defs.inl.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/counters_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pq_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/pathid.{pb.h ... grpc.pb.h} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/services.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/token_accessor.pb.{h, cc} |60.1%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console__intpy3___pb2.py.p5ju.yapyc3 |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms.pb.{h, cc} |60.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/c6cd853d298b80d82dda8309af_raw.auxcpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/a173ecb5b713824a01f21a39dc_raw.auxcpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/graph_params/proto/graph_params.pb.{h, cc} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |60.1%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/expr_nodes/yql_generic_expr_nodes.{gen.h ... defs.inl.h} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.{pb.h ... 
grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_storage.pb.{h, cc} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/check_integrity.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group__intpy3___pb2.py{ ... i} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_replication.pb.{h, cc} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/large_results/kqp_scriptexec_results_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/jaeger_tracing_configurator_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/user_settings_names.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/flat_ut.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/checkpoint_coordinator.pb.{h, cc} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_cache_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/tx_proxy_status.cpp |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/proto/logger_config.pb.{h, cc} |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.{pb.h ... grpc.pb.h} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/board_subscriber_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_ut_configs.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/resource_manager.pb.{h, cc} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/locks_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/queue_id.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/monitoring.cpp |60.0%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/fc437004cc347d978fb8cbf231_raw.auxcpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.{pb.h ... 
grpc.pb.h} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/protos/persqueue.pb.{h, cc} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/protos/event.pb.{h, cc} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.{pb.h ... grpc.pb.h} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/dlq_helpers.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/tx_datashard__intpy3___pb2.py{ ... i} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/create_queue.cpp |60.1%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/node_checkers.h_serialized.cpp |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast__intpy3___pb2.py.p5ju.yapyc3 |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy__intpy3___pb2.py.p5ju.yapyc3 |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast__intpy3___pb2.py{ ... i} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy__intpy3___pb2.py{ ... i} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.{pb.h ... 
grpc.pb.h} |60.1%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/tx_datashard__intpy3___pb2.py.p5ju.yapyc3 |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.1%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/tx_datashard__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/sink.pb.{h, cc} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_clickhouse_internal.pb.{h, cc} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/proto/yq_internal.pb.{h, cc} |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/d8a0ba19a8c38a9b8166f51a9b_raw.auxcpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore_reboots/ut_incremental_restore_reboots.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/proxy_actor.cpp |60.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/cancel_tx_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_base_init.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.{pb.h ... grpc.pb.h} |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_shared.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_watch.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/6ff6cb7fc73023cd7bac8e9866_raw.auxcpp |60.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part18/ydb-tests-fq-yt-kqp_yt_file-part18 |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_debug.pb.{h, cc} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_monitoring.pb.{h, cc} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/object_storage_listing_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_grpc.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/index_events_processor.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.{h, cc} |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_a6e393b6d53f4c73feac80b55c.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_e2acb41e7099c0db4fe54a1587.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_cf3971576aced18377e99f5367.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_89b3e69f7cdba68b4eefcae48c.o |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.{pb.h ... 
grpc.pb.h} |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_7c81cbfa6b5ce112674cb0a849.o |60.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/13360e4ecdf34efe6c3a817a44_raw.auxcpp |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/libpy3s3_backups.global.a |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_4b2e093abff756c97b675c0a31.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/objcopy_4508aef343f36758ea760320db.o |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs__intpy3___pb2.py{ ... i} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/delete_message.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_persqueue_v1.{pb.h ... grpc.pb.h} |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache__intpy3___pb2.py.p5ju.yapyc3 |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/protos/operation_id.pb.{h, cc} |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_federation_discovery_v1.{pb.h ... grpc.pb.h} |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/delete_user.cpp |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/auth.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/rpc.{pb.h ... grpc.pb.h} |60.1%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp |60.1%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/expr_nodes/yql_res_expr_nodes.{gen.h ... defs.inl.h} |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_proto_split/SQLv1Parser.pb.{code0.cc ... main.h} |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/auth_multi_factory.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_settings.cpp |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/queue_attributes.cpp |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/storage.pb.{h, cc} |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.{pb.h ... grpc.pb.h} |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/fq_config.pb.{h, cc} |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_0aefef587c181350d3a25f70e0.o |60.1%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.{pb.h ... grpc.pb.h} |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/s3_backups/s3_backups |60.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_c068ee86eb127df13256bfbe45.o |60.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_f3c323ef80ada193284f036d44.o |60.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_af18efc2f04dd1af5ca802c329.o |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.{pb.h ... grpc.pb.h} |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.{pb.h ... 
grpc.pb.h} |60.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_87b299e07b15c86f4f50f458ef.o |60.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_965640ca94893d27c182c611e2.o |60.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/36583b0197800233ec9e3729e1_raw.auxcpp |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/fq_private.pb.{h, cc} |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/gateways_config.pb.{h, cc} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_query_v1.{pb.h ... grpc.pb.h} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events.cpp |60.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |60.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |60.2%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/enum_parser/enum_parser/enum_parser |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/log.cpp |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_tablet.pb.{h, cc} |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.{pb.h ... grpc.pb.h} |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.{pb.h ... grpc.pb.h} |60.2%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc.{pb.h ... grpc.pb.h} |60.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/service.pb.{h, cc} |60.0%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/generated/dispatch_op.h |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/datastreams.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_keyvalue.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_object_storage.pb.{h, cc} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/dynamic_node.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_logstore.pb.{h, cc} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/error.cpp |60.1%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_reader/contexts.h_serialized.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/tests/kikimr_tpch/kqp_tpch_ut.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/change_visibility.cpp |60.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |60.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/etcd_proxy/service/etcd_impl.cpp |60.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/queue_leader.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |60.0%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/configurator.cpp |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/objcopy_4f055c289b3de8f2a1e827ae5c.o |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/schema.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/run_query.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/ymq/actor/metering.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/send_message.cpp |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/library/libpy3tools-nemesis-library.global.a |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_c98e5b95c64b8486a12f10d408.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_927a1f7611cf94fb1cd21ef8cf.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_b06d27009e49b9ba3df883a226.o |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |60.1%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/kikimr.cpp |60.0%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/compute_actor/kqp_compute_state.h_serialized.cpp |60.0%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/manager.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/action.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/cfg.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/node_tracker.cpp |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/libpy3kqprun_recipe.global.a |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/objcopy_dcbdf62672440a626e79a64e14.o |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/libpy3oltp_workload.global.a |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/objcopy_bcf2142e31bf537964dc063d11.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_589315062f5401a368910248f0.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_61613f0bd98876f149d8574891.o |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_c114cbf6b820d92320c1e2c912.o |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/retention.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/sessions.pb.{h, cc} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/get_queue_attributes.cpp |60.1%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal.{pb.h ... grpc.pb.h} |60.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/0df74f5c3f7eead9a8ec0077f0_raw.auxcpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/compression.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/yandex_passport_cookie.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/dq_io_state.pb.{h, cc} |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/index.pb.{h, cc} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/read_actors_factory.pb.{h, cc} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_object_storage_v1.{pb.h ... 
grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.{pb.h ... grpc.pb.h} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |60.1%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/data_events/shards_splitter.cpp |60.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.{pb.h ... grpc.pb.h} |60.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/datetime/libdatetime_udf.so |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/data.pb.{h, cc} |60.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/e242d0eae6b82a1b268997c584_raw.auxcpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/auth_factory.cpp |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/delete_queue.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/local_rate_limiter_allocator.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/issue_id.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/accessservice/access.{pb.h ... grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/reference.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/proto/graph_description.pb.{h, cc} |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_login_ut.cpp |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_queue_tags.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.{pb.h ... grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/field_transformation.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.{pb.h ... grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.{pb.h ... grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/main.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_83efacabe56767ae4f106a6d27.o |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/proxy.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_f738234258cd034cd5383f92ad.o |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/api/field_behavior.{pb.h ... 
grpc.pb.h} |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_2f0e0ac8198858b9ec9901778e.o |60.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.global.a |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_import_ut.cpp |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/sql/ydb-tests-sql |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_rate_limiter.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/libetcd-grpc.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.so |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore_reboots/schemeshard-ut_incremental_restore_reboots |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/direct_read_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/actors/test_runtime_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_query_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lfalloc/liblibrary-cpp-lfalloc.a |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/fifo_cleanup.cpp |60.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/docs/generator/generator |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_table_ut.cpp |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/init.h_serialized.cpp |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/create_user.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/ymq/actor/service.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/tag_queue.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/set_queue_attributes.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/backpressure.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/dynamic_prototype/libcpp-protobuf-dynamic_prototype.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/yql/libcpp-protobuf-yql.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/clickbench/ydb-tests-functional-clickbench |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.so |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/ydb/ut/ydb_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/bind_queue_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/actor_activity_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_afb48e06933bdee6c5245db82e.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_10b0cfa01297f7d7392eb4d9e4.o |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/operation_helpers_ut.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_b306c2955ce13e6db6cae73363.o |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/storagepoolmon/ut/storagepoolmon_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/rpc_calls_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp 
|59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/phantom_blobs.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/http/ut/xml_builder_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_utils_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/docs/generator/libpy3olap-docs-generator.global.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/test_protocols_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/memory_controller/memtable_collection_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_resource_tree_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |59.6%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.so |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/actors/ut/database_resolver_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/query_stats/query_stats_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/ut/grpc/libgrpc_streaming-ut-grpc.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/string/libstring_udf.so |59.6%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/group_size_in_units.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scan/kqp_point_consolidation_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/ydb-tests-fq-yt-kqp_yt_import |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/ut_helpers.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/auto_config_initializer_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_kqp.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/certificate_check/cert_utils_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_common.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_counters.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/certificate_check/cert_check_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_provider_ut.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/objcopy_5fddfa8f171a3216cad65e02ab.o |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/json_proto_conversion_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/statistics/database/ut/ut_database.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/etcd_proxy |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_labeled.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_rtmr/ut_rtmr.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |59.3%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/http_router_ut.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/cloud_events_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/idx_test/libpublic-lib-idx_test.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ru_calculator/ut_ru_calculator.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/control/ut/ydb-core-control-ut |59.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/base/cloud_enums.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/row_dispatcher.pb.{h, cc} |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/common.pb.{h, cc} |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |59.2%| [PB] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/data_sharing/protos/events.pb.{h, cc} |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/functions_executor_wrapper.cpp |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.{pb.h ... grpc.pb.h} |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.{pb.h ... grpc.pb.h} |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/topic_description.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/olap/high_load/read_update_write.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/infly.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/kqp_mock.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp |59.3%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/expr_nodes/kqp_expr_nodes.{gen.h ... defs.inl.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.{pb.h ... grpc.pb.h} |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/160c5542e9615810f3f86fe955_raw.auxcpp |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/task_command_executor.pb.{h, cc} |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.{pb.h ... 
grpc.pb.h} |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/acl.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_sysview/ut_sysview.cpp |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_bridge_v1.{pb.h ... grpc.pb.h} |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_common.pb.{h, cc} |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/02d68a63e2a652986bebc94975_raw.auxcpp |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp |59.3%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/160c5542e9615810f3f86fe955_raw.auxcpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/stress_tool/device_test_tool_ut.cpp |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_error_codes.pb.{h, cc} |59.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/1d4add45df109108c6e0289305_raw.auxcpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/issue_id.pb.{h, cc} |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |59.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/fq/ut_integration/ut_utils.cpp |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local__intpy3___pb2.py{ ... i} |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test__intpy3___pb2.py{ ... i} |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.{pb.h ... grpc.pb.h} |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters__intpy3___pb2.py{ ... i} |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme__intpy3___pb2.py{ ... 
i} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp >> ydb-tests-functional-clickbench::import_test [GOOD] |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp |59.2%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/160c5542e9615810f3f86fe955_raw.auxcpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/protobuf_udf/libessentials-minikql-protobuf_udf.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.{pb.h ... grpc.pb.h} |59.3%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/protoc |59.3%| [EN] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/stock.h_serialized.{cpp, h} |59.3%| [EN] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/kv.h_serialized.{cpp, h} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/purge_queue.cpp |59.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |59.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/import_test >> ydb-tests-functional-clickbench::import_test [GOOD] |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/gateways.pb.{h, cc} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.{pb.h ... grpc.pb.h} |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/retry_config.pb.{h, cc} |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config__intpy3___pb2.py{ ... i} |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp |59.2%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py{ ... 
i} |59.2%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.3%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config__intpy3___pb2.py{ ... i} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/network/libessentials-utils-network.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/fqrun.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/partition_end_watcher_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp >> generator::import_test [GOOD] |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/file/libfile_udf.global.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/pqtablet_mock.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/protobuf/libprotobuf_udf.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/streaming/libstreaming_udf.global.a |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay_yt/main.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |59.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/docs/generator/import_test >> 
generator::import_test [GOOD] |59.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/b3824ec07381bf48b3130c7ba9_raw.auxcpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/0ec1f6840e6475509b66e2aab5_raw.auxcpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/connector.{pb.h ... grpc.pb.h} |59.3%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/expr_nodes/dq_expr_nodes.{gen.h ... defs.inl.h} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/modify_permissions.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/proxy_service/proto/result_set_meta.pb.{h, cc} |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/console__intpy3___pb2.py{ ... i} |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yaml_config/protos/config.pb.{h, cc} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.{pb.h ... grpc.pb.h} |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache__intpy3___pb2.py{ ... i} |59.3%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/runtime_feature_flags.h |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/probes.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/keyvalue/protos/events.pb.{h, cc} ------- [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/kqp__intpy3___pb2.py{ ... i} ydb/core/protos/kqp.proto:15:1: warning: Import ydb/public/api/protos/ydb_topic.proto is unused. |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.{pb.h ... grpc.pb.h} |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/fqrun/fqrun |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/persqueue_error_codes_v1.pb.{h, cc} |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/config__intpy3___pb2.py{ ... i} |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/tx_datashard__intpy3___pb2.py{ ... i} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.{pb.h ... grpc.pb.h} >> ydb-tests-fq-yt-kqp_yt_import::import_test [GOOD] |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_discovery.pb.{h, cc} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/sensitive.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |59.3%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/8760973cadf4aa816a9d37589f_raw.auxcpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp |59.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/160c5542e9615810f3f86fe955_raw.auxcpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/protos/config.pb.{h, cc} |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/console.{pb.h ... 
grpc.pb.h} |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py{ ... i} |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/blobstorage_vdisk_internal.{pb.h ... grpc.pb.h} |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py{ ... i} |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/grpc.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay_yt/query_replay.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/records.pb.{h, cc} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_view_v1.{pb.h ... grpc.pb.h} |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/serverless_proxy_config.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/oauth_request.{pb.h ... grpc.pb.h} |59.3%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/8760973cadf4aa816a9d37589f_raw.auxcpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.{pb.h ... grpc.pb.h} |59.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/kqp.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp ------- [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/grpc__intpy3___pb2.py{ ... i} ydb/core/protos/grpc.proto:6:1: warning: Import ydb/core/protos/msgbus_kv.proto is unused. |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.{pb.h ... grpc.pb.h} |59.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_import/import_test >> ydb-tests-fq-yt-kqp_yt_import::import_test [GOOD] |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_cms_v1.{pb.h ... grpc.pb.h} |59.3%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/base/generated/runtime_feature_flags.h |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/grpc/persqueue.{pb.h ... grpc.pb.h} |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/counters.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/actors.pb.{h, cc} |59.3%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.3%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config.{pb.h ... 
grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_auth.pb.{h, cc} |59.4%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/console__intpy3___pb2.py.p5ju.yapyc3 |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/accessor.pb.{h, cc} |59.3%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/console_config.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.{pb.h ... grpc.pb.h} |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine__intpy3___pb2.py{ ... i} |59.3%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/msgbus.{pb.h ... grpc.pb.h} |59.3%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/kqp__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon__intpy3___pb2.py{ ... i} |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine__intpy3___pb2.py.p5ju.yapyc3 |59.4%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |59.4%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/kqp__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/config__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/tx_datashard__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/key_range.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/protos/viewer.pb.{h, cc} |59.3%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/tx_datashard__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_rate_limiter_v1.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/f5842ce240ac3a1b94dd2c1f2e_raw.auxcpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/unittests.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/queues_list_reader.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.{pb.h ... grpc.pb.h} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |59.4%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus__intpy3___pb2.py{ ... 
i} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/task.pb.{h, cc} |59.4%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |59.3%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/b128a176091d6f4205660cfcb9_raw.auxcpp |59.3%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/fc437004cc347d978fb8cbf231_raw.auxcpp |59.4%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_issue_message.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/grpc/api.{pb.h ... grpc.pb.h} |59.4%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/query_replay_yt/query_replay_yt |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/protos/data.pb.{h, cc} |59.4%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/tx_datashard.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_value.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/iam_token.{pb.h ... grpc.pb.h} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/fields.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/f58beed9514e65def82ad7e2a5_raw.auxcpp |59.4%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/grpc__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_status_codes.pb.{h, cc} |59.4%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.{gen.h ... defs.inl.h} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.{pb.h ... 
grpc.pb.h} |59.3%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/grpc__intpy3___pb2.py.p5ju.yapyc3 |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_state_load_plan.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/audit.pb.{h, cc} |59.4%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/udf_resolver.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scheme.pb.{h, cc} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |59.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/85072bb936b0763f4b03040c4c_raw.auxcpp |59.4%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/rate_limiter.pb.{h, cc} |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color__intpy3___pb2.py.p5ju.yapyc3 |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_v1.pb.{h, cc} |59.4%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/viewer/protos/viewer.pb.{h, cc} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.{pb.h ... grpc.pb.h} |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay_yt/query_compiler.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py{ ... i} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color__intpy3___pb2.py{ ... i} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk__intpy3___pb2.py{ ... i} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |59.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/5f8a81cdd9d2daa4eaa6ef0775_raw.auxcpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |59.4%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py.p5ju.yapyc3 |59.4%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/expr_nodes/dqs_expr_nodes.{gen.h ... defs.inl.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/helpers.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/iam/iam_token_service.{pb.h ... 
grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/e6ce42a762195cf7e946ca411e_raw.auxcpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_queues.cpp |59.4%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/minikql.{pb.h ... grpc.pb.h} |59.4%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/msgbus__intpy3___pb2.py{ ... i} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/ut/grpc/streaming_service.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/protos/graph.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/operations.{pb.h ... grpc.pb.h} |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_b8d63b589074145793d63c27a3.o |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_f928a40774b17a9d6cd7cabd2c.o |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots |59.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/849c58233edc33539cbeb93a31_raw.auxcpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_bf578b7161cc94bf18488d04ca.o |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_e7477203b27fa0321cf18fd7ee.o |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_tablet_v1.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/query.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_federation_discovery.pb.{h, cc} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/afa8c81460609cfd6247d9dbf7_raw.auxcpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |59.4%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/e6ce42a762195cf7e946ca411e_raw.auxcpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/e39200e8838ea19804ddbf1a59_raw.auxcpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.{pb.h ... 
grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/blobstorage_config.pb.{h, cc} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_api.pb.{h, cc} |59.4%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/afa8c81460609cfd6247d9dbf7_raw.auxcpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/cleanup_queue_data.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/proto/storage_meta.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/proto/quota_internal.pb.{h, cc} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.{pb.h ... grpc.pb.h} |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap__intpy3___pb2.py.p5ju.yapyc3 |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |59.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |59.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/b128a176091d6f4205660cfcb9_raw.auxcpp |59.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/fc437004cc347d978fb8cbf231_raw.auxcpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap__intpy3___pb2.py{ ... i} |59.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |59.4%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.{pb.h ... grpc.pb.h} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper__intpy3___pb2.py{ ... i} |59.5%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper__intpy3___pb2.py.p5ju.yapyc3 |59.4%| [PB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/config.{pb.h ... grpc.pb.h} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.{pb.h ... grpc.pb.h} |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base__intpy3___pb2.py{ ... 
i} |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_config__intpy3___pb2.py{ ... i} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant__intpy3___pb2.py{ ... i} |59.5%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config__intpy3___pb2.py{ ... i} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.{pb.h ... grpc.pb.h} |59.4%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/yql/libcomplete-analysis-yql.a |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cluster/static/libname-cluster-static.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/datastreams_ut.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/cluster/libname-service-cluster.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/check/libv1-complete-check.a |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/docker/libpy3contrib-python-docker.global.a |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.{pb.h ... grpc.pb.h} |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/check/libv1-lexer-check.a |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/issue_id.{pb.h ... 
grpc.pb.h} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/essentials/tools/sql2yql/sql2yql |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/metering.h_serialized.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/encrypted_storage_ut.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yql/essentials/tools/sql2yql/sql2yql.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |59.5%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/src/common.h_serialized.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_8db6616d40f8020d0632222fe3.o |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_9314464e3560b2511ac931acd9.o |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_b9f2edaa5324f618808de2a972.o |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_112302f377365c2eb2333f817f.o |59.5%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/cpp_style_checker/cpp_style_checker |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_8fca143a218b930f297b779e3a.o |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_e5d897582dc0fbda7c578cb53f.o |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |59.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/8518bc2ed0b218aec7a0a95428_raw.auxcpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/protos/container.pb.{h, cc} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |59.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/4ddd0d26de267b7b48b2409480_raw.auxcpp |59.5%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/8750dadd4a699d4221d697bf03_raw.auxcpp |59.5%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_export.pb.{h, cc} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.{pb.h ... 
grpc.pb.h} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/5732afb9e28f3811e12c561eb5_raw.auxcpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/18547f9635bde59b5840161212_raw.auxcpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/cms/cms_ut.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/kv.{pb.h ... grpc.pb.h} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/service_account_service.{pb.h ... grpc.pb.h} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |60.2%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |60.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/column_families.cpp |60.3%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_description.cpp |60.7%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/5732afb9e28f3811e12c561eb5_raw.auxcpp |61.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |61.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health__intpy3___pb2.py{ ... i} |61.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health__intpy3___pb2_grpc.py.p5ju.yapyc3 |61.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_1c0f807c059fe226699115f242.o |61.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_790c6ea4aad5e761d21421b25d.o |61.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq__intpy3___pb2.py{ ... i} |61.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_16842d72ae0dac1856818f841e.o |61.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv__intpy3___pb2_grpc.py.p5ju.yapyc3 |61.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq__intpy3___pb2.py.p5ju.yapyc3 |61.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv__intpy3___pb2.py{ ... 
i} |61.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health__intpy3___pb2.py.p5ju.yapyc3 |61.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv__intpy3___pb2.py.p5ju.yapyc3 |62.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |62.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |62.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/secure_protobuf_printer.cpp |62.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_40779f0570229cef213050a4fa.o |62.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_14c03c6aecffbe39cb01ddf2ed.o |62.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_profiles.cpp |62.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_d52256d4fa9895f38df6030445.o |62.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/160c5542e9615810f3f86fe955_raw.auxcpp |63.1%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/b128a176091d6f4205660cfcb9_raw.auxcpp |63.1%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |63.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |63.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/purge.cpp |63.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/base/generated/libcore-base-generated.a |63.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp |64.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |64.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_users.cpp |64.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/fc437004cc347d978fb8cbf231_raw.auxcpp |64.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |64.3%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/160c5542e9615810f3f86fe955_raw.auxcpp |64.3%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |64.3%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/grpc__intpy3___pb2.py{ ... 
i} |64.3%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/b128a176091d6f4205660cfcb9_raw.auxcpp |64.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/grpc__intpy3___pb2.py.p5ju.yapyc3 |64.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |64.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/libcore-base-generated.a |64.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/grpc__intpy3___pb2_grpc.py.p5ju.yapyc3 |64.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |64.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/4399546af28cb40e5d74ea4a4b_raw.auxcpp |64.3%| [PB] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/protos/config.pb.{h, cc} |64.3%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |64.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py.p5ju.yapyc3 |64.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |64.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |64.3%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/metering.h_serialized.cpp |64.3%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/fc437004cc347d978fb8cbf231_raw.auxcpp |64.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_49bad8251d240ad7c49d384b91.o |64.3%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |64.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_181bdcd1743e9a1a78fafe4b60.o |64.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_49e9948af399bc60603a7d2db5.o |64.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/garbage_collector.cpp |64.5%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal.{pb.h ... grpc.pb.h} |64.7%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py.p5ju.yapyc3 |64.9%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2_grpc.py.p5ju.yapyc3 |65.0%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console_config.{pb.h ... 
grpc.pb.h} |65.0%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/tx_datashard__intpy3___pb2_grpc.py.p5ju.yapyc3 |65.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/lib/ydb_cli/common/libcommon.a |65.1%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/viewer/protos/viewer.pb.{h, cc} |65.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part0/ydb-tests-fq-yt-kqp_yt_file-part0 |65.1%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/tx_datashard__intpy3___pb2.py.p5ju.yapyc3 |65.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/count_queues.cpp |65.1%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/config__intpy3___pb2.py.p5ju.yapyc3 |65.2%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/kqp__intpy3___pb2.py.p5ju.yapyc3 |65.2%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/config__intpy3___pb2_grpc.py.p5ju.yapyc3 |65.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/data_events/shard_writer.cpp |65.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_253d734e8c901d319d84fcc6e9.o |65.2%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console__intpy3___pb2.py.p5ju.yapyc3 |65.2%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py.p5ju.yapyc3 |65.2%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.h |65.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_363b5875cc5c5e5745458b16b8.o |65.2%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |65.2%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py{ ... i} |65.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_303f7409bfab4277e367bbd11a.o |65.3%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console__intpy3___pb2.py{ ... i} |65.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_e2a089b95d9316f6e26025d3e3.o |65.4%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/serverless_proxy_config.{pb.h ... grpc.pb.h} |65.5%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/grpc.{pb.h ... grpc.pb.h} |65.5%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/config__intpy3___pb2.py{ ... i} |65.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |65.8%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/tx_datashard__intpy3___pb2.py{ ... i} |65.9%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |65.9%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/kqp__intpy3___pb2_grpc.py.p5ju.yapyc3 |66.0%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py{ ... i} |66.1%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console.{pb.h ... grpc.pb.h} |66.1%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/kqp__intpy3___pb2.py{ ... i} |66.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/d78d0f74a3f72be1016c0cf8cf_raw.auxcpp |66.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |66.2%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/msgbus.{pb.h ... grpc.pb.h} |66.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_4352b8b3e3cf61532c865b371b.o |66.2%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |66.2%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/tx_datashard.{pb.h ... 
grpc.pb.h} |66.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/library/libpy3tests-postgres_integrations-library.global.a |66.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_3ddbad334a37a829b3772ddb05.o |66.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_95b3eecc97c453f0c55c456659.o |66.5%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/kqp.{pb.h ... grpc.pb.h} |67.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |67.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_08f7acdb6eb761b28bf6990862.o |67.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |67.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |67.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_c7c229be41e9b028572ad1aab3.o |67.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/workload/libpy3stress-cdc-workload.global.a |67.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_8e57113197bb359e3999b04aab.o |67.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |67.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_5b5c3367c789898aa5a6cae866.o |67.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_d0255dda539959b69d421868a2.o |67.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_b031a661ba244dffa03ab0c7ec.o |67.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_96b8686cd075e874d95d4aa5c5.o |67.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_6a5c78aa9f679a0920be5264fe.o |67.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |67.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/5a2f230528097042fdaf726fed_raw.auxcpp |67.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_b9fcf9641e3e569e88014f85ff.o |67.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_4826ee2207124da1bc398e3bd8.o |67.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_7f02665786b7523f76c02ad1dd.o |67.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |68.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |68.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_5294a064c14cf5a49516321590.o |68.0%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/config.{pb.h ... 
grpc.pb.h} |68.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_7c0098f27edc25092453a8033c.o |68.1%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |68.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/lib/ydb_cli/common/libcommon.a |68.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/liblibrary-cpp-build_info.a |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_part_loader.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_mongroups.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yql/dq/runtime/dq_output_channel.cpp |68.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_mongroups.cpp |68.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/svnversion/liblibrary-cpp-svnversion.a |69.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_part_loader.cpp |69.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |69.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/dq/runtime/libyql-dq-runtime.a |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/dq/runtime/dq_output_channel.cpp |70.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |70.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/dq/comp_nodes/libyql-dq-comp_nodes.a |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/ydb_cli/commands/ydb_benchmark.cpp |72.3%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp |72.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/testing/group_overseer/libblobstorage-testing-group_overseer.a |72.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_engine_flat_host_ut.cpp |72.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_proto_ut.cpp |72.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/kikimr_program_builder_ut.cpp |73.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/dq/runtime/libyql-dq-runtime.a |74.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/backup/s3_path_style/ydb-tests-functional-backup-s3_path_style |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_benchmark.cpp |74.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |74.5%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |74.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |74.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |74.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp |74.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp |74.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_checkpoint_storage_ut.cpp |74.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |74.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |74.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp |74.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |74.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage.cpp |74.3%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |74.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/runtime/kqp_re2_ut.cpp |74.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |74.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp |73.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |73.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp |73.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |73.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_state_storage_ut.cpp |74.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |73.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/ut_helpers.cpp |73.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cluster_info_ut.cpp |73.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_quorum_tracker_ut.cpp |73.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |73.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tenants_ut.cpp |73.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp |73.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp |73.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp |73.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp |73.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |73.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |73.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp |73.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |73.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage |73.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |73.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/ut/ut_utils.cpp |73.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/lib/auth/auth_helpers.cpp |73.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/rbo/kqp_rbo_ut.cpp |73.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |73.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/yt/yt/client/libyt-yt-client.a |73.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |73.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |73.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |73.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |73.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |73.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp |73.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |73.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp |73.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp |73.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |73.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |73.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/ut/ut_simple.cpp |73.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp |73.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |73.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |73.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp |73.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp |73.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp |73.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut.cpp |73.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_system_names/ydb-core-tx-schemeshard-ut_system_names |73.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |73.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |73.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |73.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp |73.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |73.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part8/ydb-tests-fq-yt-kqp_yt_file-part8 |73.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |73.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |73.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/base/secure_protobuf_printer.cpp |73.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |73.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |73.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |73.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp |73.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |73.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/server_restart/public-sdk-cpp-tests-integration-server_restart |73.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp |72.9%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |72.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_sysview_reboots/ut_sysview_reboots.cpp |72.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |72.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |72.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |72.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp |72.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/yql/essentials/tools/sql2yql/sql2yql |72.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp |72.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |72.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/row_dispatcher/row_dispatcher_service.cpp |72.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |72.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |72.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |72.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp |72.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp |72.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_maintenance_api_ut.cpp |72.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator_ut.cpp |72.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut_common.cpp |72.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |72.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |72.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |72.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |72.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |72.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp |72.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp |72.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp |72.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/pipe_tracker_ut.cpp |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |72.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |72.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |72.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters_ut.cpp |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |72.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_metrics_ut.cpp 
|72.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/sequencer_ut.cpp |72.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_discover_ut.cpp |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |72.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |72.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_request_reporting_ut.cpp |72.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/query_replay.cpp |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |72.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/query_replay/ydb_query_replay |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_scan_data_ut.cpp |72.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/query_compiler.cpp |72.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part19/ydb-tests-fq-yt-kqp_yt_file-part19 |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/main.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/downtime_ut.cpp |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/query_proccessor.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/table_creator/table_creator_ut.cpp |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query 
|72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/partition_stats/partition_stats_ut.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp |72.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/ut/helpers/libmkql_proto-ut-helpers.a |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/resource_broker_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_counters_aggregator_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/bootstrapper_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |72.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/folder_service.{pb.h ... grpc.pb.h} |72.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/transitional/folder_service.{pb.h ... grpc.pb.h} |72.6%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/client/libyt-yt-client.a |72.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/folder.{pb.h ... 
grpc.pb.h} |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/essentials/tools/sql2yql/sql2yql |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_pipecache_ut.cpp |72.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/secure_protobuf_printer.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp |72.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/assign_tx_id_ut.cpp |72.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_scan_data_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/ut/ut_object.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |72.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/lib/auth/auth_helpers.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp |72.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_system_names/ut_system_names.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/dictionary_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/indexes_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/compaction_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/locks_ut.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp |72.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |72.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/config/bsconfig_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/topic_data_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp |72.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer_ut.cpp |71.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |71.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |71.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp |71.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |71.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |71.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/config/ut/ydb-services-config-ut |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.cpp |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yql/providers/pq/async_io/dq_pq_write_actor.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_table.cpp |67.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/pq/async_io/dq_pq_write_actor.cpp |66.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_table.cpp |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/export_scan.cpp |66.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/export_scan.cpp |66.2%| PREPARE $(BLACK_LINTER-sbr:8415400280) |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/row_dispatcher/topic_session.cpp |65.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/topic_session.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ydb_convert/column_families.cpp |65.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/column_families.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service.cpp |64.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service.cpp |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/ydb/actors_factory.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/actors/rate_limiter.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/read_rule/read_rule_creator.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/read_rule/read_rule_deleter.cpp |64.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/compute/ydb/actors_factory.cpp |64.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/actors/rate_limiter.cpp |64.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part3/ydb-tests-fq-yt-kqp_yt_file-part3 |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.cpp |63.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/bin/solomon_emulator |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/ydb/executer_actor.cpp |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/common/run_actor_params.cpp |63.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/compute/common/run_actor_params.cpp |63.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/compute/ydb/executer_actor.cpp |63.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part17/ydb-tests-fq-yt-kqp_yt_file-part17 |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/ydb/resources_cleaner_actor.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/ydb/finalizer_actor.cpp |62.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/read_rule/libfq-libs-read_rule.a |62.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/compute/ydb/finalizer_actor.cpp |62.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/recipe/solomon_recipe |62.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/compute/ydb/resources_cleaner_actor.cpp |62.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part7/ydb-tests-fq-yt-kqp_yt_file-part7 |62.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part1/ydb-tests-fq-yt-kqp_yt_file-part1 |62.4%| [CC] {default-linux-x86_64, 
relwithdebinfo} $(S)/ydb/core/fq/libs/compute/ydb/stopper_actor.cpp |62.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part4/ydb-tests-fq-yt-kqp_yt_file-part4 |62.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part14/ydb-tests-fq-yt-kqp_yt_file-part14 |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/compute/ydb/stopper_actor.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/actors/error.cpp |62.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/node_broker |61.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_session/ydb-tests-functional-kqp-kqp_query_session |61.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ydb_convert/table_profiles.cpp |61.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part5/ydb-tests-fq-yt-kqp_yt_file-part5 |61.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/plan2svg/ydb-tests-functional-kqp-plan2svg |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/ydb/status_tracker_actor.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/deprecated/kicli/query.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/ydb/initializer_actor.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/internal/utils.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/ydb/ydb_connector_actor.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/fq/ut_integration/ut_utils.cpp |61.4%| RESOURCE $(sbr:4966407557) |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/data_events/shard_writer.cpp |61.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/table_profiles.cpp |61.5%| [AR] {RESULT} $(B)/ydb/core/base/generated/libcore-base-generated.a |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/kicli/query.cpp |61.4%| [SB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/postgresql/psql/psql |61.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dicts/libdicts_udf.so |61.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/fq/ut_integration/ut_utils.cpp |61.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.so |61.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dummylog/libdummylog.so |61.1%| [AR] {RESULT} $(B)/ydb/core/fq/libs/read_rule/libfq-libs-read_rule.a |61.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.so |61.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/data_events/shard_writer.cpp |60.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/streaming/libstreaming_udf.so |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/simple/libsimple_udf.so |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/structs/libstructs_udf.so |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.so |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/lists/liblists_udf.so |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/libmath_udf.so |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/vector/libvector_udf.so |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/callables/libcallables_udf.so |60.7%| [AR] {RESULT} $(B)/ydb/library/yql/dq/comp_nodes/libyql-dq-comp_nodes.a |60.7%| [AR] {RESULT} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |60.7%| [AR] {RESULT} $(B)/ydb/public/lib/ydb_cli/common/libcommon.a |60.7%| 
RESOURCE $(sbr:770480022) |60.7%| [AR] {RESULT} $(B)/ydb/library/yql/dq/runtime/libyql-dq-runtime.a |60.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.cpp |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/type_inspection/libtype_inspection_udf.so |60.7%| [AR] {RESULT} $(B)/yt/yt/client/libyt-yt-client.a |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.so |60.5%| [LD] {RESULT} $(B)/yql/essentials/tools/sql2yql/sql2yql |60.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part13/ydb-tests-fq-yt-kqp_yt_file-part13 |60.5%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.so |60.5%| [SB] {default-linux-x86_64, relwithdebinfo} $(B)/library/recipes/docker_compose/bin/docker-compose |60.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.so |60.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.so |60.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/olap_workload |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.so |59.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/cdc |59.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/transfer/ut/functional/ydb-core-transfer-ut-functional |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/top/libtop_udf.so |58.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/replication/ydb-tests-functional-replication |58.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/backup/ydb-tests-functional-backup |58.8%| COMPACTING CACHE 7.3GiB |58.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part2/ydb-tests-fq-yt-kqp_yt_file-part2 |58.8%| [TS] {RESULT} ydb/tests/stress/simple_queue/tests/flake8 |58.8%| [TS] {RESULT} ydb/tests/stress/mixedpy/flake8 |58.8%| [TS] {RESULT} ydb/tests/fq/s3/flake8 |58.8%| [TS] {RESULT} ydb/tests/functional/wardens/flake8 |58.8%| [TS] {RESULT} ydb/tests/functional/ttl/flake8 |58.8%| [TS] {RESULT} ydb/tests/sql/large/flake8 |58.8%| [TS] {RESULT} ydb/tests/fq/multi_plane/flake8 |58.8%| [TS] {RESULT} ydb/tests/stress/kv/tests/flake8 |58.8%| [TS] {RESULT} ydb/tests/stress/show_create/view/tests/flake8 |58.8%| [TS] {RESULT} ydb/tests/stress/transfer/tests/flake8 |58.8%| [TS] {RESULT} ydb/tests/functional/blobstorage/flake8 |58.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/row_dispatcher/row_dispatcher.cpp |58.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part10/ydb-tests-fq-yt-kqp_yt_file-part10 |58.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part6/ydb-tests-fq-yt-kqp_yt_file-part6 |58.8%| [TS] {RESULT} ydb/tests/tools/nemesis/ut/flake8 |58.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part12/flake8 |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part9/ydb-tests-fq-yt-kqp_yt_file-part9 |58.9%| [TS] {RESULT} ydb/tests/functional/ydb_cli/flake8 |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/file/libfile_udf.so |58.9%| [TS] {RESULT} ydb/tests/functional/sqs/large/flake8 |58.9%| [TS] {RESULT} ydb/tests/olap/lib/flake8 |58.9%| [TS] {RESULT} ydb/tests/functional/sqs/messaging/flake8 |58.9%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part11/flake8 |58.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.cpp |58.9%| [TS] {RESULT} ydb/tests/stress/s3_backups/tests/flake8 |58.9%| [TS] {RESULT} ydb/tests/functional/restarts/flake8 |58.9%| [TS] {RESULT} ydb/tests/fq/http_api/flake8 |58.9%| [TS] 
{RESULT} ydb/tests/fq/yt/kqp_yt_file/part3/flake8 |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_indexes/ydb-tests-functional-kqp-kqp_indexes |58.9%| [TS] {RESULT} ydb/tests/fq/generic/streaming/flake8 |58.9%| [TS] {RESULT} ydb/tests/functional/minidumps/flake8 |58.9%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part7/flake8 |58.9%| [TS] {RESULT} ydb/tests/datashard/dump_restore/flake8 |58.9%| [TS] {RESULT} ydb/tests/olap/ttl_tiering/flake8 |58.9%| [TS] {RESULT} ydb/tests/functional/autoconfig/flake8 |58.9%| [TS] {RESULT} ydb/tests/olap/delete/flake8 |58.9%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/join/flake8 |58.9%| [TS] {RESULT} ydb/tests/olap/s3_import/large/flake8 |58.9%| [TS] {RESULT} ydb/tests/tools/pq_read/test/flake8 |58.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/pq/async_io/libproviders-pq-async_io.a |58.9%| [TS] {RESULT} ydb/tests/functional/sqs/common/flake8 |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/show_create_view |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/streaming_optimize/ydb-tests-fq-streaming_optimize |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part11/ydb-tests-fq-yt-kqp_yt_file-part11 |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/bulk_upsert/ydb-public-sdk-cpp-tests-integration-bulk_upsert |58.9%| [TS] {RESULT} ydb/tests/functional/suite_tests/flake8 |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/solomon/ydb-tests-fq-solomon |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so |58.9%| [TS] {RESULT} ydb/tests/olap/oom/flake8 |58.9%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/flake8 |58.9%| [TS] {RESULT} ydb/tests/sql/flake8 |58.9%| [TS] {RESULT} ydb/tests/datashard/dml/flake8 |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/ydb-tests-olap-load |58.9%| [AR] {RESULT} $(B)/ydb/library/yql/providers/pq/async_io/libproviders-pq-async_io.a |59.0%| [TS] {RESULT} ydb/tests/stress/reconfig_state_storage_workload/tests/flake8 |59.0%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/mysql/flake8 |59.0%| [TS] {RESULT} ydb/tests/functional/hive/flake8 |59.0%| [TS] {RESULT} ydb/tests/functional/canonical/flake8 |59.0%| [TS] {RESULT} ydb/tests/datashard/vector_index/large/flake8 |59.0%| [TS] {RESULT} ydb/tests/functional/config/flake8 |59.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part0/flake8 |59.0%| [TS] {RESULT} ydb/tests/datashard/partitioning/flake8 |59.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator_grpc/solomon_recipe_grpc |59.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/libunicode_udf.so |59.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tools/astdiff/astdiff |59.0%| [TS] {RESULT} ydb/tests/olap/docs/generator/import_test |59.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/python/python3_small/libpython3_udf.so |59.0%| [TS] {RESULT} ydb/tests/fq/solomon/flake8 |59.0%| [TS] {RESULT} ydb/tests/datashard/select/flake8 |59.0%| [TS] {RESULT} ydb/tests/functional/cms/flake8 |59.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/partition_writer_cache_actor.cpp |59.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part10/flake8 |59.0%| [TS] {RESULT} ydb/tests/fq/restarts/flake8 |59.0%| [TS] {RESULT} ydb/tests/functional/sqs/cloud/flake8 |59.0%| [TS] {RESULT} ydb/tests/library/ut/flake8 |59.0%| [TS] {RESULT} 
ydb/tests/functional/sqs/with_quotas/flake8 |59.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_import/flake8 |59.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/basic_example/public-sdk-cpp-tests-integration-basic_example |59.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_client.cpp |59.0%| [TS] {RESULT} ydb/tests/fq/yds/flake8 |59.0%| [TS] {RESULT} ydb/tests/datashard/vector_index/medium/flake8 |59.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/partition_writer_cache_actor.cpp |59.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.so |59.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/supp/ydb_supp |59.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_client.cpp |59.0%| [TS] {RESULT} ydb/tests/functional/tpc/large/flake8 |59.0%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/ydb/flake8 |59.0%| [TS] {RESULT} ydb/tests/datashard/copy_table/flake8 |59.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part15/ydb-tests-fq-yt-kqp_yt_file-part15 |59.0%| [TS] {RESULT} ydb/tests/functional/serverless/flake8 |59.0%| [TS] {RESULT} ydb/tests/stress/oltp_workload/tests/flake8 |59.0%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/flake8 |59.0%| [TS] {RESULT} ydb/tests/olap/common/flake8 |59.1%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part14/flake8 |59.1%| [TS] {RESULT} ydb/tests/functional/query_cache/flake8 |59.1%| [TS] {RESULT} ydb/tests/sql/lib/flake8 |59.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/operations/write_data.cpp |59.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.so |59.1%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/flake8 |59.1%| [TS] {RESULT} ydb/tests/functional/audit/flake8 |59.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/write_data.cpp |59.1%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part2/flake8 |59.1%| [TS] {RESULT} ydb/tests/stress/node_broker/tests/flake8 |59.1%| [TS] {RESULT} ydb/tests/olap/load/flake8 |59.1%| [TS] {RESULT} ydb/tests/fq/common/flake8 |59.1%| [TS] {RESULT} ydb/tests/stress/cdc/tests/flake8 |59.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/remove_lock_change_records.cpp |59.1%| [TS] {RESULT} ydb/tests/tools/kqprun/tests/flake8 |59.1%| [TS] {RESULT} ydb/tests/olap/docs/generator/flake8 |59.1%| [TS] {RESULT} ydb/tests/olap/scenario/flake8 |59.1%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part4/flake8 |59.1%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part18/flake8 |59.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/remove_lock_change_records.cpp |59.1%| [TS] {RESULT} ydb/tests/fq/mem_alloc/flake8 |59.1%| [TS] {RESULT} ydb/tests/datashard/parametrized_queries/flake8 |59.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_svc/ydb-tests-functional-kqp-kqp_query_svc |59.1%| [TS] {RESULT} ydb/tests/stress/olap_workload/tests/flake8 |59.1%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part17/flake8 |59.1%| [TS] {RESULT} ydb/tests/functional/tpc/medium/flake8 |59.1%| [TS] {RESULT} ydb/tests/functional/script_execution/flake8 |59.1%| [TS] {RESULT} ydb/tests/functional/bridge/flake8 |59.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/protobuf/libprotobuf_udf.so |59.1%| [TS] {RESULT} ydb/core/viewer/tests/flake8 |59.1%| [TS] {RESULT} ydb/tests/olap/data_quotas/flake8 |59.1%| [TS] {RESULT} 
ydb/tests/datashard/secondary_index/flake8 |59.1%| [TS] {RESULT} ydb/tests/functional/scheme_tests/flake8 |59.1%| [TS] {RESULT} ydb/tests/datashard/split_merge/flake8 |59.1%| [TS] {RESULT} ydb/tests/olap/column_family/compression/flake8 |59.1%| [TS] {RESULT} ydb/tests/functional/encryption/flake8 |59.2%| [TS] {RESULT} ydb/tests/olap/flake8 |59.2%| [TS] {RESULT} ydb/tests/fq/streaming_optimize/flake8 |59.2%| [TS] {RESULT} ydb/tests/postgres_integrations/go-libpq/flake8 |59.2%| [TS] {RESULT} ydb/tests/library/compatibility/binaries/downloader/import_test |59.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part8/flake8 |59.2%| [TS] {RESULT} ydb/tests/functional/kqp/plan2svg/flake8 |59.2%| [TS] {RESULT} ydb/tests/functional/limits/flake8 |59.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part15/flake8 |59.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/simple_queue |59.2%| [TS] {RESULT} ydb/tests/library/compatibility/binaries/downloader/flake8 |59.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part1/flake8 |59.2%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/oracle/flake8 |59.2%| [TS] {RESULT} ydb/tests/functional/sqs/multinode/flake8 |59.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part16/flake8 |59.2%| [TS] {RESULT} ydb/tests/functional/large_serializable/flake8 |59.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part9/flake8 |59.2%| [TS] {RESULT} ydb/tests/example/flake8 |59.2%| [TS] {RESULT} ydb/tests/functional/scheme_shard/flake8 |59.2%| [TS] {RESULT} ydb/tests/functional/clickbench/flake8 |59.2%| [TS] {RESULT} ydb/tests/functional/api/flake8 |59.2%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/fifo/flake8 |59.2%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/std/flake8 |59.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_import/import_test |59.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/change_sender_table_base.cpp |59.2%| [TS] {RESULT} ydb/tests/functional/serializable/flake8 |59.2%| [TS] {RESULT} ydb/tests/functional/rename/flake8 |59.2%| [TS] {RESULT} ydb/tests/fq/generic/analytics/flake8 |59.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender_table_base.cpp |59.2%| [TS] {RESULT} ydb/tests/datashard/async_replication/flake8 |59.2%| [TS] {RESULT} ydb/tests/tools/kqprun/recipe/flake8 |59.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part13/flake8 |59.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part5/flake8 |59.2%| [TS] {RESULT} ydb/tests/stress/log/tests/flake8 |59.3%| [TS] {RESULT} ydb/tests/functional/clickbench/import_test |59.3%| [TS] {RESULT} ydb/tests/functional/postgresql/flake8 |59.3%| [TS] {RESULT} ydb/tests/datashard/s3/flake8 |59.3%| [TS] {RESULT} ydb/tests/datashard/ttl/flake8 |59.3%| [TS] {RESULT} ydb/tests/olap/s3_import/flake8 |59.3%| [TS] {RESULT} ydb/tests/functional/tenants/flake8 |59.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/topic/topic_it |59.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/ydb-public-sdk-cpp-tests-integration-sessions |59.4%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part19/flake8 |59.4%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part6/flake8 |59.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/public-sdk-cpp-tests-integration-sessions_pool |59.4%| [TS] {RESULT} ydb/tests/compatibility/flake8 |59.4%| [TS] {RESULT} ydb/tests/fq/plans/flake8 |59.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/large/ydb-tests-olap-s3_import-large |59.4%| [AR] 
{default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/row_dispatcher/libfq-libs-row_dispatcher.a |59.4%| [AR] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/libfq-libs-row_dispatcher.a |59.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/topic/with_direct_read/topic_direct_read_it |59.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/purge.cpp |59.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/purge.cpp |59.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/create_cdc_stream_unit.cpp |59.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_cdc_stream_unit.cpp |59.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/garbage_collector.cpp |59.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/garbage_collector.cpp |59.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/list_users.cpp |59.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/list_users.cpp |59.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/count_queues.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/count_queues.cpp |59.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard_private_events.cpp |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/special_cleaner.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_private_events.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/special_cleaner.cpp |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/partition_actor.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/partition_actor.cpp |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/distributed_commit_helper.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/distributed_commit_helper.cpp |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/resource_subscriber/task.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/task.cpp |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/resource_subscriber/actor.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/actor.cpp |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_service.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_service.cpp |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/abstract/abstract.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/abstract/abstract.cpp |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_empty.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_empty.cpp |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/export/session/storage/s3/storage.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/storage/s3/storage.cpp |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/execute_write_unit.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_write_unit.cpp |59.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |59.5%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |59.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp |59.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp |59.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/grpc_pq_write.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/grpc_pq_write.cpp |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_actualization.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_actualization.cpp |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/tables/normalizer.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/tables/normalizer.cpp |59.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |59.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |59.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard_schema.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_schema.cpp |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/abstract.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/abstract.cpp |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/constructor.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/constructor.cpp |59.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a |59.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a |59.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_unused_tables_template.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_unused_tables_template.cpp |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/tablet/ext_tx_base.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/tablet/ext_tx_base.cpp |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/initializer.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/initializer.cpp |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/operators/sharing.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/sharing.cpp 
|59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean.cpp |59.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean.cpp |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard_subdomain_path_id.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_subdomain_path_id.cpp |59.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/write_session_actor.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/secondary.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/secondary.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/write_session_actor.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/store_scheme_tx_unit.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_scheme_tx_unit.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/snapshot.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/snapshot.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/operations/slice_builder/builder.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/slice_builder/builder.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v1_chunks.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v1_chunks.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/splitter/chunks.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/chunks.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/inflight_request_tracker.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/inflight_request_tracker.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/create_store.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/create_store.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_store.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_store.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/import_s3.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/portion.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/import_s3.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/portion.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/tablet/broken_txs.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/tablet/broken_txs.cpp |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/metadata/behaviour.cpp |59.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/behaviour.cpp |59.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |59.8%| [AR] {RESULT} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |59.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/services/persqueue_v1/services_initializer.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/services_initializer.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/create_table.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/create_table.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/copy_blob_ids_to_v2.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/copy_blob_ids_to_v2.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/schema_actors.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/schema_actors.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_store.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_store.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard__write_index.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__write_index.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/operations/write.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/write.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/checker_access.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/checker_access.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/tablet/gc_counters.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/tablet/gc_counters.cpp |59.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |59.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |59.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_index_columns.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_index_columns.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/primary.cpp |59.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/primary.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/object.cpp |59.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_version_info.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/object.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_version_info.cpp |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/common_level.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/common_level.cpp |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/columnshard/normalizer/schema_version/version.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/schema_version/version.cpp |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/one_layer.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/one_layer.cpp |59.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |59.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |59.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_table.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_table.cpp |59.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |59.9%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |59.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/tables_manager.cpp |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard__propose_cancel.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__propose_cancel.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/tables_manager.cpp |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_info.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_info.cpp |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_deprecated_snapshot.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_deprecated_snapshot.cpp |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/move_table_unit.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/move_table_unit.cpp |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks.cpp |59.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks.cpp |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/operations/batch_builder/merger.cpp |60.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/batch_builder/merger.cpp |59.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/tablet/write_queue.cpp |60.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp |60.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/tablet/write_queue.cpp |60.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp |60.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |60.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |60.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |60.0%| [CC] {default-linux-x86_64, 
relwithdebinfo} $(S)/ydb/core/tx/columnshard/operations/events.cpp |60.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/events.cpp |60.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard.cpp |60.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard.cpp |60.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/granule/normalizer.cpp |60.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/granule/normalizer.cpp |60.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard__init.cpp |60.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__init.cpp |60.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/constructor.cpp |60.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/constructor.cpp |60.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/kesus/grpc_service.cpp |60.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/kesus/grpc_service.cpp |60.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/kesus/libydb-services-kesus.a |60.0%| [AR] {RESULT} $(B)/ydb/services/kesus/libydb-services-kesus.a |60.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/kesus/libydb-services-kesus.a |60.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/broken_blobs.cpp |60.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/loading/stages.cpp |60.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/broken_blobs.cpp |60.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/loading/stages.cpp |60.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |60.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |60.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |60.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/store_data_tx_unit.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/export/actor/write.cpp |60.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/actor/write.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/ds_table/config.cpp |60.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/config.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_impl.cpp |60.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_impl.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/federated_query/common/common.cpp |60.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/common/common.cpp |60.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |60.1%| [AR] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |60.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/sharding/hash_slider.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__write.cpp |60.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__write.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/datashard/datashard_outreadset.cpp |60.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_outreadset.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/checker_secret.cpp |60.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/checker_secret.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/writer/indexed_blob_constructor.cpp |60.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/indexed_blob_constructor.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v2_chunks.cpp |60.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v2_chunks.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/bridge/bridge.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/manager.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/export/session/session.cpp |60.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/manager.cpp |60.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/lib/actors/pq_schema_actor.cpp |60.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/session.cpp |60.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/lib/actors/libservices-lib-actors.a |60.2%| [AR] {RESULT} $(B)/ydb/services/lib/actors/libservices-lib-actors.a |60.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/lib/actors/libservices-lib-actors.a |60.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/lib/actors/pq_schema_actor.cpp |60.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/dsproxy/bridge/libblobstorage-dsproxy-bridge.a |60.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/leaked_blobs.cpp |60.2%| [AR] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/bridge/libblobstorage-dsproxy-bridge.a |60.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/leaked_blobs.cpp |60.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/request/config.cpp |60.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/read_session_actor.cpp |60.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard__notify_tx_completion.cpp |60.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/read_session_actor.cpp |60.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__notify_tx_completion.cpp |60.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/metadata/object.cpp |60.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/object.cpp |60.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/test_helper/helper.cpp |60.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/events.cpp |60.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/helper.cpp |60.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/events.cpp |60.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/metadata/snapshot.cpp |60.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/snapshot.cpp |60.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/service.cpp |60.2%| [AR] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/services/metadata/libydb-services-metadata.a |60.2%| [AR] {RESULT} $(B)/ydb/services/metadata/libydb-services-metadata.a |60.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/service.cpp |60.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/libydb-services-metadata.a |60.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/mock/yql_mock.cpp |60.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/read_init_auth_actor.cpp |60.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/read_init_auth_actor.cpp |60.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/mock/yql_mock.cpp |60.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a |60.3%| [AR] {RESULT} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a |60.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a |60.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_v0_meta.cpp |60.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_v0_meta.cpp |60.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/sharding/sharding.cpp |60.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |60.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |60.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |60.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |60.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |60.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/zero_level.cpp |60.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/zero_level.cpp |60.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.global.a |60.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.global.a |60.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.global.a |60.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/export/events/events.cpp |60.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/events/events.cpp |60.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |60.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |60.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |60.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/initializer/common.cpp |60.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/common.cpp |60.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/execute_data_tx_unit.cpp |60.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_data_tx_unit.cpp |60.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard__write.cpp |60.3%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/columnshard__write.cpp |60.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/export/actor/export_actor.cpp |60.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/actor/export_actor.cpp |60.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a |60.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a |60.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/secret_behaviour.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/secret_behaviour.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/background_controller.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/background_controller.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ymq/ymq_proxy.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_dep_tracker.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ymq/ymq_proxy.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_dep_tracker.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/abstract.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/abstract.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/access.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/access.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard__scan.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__scan.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_overload.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_overload.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/upload_stats.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/zero_level.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/upload_stats.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/zero_level.cpp |60.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/liblcbuckets-planner-level.a |60.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/liblcbuckets-planner-level.a |60.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/liblcbuckets-planner-level.a |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/fetcher.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/fetcher.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/metadata/fetcher.cpp |60.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/fetcher.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/update_offsets_in_transaction_actor.cpp |60.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/writer/compacted_blob_constructor.cpp 
|60.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/update_offsets_in_transaction_actor.cpp |60.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/request/request_actor_cb.cpp |60.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/request/request_actor_cb.cpp |60.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/compacted_blob_constructor.cpp |60.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |60.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |60.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |60.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/optimizer.cpp |60.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/optimizer.cpp |60.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |60.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |60.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |60.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/persqueue.cpp |60.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue.cpp |60.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp |60.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp |60.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/manager/modification.cpp |60.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/modification.cpp |60.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp |60.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp |60.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/manager/restore.cpp |60.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/restore.cpp |60.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |60.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |60.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |60.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/fq/ydb_over_fq.cpp |60.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/fq/ydb_over_fq.cpp |60.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_kqp_effects.cpp |60.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/topic.cpp |60.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/topic.cpp |60.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/fq/grpc_service.cpp |60.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/fq/grpc_service.cpp |60.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/fq/libydb-services-fq.a |60.6%| [AR] {RESULT} $(B)/ydb/services/fq/libydb-services-fq.a |60.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/test_helper/shard_writer.cpp |60.6%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/services/fq/libydb-services-fq.a |60.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/access_behaviour.cpp |60.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/shard_writer.cpp |60.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/sharding/random.cpp |60.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/access_behaviour.cpp |60.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/write_actor.cpp |60.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |60.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/write_actor.cpp |60.6%| [AR] {RESULT} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |60.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |60.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a |60.6%| [AR] {RESULT} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a |60.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/direct_read_actor.cpp |60.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/direct_read_actor.cpp |60.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/ds_table/scheme_describe.cpp |60.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/request/request_actor.cpp |60.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/scheme_describe.cpp |60.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |60.6%| [AR] {RESULT} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |60.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |60.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/util/actorsys_test/testactorsys.cpp |60.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/actorsys_test/testactorsys.cpp |60.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |60.7%| [AR] {RESULT} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |60.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |60.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/secret.cpp |60.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/secret.cpp |60.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/test_helper/shard_reader.cpp |60.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/shard_reader.cpp |60.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |60.7%| [AR] {RESULT} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |60.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/meta.cpp |60.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |60.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/persqueue_utils.cpp |60.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/persqueue_utils.cpp |60.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |60.7%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |60.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/logs/log.cpp |60.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/logs/log.cpp |60.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |60.7%| [AR] {RESULT} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |60.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |60.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_actor.cpp |60.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_actor.cpp |60.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a |60.7%| [AR] {RESULT} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a |60.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a |60.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/metadata/initializer.cpp |60.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/initializer.cpp |60.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/ds_table/accessor_refresh.cpp |60.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_refresh.cpp |60.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/deprecated/client/grpc_client.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/client/grpc_client.cpp |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/manager/common.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/common.cpp |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/initializer/fetcher.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/fetcher.cpp |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/update.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/update.cpp |60.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |60.8%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |60.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/ds_table/service.cpp |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/constructor.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/service.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/constructor.cpp |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/update.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/update.cpp |60.8%| [AR] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |60.8%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |60.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/abstract/common.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/common.cpp |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor/usage/service.cpp |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/ds_table/registration.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/registration.cpp |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_write_source_cursor.cpp |60.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_write_source_cursor.cpp |60.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/initializer/object.cpp |60.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/object.cpp |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard_impl.cpp |60.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_impl.cpp |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/prepare_data_tx_in_rs_unit.cpp |60.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/prepare_data_tx_in_rs_unit.cpp |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/filtered_scheme.cpp |60.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/filtered_scheme.cpp |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/manager/alter.cpp |60.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/alter.cpp |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp |60.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/manager/abstract.cpp |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/update.cpp |60.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/abstract.cpp |60.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/update.cpp |60.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a |60.9%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor/usage/abstract.cpp |60.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/base/counters.cpp |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/public/lib/base/msgbus.cpp |60.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/base/msgbus.cpp |60.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/lib/base/libpublic-lib-base.a |60.9%| [AR] {RESULT} $(B)/ydb/public/lib/base/libpublic-lib-base.a |60.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/lib/base/libpublic-lib-base.a |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/initializer/snapshot.cpp |60.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/snapshot.cpp |60.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/metadata/manager.cpp |60.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/manager.cpp |60.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |61.0%| [AR] {RESULT} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |61.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |61.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/common/config.cpp |61.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/test_helper/controllers.cpp |61.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/controllers.cpp |61.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/initializer/behaviour.cpp |61.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/behaviour.cpp |61.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/manager/alter_impl.cpp |61.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/alter_impl.cpp |61.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/meta.cpp |61.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/meta.cpp |61.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/ut_common/datashard_ut_common.cpp |61.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/ut_common/datashard_ut_common.cpp |61.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |61.0%| [AR] {RESULT} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |61.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/meta.cpp |61.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |61.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/meta.cpp |61.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a |61.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a |61.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a |61.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/filtered_scheme.cpp |61.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/filtered_scheme.cpp |61.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_finish_ack_to_source.cpp |61.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_finish_ack_to_source.cpp |61.0%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/formats/arrow/serializer/native.cpp |61.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/formats/arrow/serializer/native.cpp |61.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.global.a |61.0%| [AR] {RESULT} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.global.a |61.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.global.a |61.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__init.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__init.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/queues_list_reader.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/queues_list_reader.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_to_source.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_to_source.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_data_ack_to_source.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_data_ack_to_source.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_simple.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_simple.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/fetcher.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/fetcher.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor/usage/config.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/read_table_scan_unit.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/read_table_scan_unit.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/manager/generic_manager.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/generic_manager.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/abstract/initialization.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/initialization.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_allocator/txallocator.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp |61.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |61.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |61.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/initializer/manager.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/manager.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/objects_cache.cpp |61.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/objects_cache.cpp |61.1%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/datashard/build_kqp_data_tx_out_rs_unit.cpp |61.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/manager/object.cpp |61.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/object.cpp |61.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_source_cursor.cpp |61.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_source_cursor.cpp |61.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/abstract.cpp |61.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/abstract.cpp |61.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |61.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |61.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |61.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/libstorage-actualizer-abstract.a |61.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/libstorage-actualizer-abstract.a |61.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/libstorage-actualizer-abstract.a |61.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/initializer/accessor_init.cpp |61.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/accessor_init.cpp |61.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/constructor.cpp |61.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/constructor.cpp |61.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |61.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |61.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |61.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/local_discovery/grpc_service.cpp |61.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/local_discovery/grpc_service.cpp |61.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/local_discovery/libydb-services-local_discovery.a |61.2%| [AR] {RESULT} $(B)/ydb/services/local_discovery/libydb-services-local_discovery.a |61.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/local_discovery/libydb-services-local_discovery.a |61.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/deprecated/client/msgbus_client.cpp |61.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/client/msgbus_client.cpp |61.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/lib/deprecated/client/liblib-deprecated-client.a |61.2%| [AR] {RESULT} $(B)/ydb/public/lib/deprecated/client/liblib-deprecated-client.a |61.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/constructor.cpp |61.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/lib/deprecated/client/liblib-deprecated-client.a |61.3%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/constructor.cpp |61.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |61.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |61.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |61.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/modify_permissions.cpp |61.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/modify_permissions.cpp |61.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ymq/grpc_service.cpp |61.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ymq/grpc_service.cpp |61.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ymq/libydb-services-ymq.a |61.3%| [AR] {RESULT} $(B)/ydb/services/ymq/libydb-services-ymq.a |61.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ymq/libydb-services-ymq.a |61.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/chunks/data.cpp |61.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/chunks/data.cpp |61.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a |61.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a |61.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_split_dst.cpp |61.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a |61.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_split_dst.cpp |61.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/optimizer.cpp |61.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/optimizer.cpp |61.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/initializer/initializer.cpp |61.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |61.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/check_data_tx_unit.cpp |61.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |61.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_data_tx_unit.cpp |61.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/initializer.cpp |61.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |61.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/granule/portions_index.cpp |61.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |61.3%| [AR] {RESULT} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |61.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |61.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/abstract/kqp_common.cpp |61.3%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/kqp_common.cpp |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/granule/granule.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/abstract/fetcher.cpp |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/fetcher.cpp |61.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a |61.4%| [AR] {RESULT} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a |61.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ydb_convert/topic_description.cpp |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/topic_description.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/service/executor.cpp |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/executor.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/snapshot_scheme.cpp |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/snapshot_scheme.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/service/activation.cpp |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/activation.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/snapshot_scheme.cpp |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/snapshot_scheme.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/ds_table/accessor_subscribe.cpp |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_subscribe.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/column_features.cpp |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/column_features.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor/usage/events.cpp |61.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/conveyor/usage/libtx-conveyor-usage.a |61.4%| [AR] {RESULT} $(B)/ydb/core/tx/conveyor/usage/libtx-conveyor-usage.a |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/tools/kqprun/src/kqp_runner.cpp |61.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/src/kqp_runner.cpp |61.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/index/index.cpp |61.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a |61.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/constructor.cpp |61.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/constructor.cpp |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp |61.5%| [AR] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |61.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/granule/stages.cpp |61.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/common/service.cpp |61.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a |61.5%| [AR] {RESULT} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/versioned_index.cpp |61.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/versioned_index.cpp |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/tiering.cpp |61.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/tiering.cpp |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/purge_queue.cpp |61.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/purge_queue.cpp |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/index_info.cpp |61.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/index_info.cpp |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/resolver.cpp |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/load_tx_details_unit.cpp |61.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/resolver.cpp |61.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/load_tx_details_unit.cpp |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp |61.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp |61.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |61.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |61.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |61.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/iterator.cpp |61.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ydb_convert/tx_proxy_status.cpp |61.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/tx_proxy_status.cpp |61.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/formats/arrow/arrow_helpers.cpp |61.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/formats/arrow/serializer/abstract.cpp |61.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/abstract.cpp |61.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/abstract.cpp |61.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |61.6%| [AR] {RESULT} 
$(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |61.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/formats/arrow/serializer/abstract.cpp |61.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |61.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/formats/arrow/libcore-formats-arrow.a |61.6%| [AR] {RESULT} $(B)/ydb/core/formats/arrow/libcore-formats-arrow.a |61.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp |61.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/query_utils.cpp |61.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/query_utils.cpp |61.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |61.6%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |61.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |61.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/formats/arrow/program/execution.cpp |61.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.a |61.6%| [AR] {RESULT} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.a |61.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__get_state_tx.cpp |61.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__get_state_tx.cpp |61.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/create_table_unit.cpp |61.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_table_unit.cpp |61.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp |61.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp |61.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |61.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |61.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/metadata.cpp |61.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/metadata.cpp |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common/description.cpp |61.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/description.cpp |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetched_data.cpp |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_write_out_rs_unit.cpp |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/portion_info.cpp |61.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/portion_info.cpp |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/meta.cpp |61.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/meta.cpp |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/tests/tools/kqprun/src/ydb_setup.cpp |61.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/src/ydb_setup.cpp |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/events.cpp |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/service/add_data.cpp |61.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/add_data.cpp |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/service/deleting.cpp |61.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/deleting.cpp |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/granule/storage.cpp |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp |61.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp |61.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |61.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |61.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |61.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/data_accessor.cpp |61.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/data_accessor.cpp |61.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/column_engine.cpp |61.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/column_engine.cpp |61.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/schema_version.cpp |61.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/schema_version.cpp |61.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |61.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |61.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |61.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/execute_kqp_data_tx_unit.cpp |61.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/constructor.cpp |61.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |61.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |61.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/tools/kqprun/src/actors.cpp |61.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/src/actors.cpp |61.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/service/add_index.cpp |61.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/add_index.cpp |61.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/export_common.cpp |61.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |61.8%| [AR] {RESULT} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |61.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a 
|61.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/abstract.cpp |61.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/abstract.cpp |61.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/constructor.cpp |61.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/constructor.cpp |61.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.a |61.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.a |61.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.a |61.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/datastreams/put_records_actor.cpp |61.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/put_records_actor.cpp |61.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp |61.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_distributed_erase.cpp |61.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_distributed_erase.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp |61.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a |61.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp |61.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a |61.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/scheme.cpp |61.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/merge.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_allocator/txallocator__scheme.cpp |61.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator__scheme.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common/result.cpp |61.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/result.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_subdomain_path_id.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_accessor.cpp |61.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_accessor.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_allocator/txallocator_impl.cpp |61.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_impl.cpp |61.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetched_data.cpp |61.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetched_data.cpp |61.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a |61.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a |61.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp |61.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tenant.cpp |62.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tenant.cpp |62.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp |62.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp |62.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp |62.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp |62.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |62.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |62.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |62.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__kqp_scan.cpp |62.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common/conveyor_task.cpp |62.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/conveyor_task.cpp |62.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_write_operation.cpp |62.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/limit.cpp |62.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/limit.cpp |62.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/result.cpp |62.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/result.cpp |62.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/write_with_blobs.cpp |62.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/write_with_blobs.cpp |62.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |62.0%| [AR] 
{RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |62.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp |62.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |62.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp |62.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |62.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |62.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/limit_sorted.cpp |62.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/limit_sorted.cpp |62.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |62.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/manager.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/reshuffle_kmeans.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/written.cpp |62.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/written.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetched_data.cpp |62.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetched_data.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/local_kmeans.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/recompute_kmeans.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/datastreams/datastreams_proxy.cpp |62.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/datastreams_proxy.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/read_with_blobs.cpp |62.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/read_with_blobs.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp |62.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/run/service_initializer.cpp |62.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/service_initializer.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_allocator/txallocator__reserve.cpp |62.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator__reserve.cpp |62.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |62.1%| [AR] {RESULT} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |62.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |62.1%| [CC] {default-linux-x86_64, 
relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_cache_append.cpp |62.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_cache_append.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.cpp |62.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/change_sender_cdc_stream.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/meta.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/meta.cpp |62.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_cursor.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_cursor.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/not_sorted.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/not_sorted.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/db_wrapper.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/db_wrapper.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_kqp_delete_rows.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/context.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/context.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_kqp_compute.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/policy.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/policy.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/prefix_kmeans.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/constructors.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructors.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetching.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/external_sources/object_storage.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetching.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/logic.cpp |62.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/external_sources/libydb-core-external_sources.a |62.2%| [AR] {RESULT} $(B)/ydb/core/external_sources/libydb-core-external_sources.a |62.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |62.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/columnshard/engines/changes/with_appended.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/with_appended.cpp |62.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/iterator.cpp |62.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/iterator.cpp |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.cpp |62.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.cpp |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_portion_chunk.cpp |62.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_portion_chunk.cpp |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp |62.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp |62.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a |62.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a |62.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/read_metadata.cpp |62.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |62.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_root.cpp |62.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_root.cpp |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_genconfig.cpp |62.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_genconfig.cpp |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_kqp.cpp |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_validate_config.cpp |62.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_validate_config.cpp |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/column_record.cpp |62.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/column_record.cpp |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.cpp |62.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.cpp |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/merge.cpp |62.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/libreader-simple_reader-duplicates.a |62.3%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/libreader-simple_reader-duplicates.a |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/create_persistent_snapshot_unit.cpp |62.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_persistent_snapshot_unit.cpp |62.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/sub_columns_fetching.cpp |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/predicate/container.cpp |62.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/sub_columns_fetching.cpp |62.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/predicate/container.cpp |62.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |62.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |62.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/sample_k.cpp |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/ttl.cpp |62.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/ttl.cpp |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/predicate/range.cpp |62.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/predicate/range.cpp |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/secondary_index.cpp |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue.cpp |62.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue.cpp |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/interval.cpp |62.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/interval.cpp |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/context.cpp |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_s3_downloads.cpp |62.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_s3_downloads.cpp |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/read_metadata.cpp |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/datastreams/grpc_service.cpp |62.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |62.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |62.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/grpc_service.cpp |62.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/datastreams/libydb-services-datastreams.a |62.4%| [AR] {RESULT} $(B)/ydb/services/datastreams/libydb-services-datastreams.a |62.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_console.cpp |62.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_console.cpp |62.5%| [AR] 
{BAZEL_UPLOAD} $(B)/ydb/services/datastreams/libydb-services-datastreams.a |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/export_s3_uploader.cpp |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/predicate/filter.cpp |62.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/predicate/filter.cpp |62.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |62.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |62.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/columns/schema.cpp |62.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/columns/schema.cpp |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/full_scan_sorted.cpp |62.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/full_scan_sorted.cpp |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__engine_host.cpp |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_bs.cpp |62.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_bs.cpp |62.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |62.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp |62.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp |62.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp |62.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/remap.cpp |62.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/remap.cpp |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_cluster_discovery.cpp |62.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_cluster_discovery.cpp |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/execute_kqp_scan_tx_unit.cpp |62.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_kqp_scan_tx_unit.cpp |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/table_index.cpp |62.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/table_index.cpp |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_stress.cpp |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/merger.cpp |62.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_stress.cpp |62.5%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/driver_lib/run/main.cpp |62.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/main.cpp |62.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/run/config.cpp |62.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |62.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/config.cpp |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/run/factories.cpp |62.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/factories.cpp |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/changes.cpp |62.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/changes.cpp |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/common/session/common.cpp |62.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/common/session/common.cpp |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/logic.cpp |62.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/logic.cpp |62.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/libchanges-compaction-dictionary.global.a |62.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/libchanges-compaction-dictionary.global.a |62.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/libchanges-compaction-dictionary.global.a |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_fakeinitshard.cpp |62.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_fakeinitshard.cpp |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/rpc_long_tx.cpp |62.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/rpc_long_tx.cpp |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/change_exchange_split.cpp |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/scanner.cpp |62.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a |62.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a |62.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp |62.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/run/config_parser.cpp |62.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/config_parser.cpp |62.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_node.cpp |62.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp |62.7%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/driver_lib/cli_utils/cli_cmds_node.cpp |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/compacted.cpp |62.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/compacted.cpp |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/common/common.cpp |62.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/common/common.cpp |62.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |62.7%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |62.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/plain_read_data.cpp |62.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/plain_read_data.cpp |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_portion.cpp |62.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_portion.cpp |62.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |62.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/granule_view.cpp |62.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/granule_view.cpp |62.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/base/action.cpp |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/run/config_helpers.cpp |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_portions.cpp |62.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_portions.cpp |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/fifo_cleanup.cpp |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/column_engine_logs.cpp |62.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/column_engine_logs.cpp |62.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/builder.cpp |62.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/builder.cpp |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/set_queue_attributes.cpp |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/create_user.cpp |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/logic.cpp |62.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/logic.cpp |62.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |62.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |62.8%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/auth_factory.cpp |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_context.cpp |62.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_context.cpp |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/run/kikimr_services_initializers.cpp |62.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/kikimr_services_initializers.cpp |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_tables.cpp |62.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_tables.cpp |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/adapter.cpp |62.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/libolap-bg_tasks-adapter.a |62.8%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/libolap-bg_tasks-adapter.a |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/cfg.cpp |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/base/queue_attributes.cpp |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.cpp |62.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.cpp |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/iterator.cpp |62.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/iterator.cpp |62.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |62.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |62.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_config.cpp |62.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_config.cpp |62.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/merge_subset.cpp |62.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/merge_subset.cpp |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/common/context.cpp |62.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |62.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp |62.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp |62.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |62.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |62.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |62.9%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/control.cpp |62.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/control.cpp |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/tag_queue.cpp |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/protect_scheme_echoes_unit.cpp |62.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/protect_scheme_echoes_unit.cpp |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_debug.cpp |62.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_debug.cpp |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction.cpp |62.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction.cpp |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetch_steps.cpp |62.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetch_steps.cpp |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/store_snapshot_tx_unit.cpp |62.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_snapshot_tx_unit.cpp |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/service.cpp |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp |62.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |62.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/logic.cpp |62.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/run/auto_config_initializer.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/logic.cpp |63.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |63.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |63.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/abstract.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/node_tracker.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/abstract.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/metering.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/deprecated/kicli/error.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/kicli/error.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/deprecated/kicli/kikimr.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/kicli/kikimr.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/deprecated/kicli/dynamic_node.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/kicli/dynamic_node.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/ymq/base/dlq_helpers.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/filler.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/filler.cpp |63.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a |63.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a |63.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/data_events/shards_splitter.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/data_events/shards_splitter.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_disk.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_disk.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/retention.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/service/manager.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ydb_convert/table_settings.cpp |63.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/list_queue_tags.cpp |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_admin.cpp |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/validators/core_validators.cpp |63.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/core_validators.cpp |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/base/run_query.cpp |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmd_config.cpp |63.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmd_config.cpp |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/health/health.cpp |63.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/health/health.cpp |63.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |63.1%| [AR] {RESULT} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |63.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/validators/validator_nameservice.cpp |63.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/validator_nameservice.cpp |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/init/init.cpp |63.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/init/init.cpp |63.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |63.1%| [AR] {RESULT} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |63.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/validators/registry.cpp |63.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/registry.cpp |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_initroot.cpp |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/proxy_actor.cpp |63.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_initroot.cpp |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/schema.cpp |63.1%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_ack_from_initiator.cpp |63.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_ack_from_initiator.cpp |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/usage/service.cpp |63.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/auth_multi_factory.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/send_message.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/manager.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/constructor.cpp |63.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a |63.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a |63.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/constructor.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/get_queue_attributes.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/service/events.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/delete_queue.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/manager/shared_blobs.cpp |63.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/manager/shared_blobs.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_start_from_initiator.cpp |63.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_start_from_initiator.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/loading/stages.cpp |63.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/loading/stages.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/construction/context.cpp |63.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |63.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |63.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/construction/context.cpp |63.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |63.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a |63.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a |63.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_reader/fetching_steps.cpp |63.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_reader/fetching_steps.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/merged_column.cpp |63.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/merged_column.cpp |63.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |63.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |63.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/validators/validator_bootstrap.cpp |63.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/queue_leader.cpp |63.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_actorsystem_perftest.cpp |63.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/service/category.cpp |63.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_actorsystem_perftest.cpp |63.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/validator_bootstrap.cpp |63.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/manager/sessions.cpp |63.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/manager/sessions.cpp |63.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/index_events_processor.cpp |63.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |63.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |63.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/describe.cpp |63.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |63.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/describe.cpp |63.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/delete_message.cpp |63.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/http.cpp |63.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/http.cpp |63.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_from_source.cpp |63.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_from_source.cpp |63.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_cms.cpp |63.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_cms.cpp |63.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_meta.cpp |63.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_meta.cpp |63.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |63.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |63.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |63.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tiering/tier/object.cpp |63.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/tier/object.cpp |63.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a |63.4%| [AR] {RESULT} 
$(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a |63.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_repl_offsets.cpp |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/version/version.cpp |63.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/version/version.cpp |63.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/driver_lib/version/libversion.a |63.4%| [AR] {RESULT} $(B)/ydb/core/driver_lib/version/libversion.a |63.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/version/libversion.a |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/monitoring.cpp |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/resolvereq.cpp |63.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/resolvereq.cpp |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp |63.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |63.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/read_table_impl.cpp |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.cpp |63.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.cpp |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/datareq.cpp |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/service/scope.cpp |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/config/init/dummy.cpp |63.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/init/dummy.cpp |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/delete_user.cpp |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/commitreq.cpp |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/converter.cpp |63.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/converter.cpp |63.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/modifications_validator.cpp |63.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/modifications_validator.cpp |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/source.cpp |63.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/source.cpp |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_reader/actor.cpp |63.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/actor.cpp |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/immediate_controls_configurator.cpp |63.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/immediate_controls_configurator.cpp |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/create_queue.cpp |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_reader/fetcher.cpp |63.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_reader/fetcher.cpp |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/columnshard/data_sharing/modification/transactions/tx_change_blobs_owning.cpp |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/constructor.cpp |63.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |63.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |63.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |63.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/upload_rows_counters.cpp |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/incr_restore_scan.cpp |63.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/incr_restore_scan.cpp |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/modification/events/change_owning.cpp |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_accessor/request.cpp |63.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/modification/events/change_owning.cpp |63.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |63.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |63.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_utils/cli.cpp |63.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli.cpp |63.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/complete_data_tx_unit.cpp |63.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/complete_data_tx_unit.cpp |63.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |63.6%| [AR] {RESULT} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |63.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |63.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/run/run.cpp |63.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/run.cpp |63.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/util/memory_tracker.cpp |63.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/driver_lib/run/librun.a |63.6%| [AR] {RESULT} $(B)/ydb/core/driver_lib/run/librun.a |63.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/memory_tracker.cpp |63.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/run/librun.a |63.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/usage/common.cpp |63.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/service/process.cpp |63.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/manager.cpp |63.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/general_compaction.cpp |63.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/general_compaction.cpp |63.6%| [AR] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |63.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |63.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |63.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_locks/manager/manager.cpp |63.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/cursor.cpp |63.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/cursor.cpp |63.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |63.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |63.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |63.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |63.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |63.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/snapshotreq.cpp |63.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/destination/session/destination.cpp |63.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/session/destination.cpp |63.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |63.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |63.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/constructor.cpp |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/remove_portions.cpp |63.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/remove_portions.cpp |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/transfer.cpp |63.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/transfer.cpp |63.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |63.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/collector.cpp |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_locks/locks/list.cpp |63.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/list.cpp |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_locks/locks/snapshot.cpp |63.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/snapshot.cpp |63.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |63.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |63.7%| 
[AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_reader/actor.cpp |63.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_reader/actor.cpp |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_pipeline.cpp |63.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_pipeline.cpp |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/proxy.cpp |63.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy.cpp |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/control.cpp |63.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/control.cpp |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/internal/nodes_health_check.cpp |63.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/nodes_health_check.cpp |63.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/merger.cpp |63.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/merger.cpp |63.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |63.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |63.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |63.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__create_tenant.cpp |63.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__create_tenant.cpp |63.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/indexes/update.cpp |63.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/indexes/update.cpp |63.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/constructor.cpp |63.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_accessor/cache_policy/policy.cpp |63.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |63.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |63.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_accessor/cache_policy/libcolumnshard-data_accessor-cache_policy.a |63.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/cache_policy/libcolumnshard-data_accessor-cache_policy.a |63.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/controller/controller.cpp |63.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/controller/controller.cpp |63.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |63.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |63.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |63.8%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/tx_allocator_client/actor_client.cpp |63.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator_client/actor_client.cpp |63.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a |63.8%| [AR] {RESULT} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a |63.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a |63.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_update_config.cpp |63.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_remove_blobs.cpp |63.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_update_config.cpp |63.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_remove_blobs.cpp |63.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__load_state.cpp |63.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__load_state.cpp |63.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_draft.cpp |63.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/tier/adapter.cpp |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/move_portions.cpp |63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/move_portions.cpp |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_actor.cpp |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/blob_manager_db.cpp |63.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |63.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__get_log_tail.cpp |63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__get_log_tail.cpp |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console_tenants_manager.cpp |63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_tenants_manager.cpp |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console_configuration_info_collector.cpp |63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_configuration_info_collector.cpp |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_reader/events.cpp |63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/events.cpp |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_split_src.cpp |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_reader/read_coordinator.cpp |63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/read_coordinator.cpp |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_accessor/manager.cpp |63.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |63.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write_index.cpp |63.9%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write_index.cpp |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/time_cast/time_cast.cpp |63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/time_cast/time_cast.cpp |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_get.cpp |63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_get.cpp |63.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a |63.9%| [AR] {RESULT} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a |64.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a |63.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/schemereq.cpp |64.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/counters/counters_manager.cpp |64.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/schemereq.cpp |64.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/modification/tasks/modification.cpp |64.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/modification/tasks/modification.cpp |64.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |64.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |64.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |64.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__replace_yaml_config.cpp |64.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__replace_yaml_config.cpp |64.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_data_from_source.cpp |64.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_data_from_source.cpp |64.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |64.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |64.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |64.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/validators/validator.cpp |64.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/validator.cpp |64.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_remove_task.cpp |64.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/console/validators/libcms-console-validators.a |64.0%| [AR] {RESULT} $(B)/ydb/core/cms/console/validators/libcms-console-validators.a |64.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/cms/console/validators/libcms-console-validators.a |64.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_task.cpp |64.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__toggle_config_validator.cpp |64.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__toggle_config_validator.cpp |64.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/logger.cpp |64.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/logger.cpp |64.0%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/testlib/audit_helpers/audit_helper.cpp |64.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/audit_helpers/audit_helper.cpp |64.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/testlib/audit_helpers/libcore-testlib-audit_helpers.a |64.0%| [AR] {RESULT} $(B)/ydb/core/testlib/audit_helpers/libcore-testlib-audit_helpers.a |64.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/audit_helpers/libcore-testlib-audit_helpers.a |64.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/validators.cpp |64.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/manager.cpp |64.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/manager.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/column_families/schema.cpp |64.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |64.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |64.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/column_families/schema.cpp |64.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__update_subdomain_key.cpp |64.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_subdomain_key.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/usage/config.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp |64.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storages_manager.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/common/utils.cpp |64.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/compute/common/liblibs-compute-common.a |64.1%| [AR] {RESULT} $(B)/ydb/core/fq/libs/compute/common/liblibs-compute-common.a |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/bs/write.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_active_transaction.cpp |64.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_active_transaction.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__replace_config_subscriptions.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__plan_step.cpp |64.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__replace_config_subscriptions.cpp |64.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__plan_step.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_reader/contexts.cpp |64.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_reader/contexts.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_change_sending.cpp |64.2%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_change_sending.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/indexes/schema.cpp |64.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/indexes/schema.cpp |64.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_reader/fetching_executor.cpp |64.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |64.2%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |64.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_reader/fetching_executor.cpp |64.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storage.cpp |64.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storage.cpp |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__update_tenant_state.cpp |64.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_tenant_state.cpp |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__log_cleanup.cpp |64.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__log_cleanup.cpp |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/walle_remove_task_adapter.cpp |64.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_remove_task_adapter.cpp |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/usage/events.cpp |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/bs/blob_manager.cpp |64.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/conveyor_composite/usage/libtx-conveyor_composite-usage.a |64.2%| [AR] {RESULT} $(B)/ydb/core/tx/conveyor_composite/usage/libtx-conveyor_composite-usage.a |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/basics/helpers.cpp |64.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/helpers.cpp |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/base_utils/format_info.cpp |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__remove_tenant_done.cpp |64.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_tenant_done.cpp |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/actor.cpp |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/mediator_queue.cpp |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/engine/minikql/flat_local_tx_factory.cpp |64.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/engine/minikql/flat_local_tx_factory.cpp |64.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a |64.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__update_tenant_pool_config.cpp |64.2%| [AR] {RESULT} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a |64.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_tenant_pool_config.cpp |64.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms.cpp |64.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms.cpp |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/fq/libs/control_plane_storage/internal/task_result_write.cpp |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_indexed.cpp |64.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_indexed.cpp |64.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |64.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |64.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator.cpp |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__remove_computational_units.cpp |64.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_computational_units.cpp |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_reader/task.cpp |64.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/task.cpp |64.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |64.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |64.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_proxy/control_plane_proxy.cpp |64.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |64.3%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |64.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/control_plane_proxy.cpp |64.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/jaeger_tracing_configurator.cpp |64.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/jaeger_tracing_configurator.cpp |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/bs/storage.cpp |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console_configs_subscriber.cpp |64.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_configs_subscriber.cpp |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_bindings.cpp |64.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_bindings.cpp |64.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__drop_yaml_config.cpp |64.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__drop_yaml_config.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_actor.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/net_classifier_updater.cpp |64.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/net_classifier_updater.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/viewer/protos/viewer.pb.cc |64.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/viewer/protos/viewer.pb.cc |64.4%| [AR] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/viewer/protos/libcore-viewer-protos.a |64.4%| [AR] {RESULT} $(B)/ydb/core/viewer/protos/libcore-viewer-protos.a |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/basics/runtime.cpp |64.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/viewer/protos/libcore-viewer-protos.a |64.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/runtime.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__get_yaml_config.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__monitoring.cpp |64.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__get_yaml_config.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc.cpp |64.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/make_scan_snapshot_unit.cpp |64.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/make_scan_snapshot_unit.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc_actor.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/config/init/init.cpp |64.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/init/init.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/prepare_kqp_data_tx_in_rs_unit.cpp |64.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/prepare_kqp_data_tx_in_rs_unit.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__schema_upgrade.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/ydb/synchronization_service/synchronization_service.cpp |64.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/compute/ydb/synchronization_service/libcompute-ydb-synchronization_service.a |64.4%| [AR] {RESULT} $(B)/ydb/core/fq/libs/compute/ydb/synchronization_service/libcompute-ydb-synchronization_service.a |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_store_permissions.cpp |64.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_store_permissions.cpp |64.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/feature_flags_configurator.cpp |64.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/feature_flags_configurator.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console_handshake.cpp |64.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_handshake.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__update_confirmed_subdomain.cpp |64.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_confirmed_subdomain.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__get_yaml_metadata.cpp |64.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__get_yaml_metadata.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/check_scheme_tx_unit.cpp |64.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_scheme_tx_unit.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/util/config_index.cpp |64.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/console/util/libcms-console-util.a |64.5%| [AR] {RESULT} $(B)/ydb/core/cms/console/util/libcms-console-util.a |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/basics/appdata.cpp |64.5%| 
[CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/appdata.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__remove_tenant_failed.cpp |64.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy.cpp |64.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_tenant_failed.cpp |64.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |64.5%| [AR] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |64.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/tier/storage.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__schema.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__revert_pool_state.cpp |64.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__revert_pool_state.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/optimizer.cpp |64.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/error.cpp |64.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/optimizer.cpp |64.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |64.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console.cpp |64.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |64.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console.cpp |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__remove_config_subscriptions.cpp |64.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_config_subscriptions.cpp |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/walle_api_handler.cpp |64.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_api_handler.cpp |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console_configs_provider.cpp |64.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_configs_provider.cpp |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/basics/services.cpp |64.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/services.cpp |64.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |64.6%| [AR] {RESULT} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |64.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/http.cpp |64.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/http.cpp |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/base/events_writer.cpp |64.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/base/libcore-ymq-base.a |64.6%| [AR] 
{RESULT} $(B)/ydb/core/ymq/base/libcore-ymq-base.a |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__s3_upload_txs.cpp |64.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__s3_upload_txs.cpp |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_direct_transaction.cpp |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__check.cpp |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/keyvalue_write.cpp |64.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/keyvalue_write.cpp |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/storages_manager/manager.cpp |64.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |64.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |64.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/configs_config.cpp |64.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_config.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_s3_uploads.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__compact_borrowed.cpp |64.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__compact_borrowed.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/yql_single_query.cpp |64.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/yql_single_query.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/cdc_stream_scan.cpp |64.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/cdc_stream_scan.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__stop_guard.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_remove_expired_notifications.cpp |64.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_expired_notifications.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__set_config.cpp |64.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__set_config.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/tier/write.cpp |64.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |64.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__alter_tenant.cpp |64.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__alter_tenant.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/base_utils/format_util.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_s3_upload_rows.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/subscriber.cpp |64.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/subscriber.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/log_settings_configurator.cpp |64.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/log_settings_configurator.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/deprecated/kicli/schema.cpp |64.7%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/kicli/schema.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__restore_params.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/config_helpers.cpp |64.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/logger.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/config_helpers.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/logger.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__update_pool_state.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_pool_state.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__update_last_provided_config.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_last_provided_config.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_log_cleanup.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_log_cleanup.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__remove_tenant.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_tenant.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console_configs_manager.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_configs_manager.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/kqp.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/vdisk_write.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/deprecated/kicli/result.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/walle_create_task_adapter.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/kqp.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_create_task_adapter.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/vdisk_write.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_compute_database.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_compute_database.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/account_read_quoter.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/account_read_quoter.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/read_quoter.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_quoter.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator_state.cpp |64.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator_state.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/store_distributed_erase_tx_unit.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/ycsb/test_load_actor.cpp |64.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/partition_sourcemanager.cpp |64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_sourcemanager.cpp |64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/test_load_actor.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/executor.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/cms/console/configs_dispatcher.cpp |64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_dispatcher.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/ycsb/kqp_upsert.cpp |64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/kqp_upsert.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/service/service.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_remove_permissions.cpp |64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_permissions.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__read_step_subscriptions.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/sourceid.cpp |64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/sourceid.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/remove_locks.cpp |64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/remove_locks.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/partition_compaction.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__add_config_subscription.cpp |64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_compaction.cpp |64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__add_config_subscription.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__init.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__mon_reset_schema_version.cpp |64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__mon_reset_schema_version.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/hulldb_bulksst_add.cpp |64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/hulldb_bulksst_add.cpp |64.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |64.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |64.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator_impl.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/actor.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/events/events.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__acquire_read_step.cpp |65.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__acquire_read_step.cpp |64.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp |65.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a |65.0%| [AR] {RESULT} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a |65.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/events/events.cpp |65.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a |65.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/walle_check_task_adapter.cpp |65.0%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/cms/walle_check_task_adapter.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/user_settings_reader.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/user_info.cpp |65.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/user_info.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/list_all_topics_actor.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__last_step_subscriptions.cpp |65.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/list_all_topics_actor.cpp |65.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__last_step_subscriptions.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/get_queue_url.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cluster_info.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/list_permissions.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/write_quoter.cpp |65.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/write_quoter.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/data_events/columnshard_splitter.cpp |65.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/data_events/columnshard_splitter.cpp |65.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a |65.0%| [AR] {RESULT} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a |65.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_init_scheme.cpp |65.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_init_scheme.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/partition_blob_encoder.cpp |65.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_blob_encoder.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ownerinfo.cpp |65.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ownerinfo.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/receive_message.cpp |65.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/finish_propose_write_unit.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/event_helpers.cpp |65.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finish_propose_write_unit.cpp |65.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/event_helpers.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/writer/writer.cpp |65.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/writer/writer.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/internal/rate_limiter_resources.cpp |65.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/rate_limiter_resources.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator__configure.cpp |65.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__configure.cpp |65.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |65.1%| [AR] {RESULT} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/sentinel.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/config/init/init_noop.cpp |65.1%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/cms/sentinel.cpp |65.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/init/init_noop.cpp |65.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__remove_config_subscription.cpp |65.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_config_subscription.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/walle_list_tasks_adapter.cpp |65.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_list_tasks_adapter.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/service_actor.cpp |65.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/service_actor.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/mirrorer.cpp |65.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/mirrorer.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/proxy_service.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/queue_schema.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/cluster_tracker.cpp |65.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/cluster_tracker.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__cleanup_subscriptions.cpp |65.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_update_downtimes.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__cleanup_subscriptions.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_update_downtimes.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/untag_queue.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/export/session/task.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/erasure_checkers.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/erasure_checkers.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/partition_init.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_init.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/service/workers_pool.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/configs_dispatcher_proxy.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_dispatcher_proxy.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/export/session/selector/backup/selector.cpp |65.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a |65.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/ycsb/kqp_select.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/kqp_select.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__configure.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__configure.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/api_adapters.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/api_adapters.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/util.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/util.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/fq/libs/control_plane_storage/in_memory_control_plane_storage.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/in_memory_control_plane_storage.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__cleanup_in_rs.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__cleanup_in_rs.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/check_read_unit.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_read_unit.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/transaction.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemon.cpp |65.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/transaction.cpp |65.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/backpressure/queue.cpp |65.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/queue.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blob_cache.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/backpressure/load_based_timeout.cpp |65.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/load_based_timeout.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__compaction.cpp |65.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__compaction.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/partition_monitoring.cpp |65.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_monitoring.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/pq_l2_cache.cpp |65.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq_l2_cache.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/read_balancer__balancing.cpp |65.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer__balancing.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_reject_notification.cpp |65.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_reject_notification.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/export/session/control.cpp |65.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a |65.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/writer/source_id_encoding.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/backpressure/unisched.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/writer/metadata_initializers.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/configs_cache.cpp |65.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_cache.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/partition.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_defs.cpp |65.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_defs.cpp |65.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition.cpp |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp |65.3%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/backpressure/event.cpp |65.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/event.cpp |65.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |65.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/partition_write.cpp |65.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |65.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_write.cpp |65.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_ping.cpp |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/read_balancer__balancing_app.cpp |65.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_ping.cpp |65.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer__balancing_app.cpp |65.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |65.4%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |65.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_huge.cpp |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request_reporting.cpp |65.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request_reporting.cpp |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/abstract.cpp |65.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/abstract.cpp |65.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |65.4%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |65.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_snapshot.cpp |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/balance/deleter.cpp |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/fetch_request_actor.cpp |65.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/fetch_request_actor.cpp |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp |65.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/partition_scale_request.cpp |65.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_scale_request.cpp |65.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp 
|65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/update_group_latencies.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_quotas.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_quotas.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/update_last_seen_ready.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/update_last_seen_ready.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/pq_impl_app.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq_impl_app.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/ydb/result_writer_actor.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/group_sessions.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/group_sessions.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/appdata.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/pq.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/read_balancer_app.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer_app.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/downtime.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/downtime.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/statestorage_guardian.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_get_log_tail.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_get_log_tail.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block.cpp |65.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |65.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |65.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/partition_read.cpp |65.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_read.cpp |65.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemonactor.cpp |65.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemonactor.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/writer/partition_chooser_impl.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/info_collector.cpp |65.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/info_collector.cpp |65.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/writer/partition_chooser_impl.cpp |65.6%| [AR] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a |65.6%| [AR] {RESULT} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a |65.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_remove_request.cpp |65.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_request.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_log_and_send.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp |65.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_log_and_send.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/wilson_tracing_control.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/board_publish.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp |65.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_check_integrity_get.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/sys_view.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp |65.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_check_integrity_get.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/http/types.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/board_replica.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp |65.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/request_controller_info.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/dread_cache_service/caching_service.cpp |65.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp |65.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/dread_cache_service/caching_service.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp |65.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/update_seen_operational.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/partition_scale_manager.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/stat_processor.cpp |65.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_scale_manager.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/pq_impl.cpp |65.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq_impl.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp |65.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/statestorage_proxy.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/board_lookup.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/mind/bscontroller/scrub.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/http/xml.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/statestorage_event_filter.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp |65.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/read_balancer.cpp |65.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_stat.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/statestorage_monitoring.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp |65.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp |65.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/balance/sender.cpp |65.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/control/immediate_control_board_actor.cpp |65.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/control/immediate_control_board_actor.cpp |65.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/control/libydb-core-control.a |65.8%| [AR] {RESULT} $(B)/ydb/core/control/libydb-core-control.a |65.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/control/libydb-core-control.a |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_block.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/erase_rows_condition.cpp |65.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/erase_rows_condition.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__switch_drain.cpp |65.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__switch_drain.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp |65.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/cmds_box.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/statestorage_replica.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp |65.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/service/worker.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/minikql_compile/mkql_compile_service.cpp |65.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/minikql_compile/mkql_compile_service.cpp |65.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/conveyor_composite/service/libtx-conveyor_composite-service.a |65.8%| [AR] {RESULT} $(B)/ydb/core/tx/conveyor_composite/service/libtx-conveyor_composite-service.a |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_mon.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/balance/utils.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp |65.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/http/http.cpp |65.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/http/libcore-ymq-http.a |65.8%| [AR] {RESULT} $(B)/ydb/core/ymq/http/libcore-ymq-http.a |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_mon.cpp |65.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_mon.cpp |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_import_scheme_query_executor.cpp |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/export/session/selector/abstract/selector.cpp |65.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a |65.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/base_utils/node_by_host.cpp |65.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/driver_lib/base_utils/libbase_utils.a |65.9%| [AR] {RESULT} $(B)/ydb/core/driver_lib/base_utils/libbase_utils.a |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/shred.cpp |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp |65.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/json_handlers.cpp |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_source.cpp |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_process_notification.cpp |65.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_process_notification.cpp |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_dummy.cpp |65.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ydb/libydb-services-ydb.a |65.9%| [AR] {RESULT} $(B)/ydb/services/ydb/libydb-services-ydb.a |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__resume_tablet.cpp |65.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__resume_tablet.cpp |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/ttl/validator.cpp |65.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a |65.9%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a |65.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/node_checkers.cpp |66.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/node_checkers.cpp |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/session_actor/kqp_temp_tables_manager.cpp |66.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_temp_tables_manager.cpp |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/alter_cdc_stream_unit.cpp |66.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/alter_cdc_stream_unit.cpp |66.0%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/sync.cpp |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/select_groups.cpp |66.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |66.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/change_collector_async_index.cpp |66.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_collector_async_index.cpp |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__sync_tablets.cpp |66.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__sync_tablets.cpp |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/tools/kqprun/runlib/kikimr_setup.cpp |66.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/runlib/kikimr_setup.cpp |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/compute/ydb/ydb_run_actor.cpp |66.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/compute/ydb/liblibs-compute-ydb.a |66.0%| [AR] {RESULT} $(B)/ydb/core/fq/libs/compute/ydb/liblibs-compute-ydb.a |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/tools/kqprun/runlib/utils.cpp |66.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/runlib/utils.cpp |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_helpers.cpp |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console__init_scheme.cpp |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_process.cpp |66.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__init_scheme.cpp |66.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/console/libcore-cms-console.a |66.0%| [AR] {RESULT} $(B)/ydb/core/cms/console/libcore-cms-console.a |66.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/cms/console/libcore-cms-console.a |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tx_load_state.cpp |66.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_agg.cpp |66.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_load_state.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp |66.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/json_pipe_req.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/session_actor/kqp_worker_actor.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/session_actor/kqp_worker_common.cpp |66.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_worker_actor.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/propose_group_key.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/json_handlers_scheme.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__update_pile.cpp |66.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_pile.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/cms/cms_tx_store_walle_task.cpp |66.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_store_walle_task.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/session_actor/kqp_query_stats.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/register_node.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_request.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/locks/dependencies.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/discovery/discovery.cpp |66.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |66.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |66.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/discovery/libydb-core-discovery.a |66.1%| [AR] {RESULT} $(B)/ydb/core/discovery/libydb-core-discovery.a |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/viewer_topic_data.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_ic_debug.cpp |66.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_ic_debug.cpp |66.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/commit_config.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfullhandler.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__stop_tablet.cpp |66.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__stop_tablet.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/grpc_server.cpp |66.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/grpc_server.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/json_handlers_operation.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_stage_float_up.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/json_handlers_pdisk.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/store/store.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/schema/update.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_precompute.cpp |66.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a |66.2%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/json_handlers_storage.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_unreadable.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_change_sender_activation.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/storage_stats_calculator.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/config_fit_groups.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/viewer_request.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/mind/bscontroller/cmds_drive_status.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfull.cpp |66.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/columns/update.cpp |66.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |66.3%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |66.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replproxy.cpp |66.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replproxy.cpp |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/node_service/kqp_node_service.cpp |66.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/node_service/kqp_node_service.cpp |66.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a |66.3%| [AR] {RESULT} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a |66.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_sst.cpp |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/session_actor/kqp_session_actor.cpp |66.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_session_actor.cpp |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/update.cpp |66.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |66.3%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor/service/service.cpp |66.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor/service/service.cpp |66.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/conveyor/service/libtx-conveyor-service.a |66.3%| [AR] {RESULT} $(B)/ydb/core/tx/conveyor/service/libtx-conveyor-service.a |66.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/conveyor/service/libtx-conveyor-service.a |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/session_actor/kqp_response.cpp |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/session_actor/kqp_query_state.cpp |66.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a |66.3%| [AR] {RESULT} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a |66.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_actor.cpp |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/column_families/update.cpp |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/constructor.cpp |66.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/constructor.cpp |66.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_reader.cpp |66.3%| [AR] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a |66.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a |66.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |66.4%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |66.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a |66.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__update_tablet_groups.cpp |66.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablet_groups.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/sharding/hash_intervals.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_monactors.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_loggedrec.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__tablet_owners_reply.cpp |66.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__tablet_owners_reply.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/config.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__reassign_groups.cpp |66.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__reassign_groups.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__update_tablet_status.cpp |66.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablet_status.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_mon_dbmainpage.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/object.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__update_tablet_metrics.cpp |66.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablet_metrics.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/sharding/hash_modulo.cpp |66.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a |66.4%| [AR] {RESULT} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tiering/manager.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_root.cpp |66.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/constructor.cpp |66.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/constructor.cpp |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/context.cpp |66.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |66.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |66.5%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_and_wait_dependencies_unit.cpp |66.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_and_wait_dependencies_unit.cpp |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server.cpp |66.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server.cpp |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__status.cpp |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_pdisk.cpp |66.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__status.cpp |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/layout/layout.cpp |66.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |66.5%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__update_dc_followers.cpp |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/accessor/secret_id.cpp |66.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_dc_followers.cpp |66.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/accessor/secret_id.cpp |66.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |66.5%| [AR] {RESULT} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |66.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_base/cli_kicli.cpp |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__kill_node.cpp |66.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__kill_node.cpp |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/scrub/restore_corrupted_blob_actor.cpp |66.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |66.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__request_tablet_owners.cpp |66.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__request_tablet_owners.cpp |66.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/ds_table/table_exists.cpp |66.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/table_exists.cpp |66.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |66.6%| [AR] {RESULT} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |66.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/simple.cpp |66.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |66.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/splitter/batch_slice.cpp 
|66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/follower_edge.cpp |66.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |66.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__update_domain.cpp |66.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_domain.cpp |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/cmds_host_config.cpp |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/json_handlers_vdisk.cpp |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/receive_snapshot_cleanup_unit.cpp |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__response_tablet_seq.cpp |66.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__response_tablet_seq.cpp |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/proxy_impl.cpp |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replbroker.cpp |66.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replbroker.cpp |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replmonhandler.cpp |66.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replmonhandler.cpp |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/granule/clean_granule.cpp |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__process_boot_queue.cpp |66.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__process_boot_queue.cpp |66.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/json_wb_req.cpp |66.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/granule/clean_granule.cpp |66.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a |66.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a |66.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp |66.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_block_and_get.cpp |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_http_server.cpp |66.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_http_server.cpp |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__request_tablet_seq.cpp |66.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__request_tablet_seq.cpp |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/counters.cpp |66.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/counters.cpp |66.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a |66.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a 
|66.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/const.cpp |66.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/const.cpp |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/locks/read_start.cpp |66.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a |66.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a |66.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |66.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |66.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |66.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullrepljob.cpp |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__delete_tablet_result.cpp |66.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__delete_tablet_result.cpp |66.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullrepljob.cpp |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter.cpp |66.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |66.7%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |66.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |66.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/common/ss_dialog.cpp |66.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/common/ss_dialog.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/bsc.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/cmds_storage_pool.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_cms.cpp |66.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_cms.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_tablet_counters.cpp |66.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_tablet_counters.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/common/timeout.cpp |66.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |66.8%| [AR] {RESULT} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |66.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_blobstorage_config.cpp |66.8%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/client/server/msgbus_blobstorage_config.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__restart_tablet.cpp |66.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__restart_tablet.cpp |66.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/config_cmd.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/change_sender_async_index.cpp |66.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender_async_index.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__lock_tablet.cpp |66.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__lock_tablet.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/load_write_details_unit.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__release_tablets_reply.cpp |66.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__release_tablets_reply.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard__plan_step.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__seize_tablets_reply.cpp |66.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/storage_pool_info.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__seize_tablets_reply.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_pool_info.cpp |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_tablet_state.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_tablet_state.cpp |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mon/mon.cpp |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/constructor.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/constructor.cpp |66.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mon/libydb-core-mon.a |66.9%| [AR] {RESULT} $(B)/ydb/core/mon/libydb-core-mon.a |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/counters.cpp |66.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |66.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__cut_tablet_history.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__cut_tablet_history.cpp |66.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |66.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |66.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__unlock_tablet.cpp |66.9%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/mind/hive/tx__unlock_tablet.cpp |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__delete_tablet.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__delete_tablet.cpp |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_node_registration.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_node_registration.cpp |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__block_storage_result.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__block_storage_result.cpp |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__load_everything.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__load_everything.cpp |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tablet_move_info.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tablet_move_info.cpp |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__process_pending_operations.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__process_pending_operations.cpp |66.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__seize_tablets.cpp |67.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__seize_tablets.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__release_tablets.cpp |67.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__release_tablets.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/status.cpp |67.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |67.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/drop_index_notice_unit.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_drain_node.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__delete_node.cpp |67.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_drain_node.cpp |67.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__delete_node.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/wait_for_plan_unit.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__init_scheme.cpp |67.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__init_scheme.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_propagator.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__disconnect_node.cpp |67.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__disconnect_node.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_types.cpp |67.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_types.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/finish_propose_unit.cpp |67.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finish_propose_unit.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/client/server/msgbus_server_test_shard_request.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__configure_subdomain.cpp |67.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__configure_subdomain.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_scheduler.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc_actor.cpp |67.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__reassign_groups_on_decommit.cpp |67.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a |67.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__reassign_groups_on_decommit.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_loans.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/change_collector_cdc_stream.cpp |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_collector_cdc_stream.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/storage_balancer.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/follower_tablet_info.cpp |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/follower_tablet_info.cpp |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_balancer.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/query/query_statdb.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/hive_log.cpp |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_log.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/storage_group_info.cpp |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_group_info.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__adopt_tablet.cpp |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__adopt_tablet.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tablet_info.cpp |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tablet_info.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata.cpp |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__create_tablet.cpp |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__create_tablet.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/events/delete_blobs.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/query/query_barrier.cpp |67.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |67.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/leader_tablet_info.cpp |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/leader_tablet_info.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/mind/hive/hive_statics.cpp |67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_statics.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/query/query_range.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/monitoring.cpp |67.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/alter_table_unit.cpp |67.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/alter_table_unit.cpp |67.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/monitoring.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/query/query_readactor.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/transfer.cpp |67.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |67.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |67.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_compactionstate.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/fill.cpp |67.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/fill.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp |67.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/node_info.cpp |67.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/node_info.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/query/query_readbatch.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/counters/blobs_manager.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/hive_domains.cpp |67.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_domains.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__configure_scale_recommender.cpp |67.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__configure_scale_recommender.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tracing/tablet_info.cpp |67.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tracing/tablet_info.cpp |67.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tracing/libydb-core-tracing.a |67.2%| [AR] {RESULT} $(B)/ydb/core/tracing/libydb-core-tracing.a |67.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tracing/libydb-core-tracing.a |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/counters/engine_logs.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/counters/scan.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__generate_data_ut.cpp |67.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__generate_data_ut.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/conflicts_cache.cpp |67.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/conflicts_cache.cpp |67.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp |67.3%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_import__create.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/boot_queue.cpp |67.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/boot_queue.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__register_node.cpp |67.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__register_node.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp |67.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/drain.cpp |67.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/drain.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__object_storage_listing.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/query/query_stathuge.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/query/query_stattablet.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/query/query_public.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/domain_info.cpp |67.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/domain_info.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_scheme_initroot.cpp |67.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_scheme_initroot.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/cdc_stream_heartbeat.cpp |67.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/cdc_stream_heartbeat.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog.cpp |67.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/grouper.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_storage/request_validators.cpp |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/hive_impl.cpp |67.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_impl.cpp |67.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |67.3%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |67.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/storage_get_block.cpp |67.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_import__list.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/load_everything.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_resolve_node.cpp |67.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_resolve_node.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__update_tablets_object.cpp |67.4%| 
[CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablets_object.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/blobs_action/bs/remove.cpp |67.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/remove.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/counters/indexation.cpp |67.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |67.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/init_scheme.cpp |67.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |67.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a |67.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/disk_metrics.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_export__get.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/query/query_extr.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/initiate_build_index_unit.cpp |67.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/initiate_build_index_unit.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog_private_events.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/blobstorage_hullcompdelete.cpp |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_data_tx_out_rs_unit.cpp |67.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_data_tx_out_rs_unit.cpp |67.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a |67.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a |67.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/self_heal.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__unmark_restore_tables.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/mock/dsproxy_mock.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_recovery.cpp |67.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/dsproxy/mock/libblobstorage-dsproxy-mock.a |67.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/mock/libblobstorage-dsproxy-mock.a |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/query/assimilation.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_hive_create_tablet.cpp |67.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_hive_create_tablet.cpp |67.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |67.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/change_collector_base.cpp |67.5%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/datashard/change_collector_base.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/common/path_id.cpp |67.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/common/libtx-columnshard-common.a |67.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/common/libtx-columnshard-common.a |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/group_layout_checker.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/finalize_build_index_unit.cpp |67.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finalize_build_index_unit.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/layout_helpers.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/get_group.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/virtual_group.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_shred.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_persqueue.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_validate.cpp |67.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_persqueue.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_svp_migration.cpp |67.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_tx_request.cpp |67.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_tx_request.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/drop_donor.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/console_interaction.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_readbulksst.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ydb_convert/table_description.cpp |67.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a |67.6%| [AR] {RESULT} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a |67.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_state.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_self_pinger.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__readset.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_config_base/config_base.cpp |67.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/driver_lib/cli_config_base/libcore-driver_lib-cli_config_base.a |67.6%| [AR] {RESULT} $(B)/ydb/core/driver_lib/cli_config_base/libcore-driver_lib-cli_config_base.a |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/group_mapper.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_shard_deleter.cpp |67.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_shard_deleter.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_mon.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/client/server/msgbus_server_console.cpp |67.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_console.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_cost_tracker.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_event_filter.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_export.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_logreplay.cpp |67.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_path.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/config_fit_pdisks.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxywrite.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/config/validation/auth_config_validator.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/storage_discover.cpp |67.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_discover.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/cleanup_queue_data.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/node_report.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/migrate.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/balancer.cpp |67.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/balancer.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/recovery/hulldb_recovery.cpp |67.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/recovery/hulldb_recovery.cpp |67.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |67.7%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |67.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/config/validation/column_shard_config_validator.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/node_broker__load_state.cpp |67.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__load_state.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata_proxy.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/node_broker__init_scheme.cpp |67.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__init_scheme.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/group_metrics_exchange.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/hive.cpp |67.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/list_queues.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/node_warden_resource.cpp |67.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_export__list.cpp |67.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |67.8%| [AR] {RESULT} $(B)/ydb/core/mind/hive/libcore-mind-hive.a 
|67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_firstrun.cpp |67.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/prepare_write_tx_in_rs_unit.cpp |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_public.cpp |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_fsm.cpp |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/config/validation/validators.cpp |67.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/config/validation/libcore-config-validation.a |67.8%| [AR] {RESULT} $(B)/ydb/core/config/validation/libcore-config-validation.a |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/monitoring.cpp |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/session.cpp |67.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a |67.8%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_osiris.cpp |67.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |67.8%| [AR] {RESULT} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/load_and_wait_in_rs_unit.cpp |67.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/load_and_wait_in_rs_unit.cpp |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/store_commit_writes_tx_unit.cpp |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/address_classification/net_classifier.cpp |67.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a |67.8%| [AR] {RESULT} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/node_warden_scrub.cpp |67.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/check_snapshot_tx_unit.cpp |67.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_snapshot_tx_unit.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/memory_state_migration.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_fill_node.cpp |67.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_fill_node.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisrunner.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxyobtain.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_logreader.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/memory_controller/memory_controller.cpp |67.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |67.9%| [AR] {RESULT} 
$(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/check_commit_writes_tx_unit.cpp |67.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_commit_writes_tx_unit.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_snapshots.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/dynamic_nameserver.cpp |67.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/dynamic_nameserver.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_generate.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/local.cpp |67.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/local.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/dynamic_nameserver_mon.cpp |67.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/dynamic_nameserver_mon.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/configured_tablet_bootstrapper.cpp |67.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/configured_tablet_bootstrapper.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/node_broker__register_node.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/labels_maintainer.cpp |67.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__register_node.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_slot_broker__init_scheme.cpp |67.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/labels_maintainer.cpp |67.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__init_scheme.cpp |67.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/node_broker__update_config.cpp |68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__update_config.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/node_broker.cpp |68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_task.cpp |68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_task.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_slot_broker__alter_tenant.cpp |68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__alter_tenant.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_syslogreader.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/node_broker__graceful_shutdown.cpp |68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__graceful_shutdown.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/agent.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_direct_erase.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/node_broker__migrate_state.cpp |68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__migrate_state.cpp |68.0%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/node_broker__extend_lease.cpp |68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__extend_lease.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_import__get.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_committer.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_alloc.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/common/columnshard.cpp |68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/common/columnshard.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_node_enumeration.cpp |68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_node_enumeration.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/read.cpp |68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/read.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_domain_links.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__progress_resend_rs.cpp |68.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/node_broker__update_config_subscription.cpp |68.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__update_config_subscription.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/tiling.cpp |68.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/libstorage-optimizer-tiling.global.a |68.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/libstorage-optimizer-tiling.global.a |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__progress_tx.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_change_receiving.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql.cpp |68.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql.cpp |68.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a |68.1%| [AR] {RESULT} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a |68.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_slot_broker__assign_free_slots.cpp |68.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__assign_free_slots.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_access_database.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_bridge.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp |68.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_slot_broker__check_slot_status.cpp |68.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__check_slot_status.cpp |68.1%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_slot_broker__update_config.cpp |68.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_config.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_log.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmovedpatch_actor.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/s3.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_slot_broker__update_slot_status.cpp |68.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_slot_status.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_slot_broker.cpp |68.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_slot_broker__load_state.cpp |68.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__load_state.cpp |68.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/request.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__cancel_tx_proposal.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_tracker.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_syncloghttp.cpp |68.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_syncloghttp.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/storage_patch.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/grpc_proxy_status.cpp |68.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/grpc_proxy_status.cpp |68.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/client/server/libcore-client-server.a |68.2%| [AR] {RESULT} $(B)/ydb/core/client/server/libcore-client-server.a |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__get.cpp |68.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/client/server/libcore-client-server.a |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/create_volatile_snapshot_unit.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_export__cancel.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_response.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/garbage.cpp |68.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/garbage.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/status.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_overload_handler.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_export_flow_proposals.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/blobstorage_vdisk_internal.grpc.pb.cc |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdsk.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pdisk.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_console.cpp |68.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_slot_broker__update_pool_status.cpp |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_pool_status.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/metrics.cpp |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/metrics.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__forget.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/blob_mapping_cache.cpp |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/blob_mapping_cache.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/node_broker__update_epoch.cpp |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__update_epoch.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__cancel.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/blocks.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_pool.cpp |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_pool.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogreader.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group_resolver.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/direct_tx_unit.cpp |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/direct_tx_unit.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/plan_queue_unit.cpp |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/plan_queue_unit.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/key_validator.cpp |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/key_validator.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/change_sender.cpp |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/proxy.cpp |68.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_import__forget.cpp |68.4%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmultiput_actor.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/resolved_value.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_events.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pipe.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_delete.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_direct_upload.cpp |68.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |68.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogrecovery.cpp |68.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |68.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a |68.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a |68.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery_read_log.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisproxy.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_db.cpp |68.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |68.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_info_types.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/query.cpp |68.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/query.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/storage_put.cpp |68.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mon_alloc/monitor.cpp |68.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a |68.5%| [AR] {RESULT} $(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_utils.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/store_and_send_write_out_rs_unit.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_state_storage.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__cleanup_tx.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_persistent_storage.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_binding.cpp |68.5%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_storage_config.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/console_config.grpc.pb.cc |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_defrag.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/comm.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery_scan.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/storage_status.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisfinder.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_dynamic.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_part.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_static_group.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_scatter_gather.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_dblogcutter.cpp |68.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_dblogcutter.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/storage_collect_garbage.cpp |68.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/finalize_plan_tx_unit.cpp |68.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finalize_plan_tx_unit.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_slot_broker__update_node_location.cpp |68.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_node_location.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_cache.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/s3_scan.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/node_warden_mon.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/log_backend/log_backend_build.cpp |68.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/log_backend/log_backend_build.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_trans_queue.cpp |68.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_trans_queue.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/storage_block.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/remove_schema_snapshots.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_export__create.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/change_collector.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |68.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_collector.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_osiris.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/grpc.pb.cc |68.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |68.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_common.cpp |68.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/log_backend/log_backend.cpp |68.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/log_backend/log_backend.cpp |68.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |68.7%| [AR] {RESULT} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |68.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_recoverylogwriter.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_backup.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/ycsb/test_load_read_iterator.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/archive.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/coro_tx.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/drop_table_unit.cpp |68.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/drop_table_unit.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/deprecated/kicli/configurator.cpp |68.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/lib/deprecated/kicli/liblib-deprecated-kicli.a |68.7%| [AR] {RESULT} $(B)/ydb/public/lib/deprecated/kicli/liblib-deprecated-kicli.a |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/console_config.pb.cc |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/channel_kind.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/console.pb.cc |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/distconf_connectivity.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_read.cpp |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/grpc.grpc.pb.cc |68.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__s3_download_txs.cpp |68.8%| [CC] {default-linux-x86_64, 
relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_status.cpp |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_write.cpp |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/storage_range.cpp |68.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_range.cpp |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent/storage_get.cpp |68.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_get.cpp |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |68.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |68.8%| [AR] {RESULT} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/serverless_proxy_config.pb.cc |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/node_warden_vdisk.cpp |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_costmodel.cpp |68.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/ycsb/bulk_mkql_upsert.cpp |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/pdisk_log.cpp |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_cluster_discovery/grpc_service.cpp |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/execute_distributed_erase_tx_unit.cpp |68.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_distributed_erase_tx_unit.cpp |68.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_cluster_discovery/grpc_service.cpp |68.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |68.8%| [AR] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/console.grpc.pb.cc |68.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events.cpp |68.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/cloud_events/libymq-actor-cloud_events.a |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_schema.cpp |68.8%| [AR] {RESULT} $(B)/ydb/core/ymq/actor/cloud_events/libymq-actor-cloud_events.a |68.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yql/providers/generic/actors/yql_generic_lookup_actor.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/blobstorage_vdisk_internal.pb.cc |68.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/generic/actors/libproviders-generic-actors.a |68.9%| [AR] {RESULT} $(B)/ydb/library/yql/providers/generic/actors/libproviders-generic-actors.a |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/blobstorage/nodewarden/node_warden_stat_aggr.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_http.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_common.cpp |68.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/incrhuge/libcore-blobstorage-incrhuge.a |68.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/incrhuge/libcore-blobstorage-incrhuge.a |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/serverless_proxy_config.grpc.pb.cc |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/group_write.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_restore_backup_collection.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/ycsb/common.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/node_warden_cache.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/change_visibility.cpp |68.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a |68.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/s3.cpp |68.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_table.cpp |69.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_table.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/aggregated_result.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/change_sender_incr_restore.cpp |69.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender_incr_restore.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/pdisk_write.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/pdisk_read.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/op_init_schema.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__propose_tx_base.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table_index.cpp |69.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table_index.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/memory.cpp |69.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/load_test/libydb-core-load_test.a |69.0%| [AR] {RESULT} $(B)/ydb/core/load_test/libydb-core-load_test.a |69.0%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/load_test/libydb-core-load_test.a |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__publish_to_scheme_board.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/msgbus.grpc.pb.cc |69.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/msgbus.grpc.pb.cc |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/volatile_tx.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/group_metrics_exchange.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_metadata.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_memory_changes.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/msgbus.pb.cc |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/scheduler/old/kqp_compute_scheduler.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp |69.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__conditional_erase_rows.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_tables.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_scheme_tx_out_rs_unit.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_just_reject.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/op_apply_config.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/data_uncertain.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/data_trash.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/write_id.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/store_write_unit.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__column_stats.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/assimilator.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/data.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/op_load.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/key.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/data_decommit.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/check_distributed_erase_tx_unit.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_index.cpp |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_index.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/drop_volatile_snapshot_unit.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/blob_depot/s3_write.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/stream_scan_common.cpp |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/stream_scan_common.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/workload_service/tables/table_queries.cpp |69.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_sequence.cpp |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_sequence.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp |69.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a |69.2%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/op_commit_blob_seq.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/data_load.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_resource_pool.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_rtmr.cpp |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_rtmr.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_initiate_build_index.cpp |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_initiate_build_index.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/s3_upload.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/data_gc.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/blob.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/blob_depot.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sysview.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__cleanup_borrowed.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_table.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/offload_actor.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/s3_delete.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp |69.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_transport.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_transport.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/kqp/runtime/kqp_output_stream.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/actors/scheme.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/actors/scheme.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/blocks.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/change_record.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_cdc_stream.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_view.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_defs.cpp |69.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |69.3%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__cleanup_uncommitted.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_bg_tasks__list.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/mon_main.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/config.grpc.pb.cc |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_worker.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_worker.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/lease_holder.cpp |69.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/libydb-core-mind.a |69.3%| [AR] {RESULT} $(B)/ydb/core/mind/libydb-core-mind.a |69.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mind/libydb-core-mind.a |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/backup/impl/local_partition_reader.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/garbage_collection.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/backup/impl/local_partition_reader.cpp |69.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/agent.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/rm_service/kqp_snapshot_manager.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_resource_pool.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_vdisk_guids.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_query_blocks_transformer.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_query_blocks_transformer.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/data_resolve.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__store_scan_state.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_mon.cpp |69.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |69.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_actor.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config.cpp |69.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |69.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_replication.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/local_pgwire/local_pgwire_auth_actor.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/wait_for_stream_clearance_unit.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_lock.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_write_table.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_indexed_table.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_indexed_table.cpp |69.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/store_and_send_out_rs_unit.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/kmeans_helper.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/local_pgwire/local_pgwire_connection.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_sequencer_actor.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_expr_nodes.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/given_id_range.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_fs.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_fs.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_read_actor.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/create_incremental_restore_src_unit.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_index.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sysview.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/kqp.grpc.pb.cc |69.5%| 
[CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/space_monitor.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/base/blobstorage_events.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/query_compiler/kqp_olap_compiler.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_lock.cpp |69.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |69.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_assign_bsv.cpp |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/workload_service/kqp_workload_service.cpp |69.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |69.5%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |69.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/rm_service/kqp_rm_service.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/read_op_unit.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/actors/analyze_actor.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/data_mon.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/actors/analyze_actor.cpp |69.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a |69.6%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_write_actor.cpp |69.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sort.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sort.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_uniq_helper.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/query_compiler/kqp_query_compiler.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/rm_service/kqp_resource_info_exchanger.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blob_depot/testing.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/rm_service/kqp_resource_estimation.cpp |69.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a |69.6%| [AR] {RESULT} $(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a |69.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a |69.6%| [AR] {RESULT} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/local_pgwire/local_pgwire.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp |69.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__store_table_path.cpp |69.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/local_pgwire/pgwire_kqp_proxy.cpp |69.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a |69.7%| [AR] {RESULT} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update_index.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_cdc_stream.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_returning.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/query_data/kqp_prepared_query.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/tx_datashard.grpc.pb.cc |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/audit/audit_log_impl.cpp |69.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/audit/libydb-core-audit.a |69.7%| [AR] {RESULT} $(B)/ydb/core/audit/libydb-core-audit.a |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_tasks_runner.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_backup_collection.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_settings.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_datasink.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/config.pb.cc |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__op_traits.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/kqp.pb.cc |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/run_script_actor/kqp_run_script_actor.cpp |69.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_consistent_copy_tables.cpp |69.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a |69.7%| [AR] {RESULT} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/actors/run_actor.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_datasource.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/workload_service/actors/scheme_actors.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/receive_snapshot_unit.cpp |69.8%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/tx_datashard.pb.cc |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/move_index_unit.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_effects.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_defaults.cpp |69.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/libydb-core-protos.a |69.8%| [AR] {RESULT} $(B)/ydb/core/protos/libydb-core-protos.a |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/manager.cpp |69.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |69.8%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/execute_commit_writes_tx_unit.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_commit_writes_tx_unit.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/workload_service/actors/cpu_load_actors.cpp |69.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/libydb-core-protos.a |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__read_iterator.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup_collection.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert_index.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_opt_phase.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_phase.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_change_path_state.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_view.cpp |69.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/topics/kqp_topics.cpp |69.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/topics/libcore-kqp-topics.a |69.9%| [AR] {RESULT} $(B)/ydb/core/kqp/topics/libcore-kqp-topics.a |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_user_db.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_opt.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_opt_build.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/complete_write_unit.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_provider.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_sequence.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_sequence.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp |69.9%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_opt_kql.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_kql.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__snapshot_txs.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_apply_build_index.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_opt_phy_finalize.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_phy_finalize.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/workload_service/actors/pool_handlers_actors.cpp |69.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a |69.9%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a |69.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/utils/metadata_helpers.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/utils/metadata_helpers.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_indexes.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cancel_tx.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_helpers.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_helpers.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_constant_folding_transformer.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_constant_folding_transformer.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_results.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__make_access_database_no_inheritable.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/backup/impl/table_writer.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_bsv.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_bsv.cpp |70.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/backup/impl/libcore-backup-impl.a |70.0%| [AR] {RESULT} $(B)/ydb/core/backup/impl/libcore-backup-impl.a |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_opt_effects.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_effects.cpp |70.0%| [AR] {default-linux-x86_64, 
relwithdebinfo} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |70.0%| [AR] {RESULT} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_opt_phy_check.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_phy_check.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_pq.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_backup_collection.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/proxy_service/kqp_session_info.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_databases_cache.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann.cpp |70.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/query_data/kqp_predictor.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/kqp_ic_gateway.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/kqp_ic_gateway.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_factory.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/scheduler/new/kqp_schedulable_actor.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/volatile_tx_mon.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__login_finalize.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_opt_hash_func_propagate_transformer.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_hash_func_propagate_transformer.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/check_write_unit.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__schema_changed.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann_pg.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/backup_unit.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__init_schema.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_exec.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_statistics_transformer.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_statistics_transformer.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_query_plan.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_query_plan.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/kqp_gateway.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/kqp_gateway.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole.cpp 
|70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/scheduler/new/tree/snapshot.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__fix_bad_paths.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_read_iterator_common.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_indexes.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_indexes.cpp |70.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/view/manager.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/view/manager.cpp |70.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |70.2%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |70.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__migrate_schemeshard.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_blob_depot.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges_predext.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges_predext.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/local_rpc/helper.cpp |70.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a |70.2%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/query_data/kqp_query_data.cpp |70.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a |70.2%| [AR] {RESULT} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/restore_unit.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_delete_index.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore_incremental_backup.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_index.cpp |70.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |70.2%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/rbo/kqp_convert_to_physical.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_sequencer_factory.cpp |70.2%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/datashard/datashard__op_rows.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_opt_build_phy_query.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_build_phy_query.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/rbo/kqp_operator.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_cbo.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_cbo.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_effects.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_effects.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/behaviour.cpp |70.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a |70.3%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_service.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_write_constraint.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_scan_data_meta.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/rewrite_io_utils.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__monitoring.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_column_statistics_requester.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_column_statistics_requester.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/read_attributes_utils.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_bsv.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_build_index.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_opt_build_txs.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp |70.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/host/kqp_type_ann.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_build_txs.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_type_ann.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_continuous_backup.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp |70.4%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_join.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_join.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/scheduler/new/tree/dynamic.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/kqp_opt.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/workload_service/common/events.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard.cpp |70.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/workload_service/common/libkqp-workload_service-common.a |70.4%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/common/libkqp-workload_service-common.a |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/bridge.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin_compact.cpp |70.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a |70.4%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/kqp_scan_data.cpp |70.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_server.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_sharding.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_sharding.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/operation.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__login.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/prepare_distributed_erase_tx_in_rs_unit.cpp |70.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_wide_read.cpp |70.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a |70.5%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_selector.cpp |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_data_source.cpp |70.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a |70.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/utils/scheme_helpers.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/utils/scheme_helpers.cpp |70.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a |70.5%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a 
|70.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compute_actor/kqp_scan_events.cpp |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/scheduler/new/kqp_compute_scheduler_service.cpp |70.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |70.5%| [AR] {RESULT} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_index.cpp |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/tablet_killer.cpp |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/drop_persistent_snapshot_unit.cpp |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/checker.cpp |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/behaviour.cpp |70.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a |70.5%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compile_service/kqp_compile_actor.cpp |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/kqp_metadata_loader.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/kqp_metadata_loader.cpp |70.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a |70.5%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a |70.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_index.cpp |70.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/table/behaviour.cpp |70.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a |70.6%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__init_root.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_data_source.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/initializer.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/add_column.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__borrowed_compaction.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/host/kqp_runner.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_runner.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/snapshot.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/schemeshard/schemeshard__notify.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/behaviour.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/behaviour.cpp |70.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a |70.6%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a |70.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo_transformer.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compute_actor/kqp_compute_state.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__data_cleanup.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_sequence.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_sequence.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_solomon.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_solomon.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__stats.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/actors/task_get.cpp |70.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo_rules.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/behaviour.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/fetcher.cpp |70.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a |70.7%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/host/kqp_gateway_proxy.cpp |70.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/opt/rbo/libkqp-opt-rbo.a |70.7%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/rbo/libkqp-opt-rbo.a |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_gateway_proxy.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/host/kqp_translate.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_translate.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/host/kqp_host.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_host.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/manager.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/view/behaviour.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/view/behaviour.cpp |70.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a |70.7%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a |70.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/kqp/executer_actor/kqp_tasks_validate.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_executer_impl.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_planner.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compile_service/kqp_compile_service.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_partitioned_executer.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/host/kqp_statement_rewrite.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_statement_rewrite.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_column.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_column.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_table.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_column.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp |70.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/object.cpp |70.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a |70.8%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/manager.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/manager.cpp |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_distributed_erase_tx_out_rs_unit.cpp |70.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a |70.8%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a |70.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_table.cpp |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_encrypt.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_encrypt.cpp |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/actors/result_writer.cpp |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/prepare_scheme_tx_in_rs_unit.cpp |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_locks_db.cpp |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp |70.8%| 
[CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/execution_unit.cpp |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/tools/kqprun/runlib/application.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/runlib/application.cpp |70.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |70.8%| [AR] {RESULT} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |70.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/actors/pending_fetcher.cpp |70.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a |70.8%| [AR] {RESULT} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a |70.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp |70.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |70.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp |70.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_continuous_backup.cpp |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_peer_stats_calculator.cpp |70.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |70.9%| [AR] {RESULT} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__background_cleaning.cpp |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/balance/handoff_map.cpp |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_schema_snapshots.cpp |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp |70.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compute_actor/kqp_scan_common.cpp |70.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_opt.cpp |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/statestorage.cpp |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/util/failure_injection.cpp |70.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/util/libydb-core-util.a |70.9%| [AR] {RESULT} 
$(B)/ydb/core/util/libydb-core-util.a |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/host/kqp_transform.cpp |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_transform.cpp |70.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/base/libydb-core-base.a |70.9%| [AR] {RESULT} $(B)/ydb/core/base/libydb-core-base.a |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers.cpp |70.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_fs.cpp |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_fs.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_locks_helper.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/host/kqp_explain_prepared.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_planner_strategy.cpp |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_explain_prepared.cpp |71.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/host/libcore-kqp-host.a |71.0%| [AR] {RESULT} $(B)/ydb/core/kqp/host/libcore-kqp-host.a |71.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/host/libcore-kqp-host.a |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/drop_cdc_stream_unit.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_repl_apply.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_index.cpp |71.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |71.0%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |71.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_bsv.cpp |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_bsv.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__conditional_erase.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_incremental_backup_collection.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_continuous_backup.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/counters/kqp_counters.cpp |71.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a |71.0%| [AR] {RESULT} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/options/schema.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/update.cpp |71.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a |71.0%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a |71.0%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__clean_pathes.cpp |71.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_literal_executer.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__data_erasure_manager.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compute_actor/kqp_compute_events.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_scan_executer.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_helpers.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/completed_operations_unit.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/shutdown/controller.cpp |71.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/common/shutdown/libkqp-common-shutdown.a |71.1%| [AR] {RESULT} $(B)/ydb/core/kqp/common/shutdown/libkqp-common-shutdown.a |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__list_users.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/schema/schema.cpp |71.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a |71.1%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_actors.cpp |71.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |71.1%| [AR] {RESULT} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_cdc_stream.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/make_snapshot_unit.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__list.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/viewer.cpp |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/update.cpp |71.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |71.2%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/partition_writer.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/read_table_scan.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/upload_rows_common_impl.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/viewer/json_handlers_query.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_build_stage.cpp |71.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a |71.2%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/upload_rows.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/options/update.cpp |71.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a |71.2%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__find_subdomain_path_id.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/json_handlers_pq.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/table/table.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__init_populator.cpp |71.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a |71.2%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__backup_collection_common.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_data_executer.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_scheme_executer.cpp |71.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/json_handlers_browse.cpp |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tiering/fetcher.cpp |71.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |71.3%| [AR] {RESULT} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__plan_step.cpp |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/manager.cpp |71.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a |71.3%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/manager/manager.cpp |71.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |71.3%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compile_service/kqp_compile_computation_pattern_service.cpp |71.3%| [AR] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a |71.3%| [AR] {RESULT} $(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp |71.3%| [UN] {default-linux-x86_64, relwithdebinfo} $(B)/yql/essentials/tests/common/test_framework/udfs_deps/common-test_framework-udfs_deps.pkg.fake |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/grpc_pq_schema.cpp |71.3%| [UN] {default-linux-x86_64, relwithdebinfo} $(B)/library/recipes/docker_compose/bin/docker-compose |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/update.cpp |71.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a |71.3%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/object.cpp |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/simple/helpers.cpp |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_common_upload.cpp |71.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_committer.cpp |71.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/ydb_proxy/local_proxy/libreplication-ydb_proxy-local_proxy.a |71.4%| [AR] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/local_proxy/libreplication-ydb_proxy-local_proxy.a |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard__read_columns.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__background_compaction.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_fs.cpp |71.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_fs.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_discovery.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllog.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_db.cpp |71.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/driver_lib/cli_base/libcli_base.a |71.4%| [AR] {RESULT} $(B)/ydb/core/driver_lib/cli_base/libcli_base.a |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hull.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/local_rate_limiter.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/tablet/tx_self_check.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp |71.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_kh_describe.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/operations/manager.cpp |71.4%| [AR] 
{default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |71.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/query/rpc_attach_session.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/resource_subscriber/events.cpp |71.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |71.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/deprecated/persqueue_v0/persqueue.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/security/ticket_parser.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp |71.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/update.cpp |71.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |71.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |71.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |71.5%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue_state.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/json_handlers_viewer.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_import.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_finish_async.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_execute_scheme_query.cpp |71.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/viewer/libydb-core-viewer.a |71.5%| [AR] {RESULT} $(B)/ydb/core/viewer/libydb-core-viewer.a |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_import_data.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_alter_table.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_drop_coordination_node.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_ping.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_export__forget.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/operators/propose_tx.cpp |71.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |71.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp |71.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |71.5%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/grpc_pq_read.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/actors/commit_offset_actor.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/table_settings.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp |71.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/tablet/tx_config_set.cpp |71.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |71.6%| [AR] {RESULT} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_describe_external_table.cpp |71.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |71.6%| [AR] {RESULT} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/object.cpp |71.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |71.6%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard__statistics.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/operators/schema.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_monitoring.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_node_registration.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/tx_controller.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/common/kqp_ut_common.cpp |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/common/kqp_ut_common.cpp |71.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |71.6%| [AR] {RESULT} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |71.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__init.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/ydb_over_fq/list_directory.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_read_columns.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/normalizer/portion/normalizer.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_list_objects_in_s3_export.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_common.cpp |71.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/http_proxy/http_service.cpp |71.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a |71.7%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/http_proxy/auth_factory.cpp |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/query/rpc_fetch_script_results.cpp |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_begin_transaction.cpp |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_index.cpp |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/hooks/abstract/abstract.cpp |71.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a |71.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/operators/backup.cpp |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_config.cpp |71.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |71.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_modify_permissions.cpp |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/grpc_endpoint_publish_actor.cpp |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/tasks_list.cpp |71.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |71.7%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_prepare_data_query.cpp |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_discovery.cpp |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/apps/version/version_definition.cpp |71.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/version/libversion_definition.a |71.7%| [AR] {RESULT} $(B)/ydb/apps/version/libversion_definition.a |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/kqp_helper.cpp |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_read_table.cpp |71.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard_view.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/transactions/locks_db.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_bridge.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_forget_operation.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_scan.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/general_cache/service/counters.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/columnshard__progress_tx.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_alter_replication.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/columnshard/transactions/transactions/tx_add_sharding_info.cpp |71.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |71.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/slide_limiter/usage/config.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_drop_stream_result.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_kh_snapshots.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/mediator/tablet_queue.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_create_stream_result.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/grpc_request_proxy.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/slide_limiter/usage/service.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/service/json_change_record.cpp |71.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/slide_limiter/usage/liblibrary-slide_limiter-usage.a |71.8%| [AR] {RESULT} $(B)/ydb/library/slide_limiter/usage/liblibrary-slide_limiter-usage.a |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_worker_error.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_import.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/graph/shard/tx_get_metrics.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/ydb_over_fq/describe_table.cpp |71.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_make_directory.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_resolve_secret_result.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/slide_limiter/service/service.cpp |71.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/slide_limiter/service/liblibrary-slide_limiter-service.a |71.9%| [AR] {RESULT} $(B)/ydb/library/slide_limiter/service/liblibrary-slide_limiter-service.a |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_init.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_assign_tx_id.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/ydb_over_fq/execute_data_query.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/general_cache/usage/service.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read_actor.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_api_versions_actor.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/ydb_over_fq/keep_alive.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/graph/shard/shard_impl.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/replication/controller/tx_init_schema.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/priorities/usage/config.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_describe_replication.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_alter_dst_result.cpp |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet.cpp |71.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a |71.9%| [AR] {RESULT} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/general_cache/usage/config.cpp |71.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/general_cache/usage/libtx-general_cache-usage.a |71.9%| [AR] {RESULT} $(B)/ydb/core/tx/general_cache/usage/libtx-general_cache-usage.a |71.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_assign_stream_name.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_describe_path.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_drop_table.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/protos/out/out.cpp |72.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/out/libcore-protos-out.a |72.0%| [AR] {RESULT} $(B)/ydb/core/protos/out/libcore-protos-out.a |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_get_shard_locations.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_clusters_updater_actor.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_export.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/kqp_timeouts.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/simple/query_id.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/priorities/service/manager.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/graph/shard/tx_store_metrics.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/graph/shard/tx_monitoring.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/limiter/grouped_memory/service/manager.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/kafka_metrics.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugedefs.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_produce_actor.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/test_connection/test_connection.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/graph/shard/tx_aggregate_data.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/kafka_transactional_producers_initializers.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/kafka_consumer_members_metadata_initializers.cpp |72.0%| [CC] {default-linux-x86_64, 
relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_read_session_actor.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_copy_tables.cpp |72.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/service/service.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/general_cache/service/service.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/txn_actor_response_builder.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/ut_common.cpp |72.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/ut_common.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/graph/shard/tx_startup.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_find_coordinator_actor.cpp |72.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |72.1%| [AR] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |72.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/private/labeled_db_counters.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_describe_configs_actor.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_describe_external_data_source.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_fetch_actor.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_create_coordination_node.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/resolve_local_db_table.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/query_actor/query_actor.cpp |72.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |72.1%| [AR] {RESULT} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/resource_broker.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_metrics_actor.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_transaction_actor.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_metadata_actor.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_topic_offsets_actor.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_list_offsets_actor.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_balance_actor_sql.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_scheme_base.cpp |72.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_fetch_actor.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/kafka_connection.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yaml_config/console_dumper.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/actorlib_impl/read_data_protocol.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp |72.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/operation_helpers.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_handshake_actor.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/replica.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_describe_coordination_node.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_create_topics_actor.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_req_blockbs.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_alter_coordination_node.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_copy_table.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/populator.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_remove_directory.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_backup.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_req_findlatest.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_object_storage.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_create_partitions_actor.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/kafka_consumer_groups_metadata_initializers.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/quoter/quoter_service.cpp |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/database/database.cpp |72.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/database/libcore-statistics-database.a |72.2%| [AR] {RESULT} $(B)/ydb/core/statistics/database/libcore-statistics-database.a |72.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_stream_execute_scan_query.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_req_delete.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_rename_tables.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_req_writelog.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/query/rpc_kqp_tx.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/actors/block_events.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_common/rpc_common_kqp_session.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/service/base_table_writer.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_replication.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/load_actor_write.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_view.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/grpc_services/rpc_execute_yql_script.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_rate_limiter_api.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/actors/wait_events.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/test_shard_mon.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/tx_load_everything.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/shared_sausagecache.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_monitoring_proxy.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/test_shard_context.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/http_proxy/http_req.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/counters/proxy_counters.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/tx_init_scheme.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/load_actor_state.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/state_server_interface.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/load_actor_mon.cpp |72.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_sys.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_req_reset.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/tx_initialize.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/long_tx_service/commit_impl.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/test_tablet.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/load_actor_impl.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/node_tablet_monitor.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/tablet_flat_dummy.cpp |72.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tablet_flat_dummy.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_datashard_scan_response.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/sequenceshard/ut_helpers.cpp |72.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/ut_helpers.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/locks/locks.cpp |72.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |72.4%| [AR] {RESULT} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/fake_coordinator.cpp |72.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/fake_coordinator.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/transfer/transfer_writer.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_analyze_deadline.cpp |72.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/transfer/libydb-core-transfer.a |72.4%| [AR] {RESULT} $(B)/ydb/core/transfer/libydb-core-transfer.a |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/aggregator_impl.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp |72.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/long_tx_service/long_tx_service_impl.cpp |72.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a |72.4%| [AR] {RESULT} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/storage/storage_pools.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_configure.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/storage/pdisks.cpp |72.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/tablet_helpers.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |72.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |72.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tablet_helpers.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_linux.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/storage/vslots.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/resource_pools/resource_pools.cpp |72.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a |72.5%| [AR] {RESULT} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |72.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_navigate.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_init_schema.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp |72.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |72.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/auth/owners.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider.cpp |72.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/security/ldap_auth_provider/libcore-security-ldap_auth_provider.a |72.5%| [AR] {RESULT} $(B)/ydb/core/security/ldap_auth_provider/libcore-security-ldap_auth_provider.a |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/security/login_shared_func.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_cache/scheme_cache.cpp |72.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a |72.5%| [AR] {RESULT} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp |72.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/statistics/aggregator/aggregator.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_finish_trasersal.cpp |72.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_analyze.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_response.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_schedule_traversal.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_aggr_stat_response.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/security/certificate_check/cert_check.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/pg_tables/pg_tables.cpp |72.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a |72.6%| [AR] {RESULT} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_ack_timeout.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/security/certificate_check/cert_auth_utils.cpp |72.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |72.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/security/certificate_check/libcore-security-certificate_check.a |72.6%| [AR] {RESULT} $(B)/ydb/core/security/certificate_check/libcore-security-certificate_check.a |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_resolve.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/cs_helper.cpp |72.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/cs_helper.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/auth/permissions.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/query_actor/query_actor_ut.cpp |72.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/query_actor/query_actor_ut.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/common_helper.cpp |72.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp |72.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/common_helper.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/auth/groups.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_faketablet.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |72.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_faketablet.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix.cpp |72.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/partition_stats/top_partitions.cpp |72.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp |72.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/public_http/http_req.cpp |72.7%| [CC] {default-linux-x86_64, 
relwithdebinfo} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |72.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/ut/kafka_test_client.cpp |72.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/kafka_test_client.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/dataset.cpp |72.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/dataset.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp |72.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_localrecovery.cpp |72.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_localrecovery.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_quantum.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_gc.cpp |72.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_gc.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/public_http/http_service.cpp |72.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/public_http/libydb-core-public_http.a |72.7%| [AR] {RESULT} $(B)/ydb/core/public_http/libydb-core-public_http.a |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/resource_pool_classifiers/resource_pool_classifiers.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_dbstat.cpp |72.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a |72.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_dbstat.cpp |72.7%| [AR] {RESULT} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/scan.cpp |72.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/libydb-core-sys_view.a |72.7%| [AR] {RESULT} $(B)/ydb/core/sys_view/libydb-core-sys_view.a |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |72.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/ut_helpers/mock_service.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_outofspace.cpp |72.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_outofspace.cpp |72.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/ut_helpers/libtx-replication-ut_helpers.a |72.7%| [AR] {RESULT} $(B)/ydb/core/tx/replication/ut_helpers/libtx-replication-ut_helpers.a |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/sentinel_ut.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/sentinel_ut.cpp |72.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/ut/ut_produce_actor.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_produce_actor.cpp 
|72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_simplebs.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_simplebs.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_bad_blobid.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_bad_blobid.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yaml_config/protos/config.pb.cc |72.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yaml_config/protos/libyaml-config-protos.a |72.8%| [AR] {RESULT} $(B)/ydb/library/yaml_config/protos/libyaml-config-protos.a |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_many.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_many.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/prepare.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/prepare.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_followers.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_followers.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_synclog.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/vdisk_mock.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_synclog.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/vdisk_mock.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/pq_async_io/ut_helpers.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_ut_common.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_ut_common.cpp |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp 
|72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp |72.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/http_proxy/discovery_actor.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yaml_config/yaml_config.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/events/script_executions.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/helpers.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/helpers.cpp |72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/compilation/events.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/limiter/grouped_memory/usage/config.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/kqp_event_impl.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/bootstrapper.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/limiter/grouped_memory/usage/service.cpp |72.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/limiter/grouped_memory/usage/liblimiter-grouped_memory-usage.a |72.9%| [AR] {RESULT} $(B)/ydb/core/tx/limiter/grouped_memory/usage/liblimiter-grouped_memory-usage.a |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |72.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |72.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |72.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp |72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hulldefs.cpp |72.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/tools/fqrun/src/actors.cpp |73.0%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/tests/tools/fqrun/src/actors.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp |73.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.cpp |73.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a |73.0%| [AR] {RESULT} $(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yaml_config/yaml_config_parser.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/events/events.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue_storage_request.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue_intermediate.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/kqp.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_rewriter.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/common/pq_ut_common.cpp |73.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/common/pq_ut_common.cpp |73.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a |73.0%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/tools/fqrun/src/fq_runner.cpp |73.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/fqrun/src/fq_runner.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_whoami.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/combinatory/abstract.cpp |73.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/abstract.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/tx_helpers.cpp |73.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tx_helpers.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/monitoring.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/priorities/service/service.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/combinatory/execute.cpp |73.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/test_connection/test_data_streams.cpp |73.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/execute.cpp |73.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/priorities/service/libtx-priorities-service.a |73.0%| [AR] {RESULT} $(B)/ydb/core/tx/priorities/service/libtx-priorities-service.a |73.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/test_connection/libfq-libs-test_connection.a |73.0%| [AR] {RESULT} $(B)/ydb/core/fq/libs/test_connection/libfq-libs-test_connection.a |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue_state_collect.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/load_test.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/helpers.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/kqp/ut/olap/helpers/local.cpp |73.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/local.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx.cpp |73.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/libydb-core-tx.a |73.1%| [AR] {RESULT} $(B)/ydb/core/tx/libydb-core-tx.a |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp |73.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/tablet/tx_init.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_entryserialize.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/hooks/testing/ro_controller.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/helpers/writer.cpp |73.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/hooks/testing/ro_controller.cpp |73.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/writer.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/service/worker.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/subscriber.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/yql_testlib/yql_testlib.cpp |73.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/yql_testlib/yql_testlib.cpp |73.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |73.1%| [AR] {RESULT} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |73.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/kqp_tx.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/nodes_manager.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/http_proxy/grpc_service.cpp |73.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |73.1%| [AR] {RESULT} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/combinatory/select.cpp |73.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/select.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_executor_db_mon.cpp |73.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/tablet/tx_session_timeout.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_cancel_operation.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/tools/fqrun/src/fq_setup.cpp |73.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/fqrun/src/fq_setup.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/combinatory/compaction.cpp |73.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/compaction.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/hooks/testing/controller.cpp |73.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/hooks/testing/controller.cpp |73.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |73.2%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |73.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/kqp_tx_manager.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/ut_common/ut_common.cpp |73.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/ut_common/ut_common.cpp |73.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |73.2%| [AR] {RESULT} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |73.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/query/rpc_execute_script.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_list_operations.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_compactfreshappendix.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_log_store.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/limiter/grouped_memory/service/actor.cpp |73.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/limiter/grouped_memory/service/liblimiter-grouped_memory-service.a |73.2%| [AR] {RESULT} $(B)/ydb/core/tx/limiter/grouped_memory/service/liblimiter-grouped_memory-service.a |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_kqp_base.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/ydb_over_fq/explain_data_query.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_keyvalue.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/tablet_flat_executor.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/logging.cpp |73.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/topic_sdk_test_setup.cpp |73.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/topic_sdk_test_setup.cpp |73.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |73.3%| [AR] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |73.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_commit_transaction.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_explain_yql_script.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_responsiveness_pinger.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/graph/shard/tx_change_backend.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_describe_table.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/helpers/aggregation.cpp |73.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/aggregation.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_explain_data_query.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_discovery_targets_result.cpp 
|73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllogcutternotify.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_create_dst_result.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/combinatory/executor.cpp |73.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/executor.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hulldb_bulksstmngr.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idx.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_drop_dst_result.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/tablet_flat_executed.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_heartbeat.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_pipe_client.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_datasnap.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/graph/shard/tx_init_schema.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_executor_data_cleanup_logic.cpp |73.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |73.3%| [AR] {RESULT} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_create_replication.cpp |73.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstslice.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_load_blob_queue.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstvec.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/dst_creator.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_get_operation.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/controller.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/cache.cpp |73.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |73.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_executor_bootlogic.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/connector_client_mock.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp |73.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/connector_client_mock.cpp |73.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |73.4%| [AR] {RESULT} 
$(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |73.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idxsnap.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/service/ext_counters.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/common/autoscaling_ut_common.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_boot_lease.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/table_creator/table_creator.cpp |73.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |73.4%| [AR] {RESULT} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |73.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/common/autoscaling_ut_common.cpp |73.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |73.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |73.4%| [AR] {RESULT} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_metrics.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/processor/tx_aggregate.cpp |73.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_auth_actor.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_bio_actor.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/processor/tx_interval_summary.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/processor/tx_collect.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_fq_internal.cpp |73.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_alter_configs_actor.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_boot_misc.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/service/sysview_service.cpp |73.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a |73.5%| [AR] {RESULT} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_commit_actor.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/processor/tx_init_schema.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/processor/tx_interval_metrics.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/processor/processor.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_fq.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/processor/db_counters.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/node_whiteboard.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/processor/tx_configure.cpp |73.5%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/kqp_lwtrace_probes.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_resolver.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/processor/tx_top_partitions.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/mediator/mediator.cpp |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/actors/test_runtime.cpp |73.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |73.5%| [AR] {RESULT} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |73.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/common/schema.cpp |73.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a |73.6%| [AR] {RESULT} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_list_renderer.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/processor/tx_init.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yql/providers/solomon/actors/ut/dq_solomon_write_actor_ut.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/processor/processor_impl.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp |73.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |73.6%| [AR] {RESULT} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yaml_config/yaml_config_helpers.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/actorlib_impl/connect_socket_protocol.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_counters_aggregator.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_balancer_actor.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/persqueue/topic_parser/topic_parser.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/load_actor_delete.cpp |73.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |73.6%| [AR] {RESULT} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/service/service.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/service/table_writer.cpp |73.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |73.6%| [AR] {RESULT} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_stream_execute_yql_script.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/query/rpc_execute_query.cpp |73.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/yaml_config/serialize_deserialize.cpp |73.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |73.6%| [AR] {RESULT} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |73.6%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_brokendevice.cpp |73.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a |73.7%| [AR] {RESULT} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_delivery_problem.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue_index_record.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_load.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_request.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_essence.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/security/login_page.cpp |73.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/security/libydb-core-security.a |73.7%| [AR] {RESULT} $(B)/ydb/core/security/libydb-core-security.a |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/sequenceshard/ut_sequenceshard.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/ut_sequenceshard.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_huge.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullactor.cpp |73.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a |73.7%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/pq_async_io/mock_pq_gateway.cpp |73.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |73.7%| [AR] {RESULT} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/show_create/show_create.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_response_tablet_distribution.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/opaque_path_description.cpp |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/priorities/usage/service.cpp |73.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/priorities/usage/libtx-priorities-usage.a |73.7%| [AR] {RESULT} $(B)/ydb/core/tx/priorities/usage/libtx-priorities-usage.a |73.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/mediator/mediator__schema.cpp |73.8%| [PK] {default-linux-x86_64, relwithdebinfo} $(B)/yql/essentials/tests/common/test_framework/udfs_deps/{common-test_framework-udfs_deps.final.pkg.fake ... 
yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so} |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request.cpp |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_init.cpp |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/show_create/create_table_formatter.cpp |73.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |73.8%| [AR] {RESULT} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/proxy/proxy.cpp |73.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |73.8%| [AR] {RESULT} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/tablets/tablets.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/tablets/tablets.cpp |73.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |73.8%| [AR] {RESULT} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |73.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/auth/users.cpp |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/storage/groups.cpp |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue_collector.cpp |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/dst_remover.cpp |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/query_stats/query_metrics.cpp |73.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a |73.8%| [AR] {RESULT} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/events.cpp |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/mediator/mediator_impl.cpp |73.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |73.8%| [AR] {RESULT} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/actorlib_impl/send_data_protocol.cpp |73.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a |73.8%| [AR] {RESULT} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a |73.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/general_cache/service/manager.cpp |73.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/general_cache/service/libtx-general_cache-service.a |73.9%| [AR] {RESULT} $(B)/ydb/core/tx/general_cache/service/libtx-general_cache-service.a |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_chain.cpp |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_executor.cpp |73.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |73.9%| [AR] {RESULT} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/mediator/mediator__configure.cpp |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/stream_creator.cpp |73.9%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_public.cpp |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/partition_stats/partition_stats.cpp |73.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a |73.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a |73.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a |73.9%| [AR] {RESULT} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugerecovery.cpp |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/query_stats/query_stats.cpp |73.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a |73.9%| [AR] {RESULT} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst.cpp |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_dynamic_config.cpp |73.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a |73.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_maintenance.cpp |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/signals/owner.cpp |73.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/signals/libydb-library-signals.a |73.9%| [AR] {RESULT} $(B)/ydb/library/signals/libydb-library-signals.a |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_describe_table_options.cpp |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_get_scale_recommendation.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_get_scale_recommendation.cpp |73.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/simple/settings.cpp |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/program/resolver.cpp |74.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/common/simple/libkqp-common-simple.a |74.0%| [AR] {RESULT} $(B)/ydb/core/kqp/common/simple/libkqp-common-simple.a |74.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/program/libcore-tx-program.a |74.0%| [AR] {RESULT} $(B)/ydb/core/tx/program/libcore-tx-program.a |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_req_rebuildhistory.cpp |74.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tablet/libydb-core-tablet.a |74.0%| [AR] {RESULT} $(B)/ydb/core/tablet/libydb-core-tablet.a |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_repl.cpp |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_load_rows.cpp |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/tx_drop_replication.cpp |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_defrag.cpp |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_rollback_transaction.cpp |74.0%| [AR] 
{default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |74.0%| [AR] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_helpers/ls_checks.cpp |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap.cpp |74.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a |74.0%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/events/query.cpp |74.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/common/events/libkqp-common-events.a |74.0%| [AR] {RESULT} $(B)/ydb/core/kqp/common/events/libkqp-common-events.a |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_helpers/failing_mtpq.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/failing_mtpq.cpp |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_keep_alive.cpp |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/quoter/kesus_quoter_proxy.cpp |74.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/quoter/libydb-core-quoter.a |74.0%| [AR] {RESULT} $(B)/ydb/core/quoter/libydb-core-quoter.a |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/secret_resolver.cpp |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_helpers/export_reboots_common.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/export_reboots_common.cpp |74.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/helpers/query_executor.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/query_executor.cpp |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/compilation/result.cpp |74.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/common/compilation/libkqp-common-compilation.a |74.1%| [AR] {RESULT} $(B)/ydb/core/kqp/common/compilation/libkqp-common-compilation.a |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/mediator/mediator__init.cpp |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/jaeger_tracing/sampling_throttling_configurator.cpp |74.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/jaeger_tracing/libydb-core-jaeger_tracing.a |74.1%| [AR] {RESULT} $(B)/ydb/core/jaeger_tracing/libydb-core-jaeger_tracing.a |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/test_tablet/load_actor_read_validate.cpp |74.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |74.1%| [AR] {RESULT} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/test_server.cpp |74.1%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/test_server.cpp |74.1%| [UN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/postgresql/psql/psql |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/combinatory/bulk_upsert.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/bulk_upsert.cpp |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/tools/fqrun/src/common.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/fqrun/src/common.cpp |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/dst_alterer.cpp |74.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |74.1%| [AR] {RESULT} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |74.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |74.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |74.1%| [AR] {RESULT} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp |74.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/mediator/execute_queue.cpp |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank.cpp |74.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp |74.2%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/counters/counters.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp |74.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |74.2%| [AR] {RESULT} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/kqp_resolve.cpp |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/tx_schemeshard_stats.cpp |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/helpers/typed_local.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/typed_local.cpp |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/ydb_over_fq/create_session.cpp |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |74.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |74.2%| [AR] {RESULT} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |74.2%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |74.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a |74.2%| [AR] {RESULT} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/yc_search_ut/test_events_writer.cpp |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/mediator/mediator__schema_upgrade.cpp |74.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a |74.2%| [AR] {RESULT} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/build_index/ut/ut_recompute_kmeans.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_recompute_kmeans.cpp |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/service/service_impl.cpp |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp |74.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/service/libcore-statistics-service.a |74.2%| [AR] {RESULT} $(B)/ydb/core/statistics/service/libcore-statistics-service.a |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/tablet/tx_semaphore_timeout.cpp |74.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |74.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |74.3%| [AR] {RESULT} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/combinatory/actualization.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/actualization.cpp |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write_actor.cpp |74.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |74.3%| [AR] {RESULT} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/sessions/sessions.cpp |74.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a |74.3%| [AR] {RESULT} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/ut_utils.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/ut_utils.cpp |74.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |74.3%| [AR] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |74.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/apps/ydbd/main.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp |74.3%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/auth/group_members.cpp |74.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |74.3%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |74.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |74.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a |74.3%| [AR] {RESULT} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/driver_lib/run/auto_config_initializer_ut.cpp |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/nodes/nodes.cpp |74.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a |74.3%| [AR] {RESULT} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/grpc_request_proxy_simple.cpp |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_cms.cpp |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/view/view_ut.cpp |74.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_execute_data_query.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/view/view_ut.cpp |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/storage/storage_stats.cpp |74.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |74.4%| [AR] {RESULT} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_login.cpp |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/tenant_runtime.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tenant_runtime.cpp |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/workload_service/ut/common/kqp_workload_service_ut_common.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/ut/common/kqp_workload_service_ut_common.cpp |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/apps/etcd_proxy/service/etcd_impl.cpp |74.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |74.4%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |74.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |74.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |74.4%| [AR] {RESULT} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/base/board_subscriber_ut.cpp |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/common/kqp_ru_calc.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/board_subscriber_ut.cpp |74.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |74.4%| [AR] {RESULT} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/grpc_services/rpc_create_table.cpp |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/combinatory/variator.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/variator.cpp |74.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/olap/combinatory/libut-olap-combinatory.a |74.4%| [AR] {RESULT} $(B)/ydb/core/kqp/ut/olap/combinatory/libut-olap-combinatory.a |74.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/ut/olap/combinatory/libut-olap-combinatory.a |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/backup/impl/table_writer_ut.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/backup/impl/table_writer_ut.cpp |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cluster_info_ut.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cluster_info_ut.cpp |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |74.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |74.5%| [PK] {default-linux-x86_64, relwithdebinfo} $(B)/library/recipes/docker_compose/bin/{recipes-docker_compose-bin.final.pkg.fake ... library/recipes/docker_compose/bin/docker-compose} |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tools/query_replay/main.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay/main.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_sysview_reboots/ut_sysview_reboots.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_sysview_reboots/ut_sysview_reboots.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/health_check/health_check.cpp |74.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/health_check/libydb-core-health_check.a |74.5%| [AR] {RESULT} $(B)/ydb/core/health_check/libydb-core-health_check.a |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp |74.5%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_services/rpc_read_rows.cpp |74.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |74.5%| [AR] {RESULT} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tools/query_replay_yt/query_compiler.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay_yt/query_compiler.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |74.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |74.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/test_client.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/test_client.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |74.6%| [AR] 
{default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/testlib/libydb-core-testlib.a |74.6%| [AR] {RESULT} $(B)/ydb/core/testlib/libydb-core-testlib.a |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/cms/cms_ut.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/cms/cms_ut.cpp |74.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/libydb-core-testlib.a |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/rbo/kqp_rbo_ut.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/rbo/kqp_rbo_ut.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp |74.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tools/query_replay/query_compiler.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay/query_compiler.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/topic_data_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/topic_data_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/ydb_cli/commands/benchmark_utils.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/format_handler_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/format_handler_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |74.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/apps/ydb/ut/parse_command_line.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/ut_common.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_common.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp |74.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |74.8%| [LD] {RESULT} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/backup_ut/encrypted_backup_ut.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/backup_ut/encrypted_backup_ut.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/ut_counters.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_counters.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console_ut_tenants.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_ut_tenants.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp |74.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/row_dispatcher/ut/topic_session_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/runtime/scheduler/old/kqp_compute_scheduler_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_pipecache_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_pipecache_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/row_dispatcher/ut/leader_election_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/flat_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/flat_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/runtime/kqp_re2_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_re2_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp |74.9%| [CC] {default-linux-x86_64, 
relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/check_integrity.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/check_integrity.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |74.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/conveyor_composite/ut/ut_simple.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/list_all_topics_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/list_all_topics_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/counters_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/counters_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/resource_broker_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/resource_broker_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_filestore_reboots/ut_filestore_reboots.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_filestore_reboots/ut_filestore_reboots.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/ncloud/impl/access_service_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/federated_query/large_results/kqp_scriptexec_results_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/large_results/kqp_scriptexec_results_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/ut_helpers.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |75.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/ut_labeled.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_labeled.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_ut_common.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp |75.1%| [CC] {default-linux-x86_64, 
relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/viewer/viewer_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/viewer_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/direct_read_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/direct_read_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |75.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/backup_ut/backup_path_ut.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/backup_ut/backup_path_ut.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_sysview/ut_sysview.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_sysview/ut_sysview.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/group_size_in_units.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/group_size_in_units.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp |75.2%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/mediator/mediator_ut.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator_ut.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/ut/kqp_mock.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/kqp_mock.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |75.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/ut/ut_rename_table_column.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/quoter/quoter_service_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore_reboots/ut_incremental_restore_reboots.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore_reboots/ut_incremental_restore_reboots.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_system_names/ut_system_names.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_system_names/ut_system_names.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |75.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/tools/query_replay/query_replay.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay/query_replay.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/ut_kqp.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_kqp.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/locks_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/locks_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_failure_injection/ut_failure_injection.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_failure_injection/ut_failure_injection.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/object_storage_listing_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/object_storage_listing_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/ut_ycsb.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ut_ycsb.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/row_dispatcher/ut/coordinator_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |75.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_external_data_source/ut_external_data_source.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_data_source/ut_external_data_source.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/security/certificate_check/cert_utils_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_discover_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/downtime_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_maintenance_api_ut.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp |75.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_query_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_query_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp |75.6%| [ld] {default-linux-x86_64, relwithdebinfo} $(B)/tools/black_linter/black_linter |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |75.6%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/dictionary_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/node_broker_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/dictionary_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/config/bsconfig_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/config/bsconfig_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/actorlib_impl/actor_activity_ut.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |75.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_actors_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_actors_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/encrypted_storage_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/encrypted_storage_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp |75.7%| 
[CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/actors/ut/database_resolver_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_state_storage_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/provider/yql_kikimr_provider_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |75.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/hive_ut.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_ut.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/quoter/ut_helpers.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/ut_helpers.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/sys_view/ut_common.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_common.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/cancel_tx_ut.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/cancel_tx_ut.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/ydb_cli/commands/ydb_sql.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/partition_ut.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/partition_ut.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_login_ut.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_login_ut.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp |75.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp |75.9%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_tenants_ut.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tenants_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/table_creator/table_creator_ut.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/table_creator/table_creator_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/flat_executor_database_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_counters_aggregator_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_ut_local.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_ut_local.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tools/stress_tool/device_test_tool.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tools/query_replay_yt/main.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/ut/ut_script.cpp |75.9%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/engines/ut/ut_script.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp |75.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/group_mapper_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/backpressure.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |76.0%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/actorlib_impl/test_protocols_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tools/query_replay/query_proccessor.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/pq_ut.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pq_ut.cpp |76.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_import_ut.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_import_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/tests/kikimr_tpch/kqp_tpch_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp |76.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch |76.1%| [LD] {RESULT} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/security/certificate_check/cert_check_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ydb_convert/table_description_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_tables_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/persqueue/ut/sourceid_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_rtmr/ut_rtmr.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_rtmr/ut_rtmr.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/jaeger_tracing_configurator_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |76.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_write_actor_ut.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/ut_large.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_large.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/tenant_ut_pool.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/graph/ut/graph_ut.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/ut/graph_ut.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/ut/ut_data_cleanup.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |76.2%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/internals_ut.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/database/ut/ut_database.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/database/ut/ut_database.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/ut/pqtablet_mock.cpp |76.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/benchmark/b_part.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_filter_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_filter_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/sys_view/partition_stats/partition_stats_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/lib/ydb_cli/commands/ydb_node_config.cpp |76.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/lib/ydb_cli/commands/libclicommands.a |76.3%| [AR] {RESULT} $(B)/ydb/public/lib/ydb_cli/commands/libclicommands.a |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/phantom_blobs.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |76.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp |76.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/ydb/ydb |76.4%| [LD] {RESULT} $(B)/ydb/apps/ydb/ydb |76.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydb/ydb |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore/ut_incremental_restore.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore/ut_incremental_restore.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/configs_cache_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_cache_ut.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/cms_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_ut.cpp |76.4%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/engines/ut/ut_program.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_program.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/replication/controller/assign_tx_id_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/assign_tx_id_ut.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/testlib/actors/test_runtime_ut.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |76.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |76.4%| [LD] {RESULT} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/cloud_events_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/cloud_events_ut.cpp |76.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_parser_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_table_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_table_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/console_ut_configs.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_ut_configs.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/health_check/health_check_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/health_check/health_check_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/fetch_request_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/immediate_controls_configurator_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/fetch_request_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/backup/impl/local_partition_reader_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/backup/impl/local_partition_reader_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |76.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/user_info_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_quorum_tracker_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/bootstrapper_ut.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/bootstrapper_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_request_reporting_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/modifications_validator_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tools/stress_tool/device_test_tool_ut.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |76.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tiering/ut/ut_object.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/scheme/kqp_secrets_ut.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_secrets_ut.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |76.7%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp |76.7%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/scan/kqp_point_consolidation_ut.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_point_consolidation_ut.cpp |76.7%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/9270b323afac6e83cea5bf4aa4_raw.auxcpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/9270b323afac6e83cea5bf4aa4_raw.auxcpp |76.7%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |76.7%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |76.7%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |76.7%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |76.7%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/backup_ut/list_objects_in_s3_export_ut.cpp |76.7%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/backup_ut/list_objects_in_s3_export_ut.cpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |76.7%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/494cbbb0dc8b7c9860714b57e7_raw.auxcpp |76.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/normalizer/abstract/abstract.h_serialized.cpp |76.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/494cbbb0dc8b7c9860714b57e7_raw.auxcpp |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/metering.h_serialized.cpp |76.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |76.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |76.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp |76.8%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/console_config__intpy3___pb2.py.p5ju.yapyc3 |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/kqprun/src/common.h_serialized.cpp |76.8%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/msgbus__intpy3___pb2_grpc.py.p5ju.yapyc3 |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |76.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |76.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp |76.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/cms/console/feature_flags_configurator_ut.cpp |76.8%| [CC] 
{default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/compute_actor/kqp_compute_state.h_serialized.cpp |76.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/security/ticket_parser_ut.cpp |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/config/init/init.h_serialized.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/ticket_parser_ut.cpp |76.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/bscontroller/grouper_ut.cpp |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/upload_rows_counters.h_serialized.cpp |76.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/metering.h_serialized.cpp |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |76.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp |76.8%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/node_checkers.h_serialized.cpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp |76.9%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/indexes_ut.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/indexes_ut.cpp |76.9%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |76.9%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |76.9%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |76.9%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |76.9%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp |76.9%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp |76.9%| [PR] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/tx/schemeshard/generated/dispatch_op.h |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp |76.9%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tools/query_replay_yt/query_replay.cpp |76.9%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/e6ce42a762195cf7e946ca411e_raw.auxcpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |76.9%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/5732afb9e28f3811e12c561eb5_raw.auxcpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |76.9%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |76.9%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |76.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp |77.0%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp |77.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |77.0%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |77.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/e6ce42a762195cf7e946ca411e_raw.auxcpp |77.0%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |77.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/5732afb9e28f3811e12c561eb5_raw.auxcpp |77.0%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/console_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |77.0%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp |77.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/config/init/init.h_serialized.cpp |77.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/node_checkers.h_serialized.cpp |77.0%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/afa8c81460609cfd6247d9dbf7_raw.auxcpp |77.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/config/init/libcore-config-init.a |77.0%| [AR] {RESULT} $(B)/ydb/core/config/init/libcore-config-init.a |77.0%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_reader/contexts.h_serialized.cpp |77.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |77.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/kqprun/src/common.h_serialized.cpp |77.0%| [CC] {default-linux-x86_64, relwithdebinfo} 
$(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp |77.0%| [EN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp |77.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |77.0%| [AR] {RESULT} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |77.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |77.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/libydb-core-cms.a |77.0%| [AR] {RESULT} $(B)/ydb/core/cms/libydb-core-cms.a |77.0%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/msgbus__intpy3___pb2.py.p5ju.yapyc3 |77.0%| [PR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/8760973cadf4aa816a9d37589f_raw.auxcpp |77.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/afa8c81460609cfd6247d9dbf7_raw.auxcpp |77.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp >> TActorTest::TestDie [GOOD] >> TActorTest::TestFilteredGrab >> TActorTest::TestFilteredGrab [GOOD] |77.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestFilteredGrab [GOOD] >> TActorTest::TestCreateChildActor [GOOD] >> TActorTest::TestBlockEvents >> TActorTest::TestBlockEvents [GOOD] |77.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/8760973cadf4aa816a9d37589f_raw.auxcpp >> TActorTest::TestWaitForFirstEvent |77.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a >> TActorTest::TestWaitForFirstEvent [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestBlockEvents [GOOD] Test command err: ... waiting for blocked 3 events ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... waiting for blocked 3 events (done) ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... 
unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... waiting for blocked 1 more event ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... waiting for blocked 1 more event (done) ... waiting for processed 2 more events ... waiting for processed 2 more events (done) ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... waiting for processed 3 more events ... waiting for processed 3 more events (done) |77.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp |77.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/normalizer/abstract/abstract.h_serialized.cpp |77.1%| [AR] {RESULT} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitForFirstEvent [GOOD] Test command err: ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger (done) ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger (done) >> TActorTest::TestWaitFuture [GOOD] |77.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp |77.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitFuture [GOOD] >> TActorTest::TestWaitFor [GOOD] >> TActorTest::TestSendFromAnotherThread >> TActorTest::TestSendEvent [GOOD] >> TActorTest::TestSendAfterDelay >> TActorTest::TestScheduleEvent [GOOD] >> TActorTest::TestScheduleReaction [GOOD] >> TActorTest::TestSendAfterDelay [GOOD] >> TActorTest::TestStateSwitch ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitFor [GOOD] Test command err: ... waiting for value = 42 ... 
waiting for value = 42 (done) >> TActorTest::TestStateSwitch [GOOD] |77.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |77.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestScheduleReaction [GOOD] |77.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestSendAfterDelay [GOOD] |77.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestStateSwitch [GOOD] >> TActorTest::TestHandleEvent [GOOD] >> TActorTest::TestGetCtxTime [GOOD] |77.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/upload_rows_counters.h_serialized.cpp >> TActorTest::TestSendFromAnotherThread [GOOD] |77.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestGetCtxTime [GOOD] |77.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestSendFromAnotherThread [GOOD] |77.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |77.1%| [LD] {RESULT} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |77.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |77.1%| [LD] {RESULT} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |77.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |77.1%| [LD] {RESULT} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |77.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |77.1%| [LD] {RESULT} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts >> conftest.py::black [GOOD] >> test_join.py::black [GOOD] |77.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage.cpp |77.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |77.1%| [LD] {RESULT} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |77.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |77.1%| [LD] {RESULT} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |77.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a |77.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a |77.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |77.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |77.1%| [LD] {RESULT} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |77.1%| [LD] {RESULT} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |77.1%| [TA] $(B)/ydb/core/testlib/actors/ut/test-results/unittest/{meta.json ... results_accumulator.log} |77.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/bridge/ydb-tests-functional-bridge |77.1%| [TA] {RESULT} $(B)/ydb/core/testlib/actors/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |77.1%| [LD] {RESULT} $(B)/ydb/tests/functional/bridge/ydb-tests-functional-bridge |77.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/generic/streaming/black >> test_join.py::black [GOOD] >> common.cpp::clang_format [GOOD] |77.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |77.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |77.1%| [LD] {RESULT} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |77.1%| [LD] {RESULT} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |77.2%| [TS] {RESULT} ydb/tests/fq/generic/streaming/black >> common.h::clang_format [GOOD] >> conftest.py::black [GOOD] >> test_clickhouse.py::black [GOOD] >> test_greenplum.py::black [GOOD] >> test_join.py::black [GOOD] >> test_mysql.py::black [GOOD] >> test_postgresql.py::black [GOOD] >> test_ydb.py::black [GOOD] |77.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp |77.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp |77.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/generic/analytics/black >> test_ydb.py::black [GOOD] |77.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |77.2%| [TS] {RESULT} ydb/tests/fq/generic/analytics/black |77.2%| [LD] {RESULT} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |77.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/common/clang_format >> common.h::clang_format [GOOD] |77.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |77.2%| [TS] {RESULT} ydb/core/kqp/ut/federated_query/common/clang_format |77.2%| [AR] {RESULT} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |77.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp >> ydb-tests-fq-streaming_optimize::import_test [GOOD] |77.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/streaming_optimize/import_test >> ydb-tests-fq-streaming_optimize::import_test [GOOD] |77.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |77.2%| [TS] {RESULT} ydb/tests/fq/streaming_optimize/import_test |77.2%| [LD] {RESULT} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |77.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/vector_index/medium/ydb-tests-datashard-vector_index-medium |77.2%| [LD] {RESULT} $(B)/ydb/tests/datashard/vector_index/medium/ydb-tests-datashard-vector_index-medium |77.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |77.2%| [LD] {RESULT} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |77.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |77.2%| [LD] {RESULT} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |77.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |77.2%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |77.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |77.2%| [LD] {RESULT} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |77.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a |77.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a |77.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp |77.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |77.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |77.2%| [LD] {RESULT} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |77.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |77.3%| [LD] {RESULT} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |77.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |77.3%| [LD] {RESULT} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |77.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |77.3%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common >> ydb-tests-functional-kqp-plan2svg::import_test [GOOD] >> ydb-tests-tools-kqprun-tests::import_test [GOOD] |77.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |77.3%| [LD] {RESULT} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |77.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a |77.3%| [AR] {RESULT} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a |77.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |77.3%| [LD] {RESULT} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |77.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |77.3%| [LD] {RESULT} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |77.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/kqp/plan2svg/import_test >> ydb-tests-functional-kqp-plan2svg::import_test [GOOD] |77.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/kqprun/tests/import_test >> ydb-tests-tools-kqprun-tests::import_test [GOOD] >> ydb-tests-olap-s3_import-large::import_test [GOOD] |77.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |77.3%| [TS] {RESULT} ydb/tests/functional/kqp/plan2svg/import_test |77.3%| [TS] {RESULT} ydb/tests/tools/kqprun/tests/import_test |77.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |77.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/s3_import/large/import_test >> ydb-tests-olap-s3_import-large::import_test [GOOD] >> ydb-tests-olap-load::import_test [GOOD] |77.3%| [LD] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |77.3%| [TS] {RESULT} ydb/tests/olap/s3_import/large/import_test |77.3%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |77.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/load/import_test >> ydb-tests-olap-load::import_test [GOOD] |77.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp |77.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |77.3%| [TS] {RESULT} ydb/tests/olap/load/import_test |77.3%| [LD] {RESULT} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |77.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |77.3%| [LD] {RESULT} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |77.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |77.3%| [LD] {RESULT} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |77.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |77.3%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |77.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |77.3%| [LD] {RESULT} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |77.4%| [LD] {RESULT} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |77.4%| [LD] {RESULT} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |77.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp |77.4%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |77.4%| [LD] {RESULT} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |77.4%| [LD] {RESULT} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |77.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/olap/delete/ydb-tests-olap-delete |77.4%| [LD] {RESULT} $(B)/ydb/tests/olap/delete/ydb-tests-olap-delete |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |77.4%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |77.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp |77.4%| [LD] 
{default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/olap/ydb-tests-olap |77.4%| [LD] {RESULT} $(B)/ydb/tests/olap/ydb-tests-olap |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |77.4%| [LD] {RESULT} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/suite_tests/ydb-tests-functional-suite_tests |77.4%| [LD] {RESULT} $(B)/ydb/tests/functional/suite_tests/ydb-tests-functional-suite_tests |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |77.4%| [LD] {RESULT} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |77.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |77.4%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |77.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |77.4%| [LD] {RESULT} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |77.5%| [LD] {RESULT} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |77.5%| [LD] {RESULT} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |77.5%| [LD] {RESULT} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |77.5%| [LD] {RESULT} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |77.5%| [LD] {RESULT} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |77.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |77.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp >> ydb-tests-functional-script_execution::import_test [GOOD] |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |77.5%| [LD] {RESULT} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |77.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/compute_actor/kqp_compute_state.h_serialized.cpp |77.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |77.5%| [LD] {RESULT} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |77.5%| [LD] {RESULT} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |77.5%| [TS] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/script_execution/import_test >> ydb-tests-functional-script_execution::import_test [GOOD] |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |77.5%| [TS] {RESULT} ydb/tests/functional/script_execution/import_test |77.5%| [LD] {RESULT} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication >> ydb-tests-functional-ydb_cli::import_test [GOOD] |77.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a |77.5%| [AR] {RESULT} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |77.5%| [LD] {RESULT} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |77.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/import_test >> ydb-tests-functional-ydb_cli::import_test [GOOD] |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/library/ut/ydb-tests-library-ut >> ydb-tests-olap-ttl_tiering::import_test [GOOD] |77.5%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |77.5%| [TS] {RESULT} ydb/tests/functional/ydb_cli/import_test |77.5%| [LD] {RESULT} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |77.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |77.6%| [LD] {RESULT} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |77.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |77.6%| [LD] {RESULT} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |77.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/import_test >> ydb-tests-olap-ttl_tiering::import_test [GOOD] |77.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |77.6%| [TS] {RESULT} ydb/tests/olap/ttl_tiering/import_test |77.6%| [LD] {RESULT} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |77.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp |77.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |77.6%| [LD] {RESULT} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |77.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |77.6%| [LD] {RESULT} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |77.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |77.6%| [LD] {RESULT} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests >> ydb-tests-functional-hive::import_test [GOOD] >> ydb-tests-functional-bridge::import_test [GOOD] |77.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp >> ydb-tests-functional-serverless::import_test [GOOD] |77.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/import_test >> ydb-tests-functional-hive::import_test [GOOD] >> ydb-tests-functional-restarts::import_test [GOOD] |77.6%| [LD] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/stress-reconfig_state_storage_workload-tests |77.6%| [TS] {RESULT} ydb/tests/functional/hive/import_test |77.6%| [LD] {RESULT} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/stress-reconfig_state_storage_workload-tests |77.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/bridge/import_test >> ydb-tests-functional-bridge::import_test [GOOD] |77.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/import_test >> ydb-tests-functional-serverless::import_test [GOOD] |77.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/import_test >> ydb-tests-functional-restarts::import_test [GOOD] |77.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |77.6%| [TS] {RESULT} ydb/tests/functional/bridge/import_test |77.6%| [TS] {RESULT} ydb/tests/functional/serverless/import_test |77.6%| [TS] {RESULT} ydb/tests/functional/restarts/import_test |77.6%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |77.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/s3_backups/s3_backups |77.6%| [LD] {RESULT} $(B)/ydb/tests/stress/s3_backups/s3_backups >> ydb-tests-functional-audit::import_test [GOOD] |77.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp |77.6%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |77.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/import_test >> ydb-tests-functional-audit::import_test [GOOD] |77.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |77.6%| [TS] {RESULT} ydb/tests/functional/audit/import_test |77.6%| [LD] {RESULT} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |77.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |77.6%| [LD] {RESULT} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |77.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/sql/ydb-tests-sql |77.6%| [LD] {RESULT} $(B)/ydb/tests/sql/ydb-tests-sql |77.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |77.7%| [LD] {RESULT} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |77.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |77.7%| [LD] {RESULT} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |77.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |77.7%| [LD] {RESULT} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests >> ydb-tests-functional-canonical::import_test [GOOD] |77.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a |77.7%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a |77.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |77.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ut_data_erasure_reboots.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ut_data_erasure_reboots.cpp |77.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/import_test >> 
ydb-tests-functional-canonical::import_test [GOOD] |77.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |77.7%| [TS] {RESULT} ydb/tests/functional/canonical/import_test |77.7%| [LD] {RESULT} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |77.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |77.7%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |77.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp |77.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp >> ydb-tests-datashard-secondary_index::import_test [GOOD] |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp |77.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |77.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |77.7%| [LD] {RESULT} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |77.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/datashard/vector_index/large/ydb-tests-datashard-vector_index-large |77.7%| [LD] {RESULT} $(B)/ydb/tests/datashard/vector_index/large/ydb-tests-datashard-vector_index-large |77.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/datastreams/datastreams_ut.cpp |77.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests >> ydb-tests-datashard-dump_restore::import_test [GOOD] |77.7%| [LD] {RESULT} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |77.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |77.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |77.7%| [LD] {RESULT} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |77.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/import_test >> ydb-tests-datashard-secondary_index::import_test [GOOD] |77.7%| [TS] {RESULT} ydb/tests/datashard/secondary_index/import_test |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/datastreams_ut.cpp |77.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/import_test >> ydb-tests-datashard-dump_restore::import_test [GOOD] |77.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a >> ydb-tests-functional-rename::import_test [GOOD] |77.8%| [TS] {RESULT} ydb/tests/datashard/dump_restore/import_test |77.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a |77.8%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/tests/tools/fqrun/fqrun.cpp |77.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |77.8%| [LD] {RESULT} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |77.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a |77.8%| [AR] {RESULT} 
$(B)/ydb/core/blob_depot/libydb-core-blob_depot.a |77.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |77.8%| [LD] {RESULT} $(B)/ydb/tests/compatibility/ydb-tests-compatibility >> ydb-tests-functional-autoconfig::import_test [GOOD] |77.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/import_test >> ydb-tests-functional-rename::import_test [GOOD] |77.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |77.8%| [TS] {RESULT} ydb/tests/functional/rename/import_test |77.8%| [LD] {RESULT} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |77.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/import_test >> ydb-tests-functional-autoconfig::import_test [GOOD] >> ydb-tests-olap-column_family-compression::import_test [GOOD] |77.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |77.8%| [TS] {RESULT} ydb/tests/functional/autoconfig/import_test |77.8%| [LD] {RESULT} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |77.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |77.8%| [LD] {RESULT} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |77.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/large_serializable/ydb-tests-functional-large_serializable |77.8%| [LD] {RESULT} $(B)/ydb/tests/functional/large_serializable/ydb-tests-functional-large_serializable |77.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/import_test >> ydb-tests-olap-column_family-compression::import_test [GOOD] |77.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |77.9%| [TS] {RESULT} ydb/tests/olap/column_family/compression/import_test |77.9%| [LD] {RESULT} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |77.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests >> ydb-tests-functional-sqs-common::import_test [GOOD] |77.9%| [LD] {RESULT} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |77.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |77.9%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |77.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |77.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/import_test >> ydb-tests-functional-sqs-common::import_test [GOOD] |77.9%| [TS] {RESULT} ydb/tests/functional/sqs/common/import_test |77.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |77.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |77.9%| [LD] {RESULT} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |77.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |77.9%| [LD] {RESULT} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import >> 
ydb-tests-functional-sqs-cloud::import_test [GOOD] |77.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |77.9%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |77.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/import_test >> ydb-tests-functional-sqs-cloud::import_test [GOOD] |77.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |77.9%| [LD] {RESULT} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |77.9%| [TS] {RESULT} ydb/tests/functional/sqs/cloud/import_test |77.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp |77.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |77.9%| [LD] {RESULT} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |77.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/minidumps/ydb-tests-functional-minidumps |77.9%| [LD] {RESULT} $(B)/ydb/tests/functional/minidumps/ydb-tests-functional-minidumps >> ydb-tests-functional-sqs-with_quotas::import_test [GOOD] |77.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |77.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |77.9%| [LD] {RESULT} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp >> ydb-tests-functional-tpc-medium::import_test [GOOD] >> ydb-tests-functional-ttl::import_test [GOOD] >> ydb-tests-functional-scheme_shard::import_test [GOOD] |77.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/import_test >> ydb-tests-functional-sqs-with_quotas::import_test [GOOD] |77.9%| [TS] {RESULT} ydb/tests/functional/sqs/with_quotas/import_test |77.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/example/ydb-tests-example |77.9%| [LD] {RESULT} $(B)/ydb/tests/example/ydb-tests-example |77.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |77.9%| [LD] {RESULT} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |78.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tpc/medium/import_test >> ydb-tests-functional-tpc-medium::import_test [GOOD] |78.0%| [TS] {RESULT} ydb/tests/functional/tpc/medium/import_test |78.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |78.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |78.0%| [LD] {RESULT} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |78.0%| [LD] {RESULT} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming >> ydb-tests-olap-scenario::import_test [GOOD] |78.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/import_test >> ydb-tests-functional-ttl::import_test [GOOD] |78.0%| [TS] {RESULT} ydb/tests/functional/ttl/import_test |78.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/import_test >> 
ydb-tests-functional-scheme_shard::import_test [GOOD] |78.0%| [TS] {RESULT} ydb/tests/functional/scheme_shard/import_test |78.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/import_test >> ydb-tests-olap-scenario::import_test [GOOD] |78.0%| [TS] {RESULT} ydb/tests/olap/scenario/import_test >> ydb-tests-functional-sqs-multinode::import_test [GOOD] >> ydb-tests-datashard-s3::import_test [GOOD] |78.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/import_test >> ydb-tests-functional-sqs-multinode::import_test [GOOD] |78.0%| [TS] {RESULT} ydb/tests/functional/sqs/multinode/import_test |78.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |78.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/import_test >> ydb-tests-datashard-s3::import_test [GOOD] |78.0%| [TS] {RESULT} ydb/tests/datashard/s3/import_test >> ydb-tests-library-ut::import_test [GOOD] >> ydb-tests-tools-nemesis-ut::import_test [GOOD] |78.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/library/ut/import_test >> ydb-tests-library-ut::import_test [GOOD] |78.0%| [TS] {RESULT} ydb/tests/library/ut/import_test |78.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/import_test >> ydb-tests-tools-nemesis-ut::import_test [GOOD] |78.0%| [TS] {RESULT} ydb/tests/tools/nemesis/ut/import_test |78.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_reader/contexts.h_serialized.cpp |78.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |78.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a |78.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a |78.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp >> stress-reconfig_state_storage_workload-tests::import_test [GOOD] |78.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/libydb-core-persqueue.a |78.0%| [AR] {RESULT} $(B)/ydb/core/persqueue/libydb-core-persqueue.a |78.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |78.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/reconfig_state_storage_workload/tests/import_test >> stress-reconfig_state_storage_workload-tests::import_test [GOOD] |78.0%| [TS] {RESULT} ydb/tests/stress/reconfig_state_storage_workload/tests/import_test >> ydb-tests-functional-api::import_test [GOOD] |78.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp >> ydb-tests-stress-s3_backups-tests::import_test [GOOD] |78.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/import_test >> ydb-tests-functional-api::import_test [GOOD] |78.0%| [TS] {RESULT} ydb/tests/functional/api/import_test >> ydb-tests-olap-oom::import_test [GOOD] >> ydb-tests-datashard-partitioning::import_test [GOOD] >> ydb-tests-stress-node_broker-tests::import_test [GOOD] |78.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a |78.1%| [AR] {RESULT} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/oom/import_test >> ydb-tests-olap-oom::import_test [GOOD] |78.1%| 
[TS] {RESULT} ydb/tests/olap/oom/import_test |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/s3_backups/tests/import_test >> ydb-tests-stress-s3_backups-tests::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/stress/s3_backups/tests/import_test |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/import_test >> ydb-tests-datashard-partitioning::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/datashard/partitioning/import_test |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/node_broker/tests/import_test >> ydb-tests-stress-node_broker-tests::import_test [GOOD] >> ydb-tests-fq-plans::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/stress/node_broker/tests/import_test >> ydb-core-viewer-tests::import_test [GOOD] >> ydb-tests-stress-transfer-tests::import_test [GOOD] |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/plans/import_test >> ydb-tests-fq-plans::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/fq/plans/import_test >> ydb-tests-functional-query_cache::import_test [GOOD] |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/transfer/tests/import_test >> ydb-tests-stress-transfer-tests::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/stress/transfer/tests/import_test |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/tests/import_test >> ydb-core-viewer-tests::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/core/viewer/tests/import_test |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/query_cache/import_test >> ydb-tests-functional-query_cache::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/functional/query_cache/import_test >> ydb-tests-postgres_integrations-go-libpq::import_test [GOOD] >> kqprun_recipe::import_test [GOOD] |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/postgres_integrations/go-libpq/import_test >> ydb-tests-postgres_integrations-go-libpq::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/postgres_integrations/go-libpq/import_test |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/kqprun/recipe/import_test >> kqprun_recipe::import_test [GOOD] >> ydb-tests-functional-postgresql::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/tools/kqprun/recipe/import_test |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/postgresql/import_test >> ydb-tests-functional-postgresql::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/functional/postgresql/import_test >> ydb-tests-stress-cdc-tests::import_test [GOOD] |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/cdc/tests/import_test >> ydb-tests-stress-cdc-tests::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/stress/cdc/tests/import_test |78.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp >> functional-sqs-merge_split_common_table-fifo::import_test [GOOD] >> ydb-tests-fq-http_api::import_test [GOOD] >> ydb-tests-stress-show_create-view-tests::import_test [GOOD] |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/merge_split_common_table/fifo/import_test >> functional-sqs-merge_split_common_table-fifo::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/fifo/import_test |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/http_api/import_test >> ydb-tests-fq-http_api::import_test [GOOD] >> ydb-tests-fq-common::import_test [GOOD] |78.1%| [TS] {RESULT} 
ydb/tests/fq/http_api/import_test |78.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp >> ydb-tests-stress-log-tests::import_test [GOOD] |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/show_create/view/tests/import_test >> ydb-tests-stress-show_create-view-tests::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/stress/show_create/view/tests/import_test |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/common/import_test >> ydb-tests-fq-common::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/fq/common/import_test |78.1%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a |78.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/log/tests/import_test >> ydb-tests-stress-log-tests::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/stress/log/tests/import_test >> ydb-tests-fq-mem_alloc::import_test [GOOD] >> ydb-tests-datashard-split_merge::import_test [GOOD] >> ydb-tests-stress-kv-tests::import_test [GOOD] |78.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/mem_alloc/import_test >> ydb-tests-fq-mem_alloc::import_test [GOOD] |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/kv/tests/import_test >> ydb-tests-stress-kv-tests::import_test [GOOD] |78.1%| [TS] {RESULT} ydb/tests/fq/mem_alloc/import_test |78.2%| [TS] {RESULT} ydb/tests/stress/kv/tests/import_test |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/import_test >> ydb-tests-datashard-split_merge::import_test [GOOD] |78.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp |78.2%| [TS] {RESULT} ydb/tests/datashard/split_merge/import_test |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp >> ydb-tests-stress-oltp_workload-tests::import_test [GOOD] >> ydb-tests-stress-simple_queue-tests::import_test [GOOD] >> ydb-tests-functional-wardens::import_test [GOOD] |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/oltp_workload/tests/import_test >> ydb-tests-stress-oltp_workload-tests::import_test [GOOD] |78.2%| [TS] {RESULT} ydb/tests/stress/oltp_workload/tests/import_test |78.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/compaction_ut.cpp |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/simple_queue/tests/import_test >> ydb-tests-stress-simple_queue-tests::import_test [GOOD] |78.2%| [TS] {RESULT} ydb/tests/stress/simple_queue/tests/import_test |78.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a >> ydb-tests-functional-large_serializable::import_test [GOOD] |78.2%| [AR] {RESULT} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a |78.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/compaction_ut.cpp |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/wardens/import_test >> ydb-tests-functional-wardens::import_test [GOOD] |78.2%| [TS] {RESULT} ydb/tests/functional/wardens/import_test >> ydb-tests-stress-olap_workload-tests::import_test [GOOD] |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/large_serializable/import_test >> ydb-tests-functional-large_serializable::import_test [GOOD] |78.2%| [TS] {RESULT} ydb/tests/functional/large_serializable/import_test >> functional-sqs-merge_split_common_table-std::import_test [GOOD] >> ydb-tests-functional-serializable::import_test [GOOD] |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/olap_workload/tests/import_test >> ydb-tests-stress-olap_workload-tests::import_test [GOOD] |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/merge_split_common_table/std/import_test >> functional-sqs-merge_split_common_table-std::import_test [GOOD] |78.2%| [TS] {RESULT} ydb/tests/stress/olap_workload/tests/import_test |78.2%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |78.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |78.2%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/std/import_test >> ydb-tests-fq-s3::import_test [GOOD] >> ydb-tests-fq-restarts::import_test [GOOD] |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serializable/import_test >> ydb-tests-functional-serializable::import_test [GOOD] |78.2%| [TS] {RESULT} ydb/tests/functional/serializable/import_test >> ydb-tests-functional-minidumps::import_test [GOOD] |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/import_test >> ydb-tests-fq-s3::import_test [GOOD] |78.2%| [TS] {RESULT} ydb/tests/fq/s3/import_test |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/restarts/import_test >> ydb-tests-fq-restarts::import_test [GOOD] |78.2%| [TS] {RESULT} ydb/tests/fq/restarts/import_test |78.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/minidumps/import_test >> ydb-tests-functional-minidumps::import_test [GOOD] |78.2%| [TS] {RESULT} ydb/tests/functional/minidumps/import_test >> ydb-tests-stress-mixedpy::import_test [GOOD] >> ydb-tests-example::import_test [GOOD] >> ydb-tests-tools-pq_read-test::import_test [GOOD] |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/mixedpy/import_test >> ydb-tests-stress-mixedpy::import_test [GOOD] |78.2%| [TS] {RESULT} ydb/tests/stress/mixedpy/import_test |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/example/import_test >> ydb-tests-example::import_test [GOOD] |78.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/pq_read/test/import_test >> ydb-tests-tools-pq_read-test::import_test [GOOD] |78.2%| [TS] {RESULT} ydb/tests/example/import_test |78.3%| [TS] {RESULT} ydb/tests/tools/pq_read/test/import_test |78.2%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp >> ydb-tests-functional-encryption::import_test [GOOD] |78.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/kqp/ut/olap/locks_ut.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/locks_ut.cpp >> ydb-tests-functional-scheme_tests::import_test [GOOD] |78.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a >> ydb-tests-functional-tpc-large::import_test [GOOD] |78.3%| [AR] {RESULT} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/encryption/import_test >> ydb-tests-functional-encryption::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/functional/encryption/import_test >> ydb-tests-olap-s3_import::import_test [GOOD] |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_tests/import_test >> ydb-tests-functional-scheme_tests::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/functional/scheme_tests/import_test |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tpc/large/import_test >> ydb-tests-functional-tpc-large::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/functional/tpc/large/import_test |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/s3_import/import_test >> ydb-tests-olap-s3_import::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/olap/s3_import/import_test >> ydb-tests-functional-limits::import_test [GOOD] >> ydb-tests-datashard-vector_index-large::import_test [GOOD] >> ydb-tests-functional-cms::import_test [GOOD] |78.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/import_test >> ydb-tests-functional-limits::import_test [GOOD] |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/large/import_test >> ydb-tests-datashard-vector_index-large::import_test [GOOD] >> ydb-tests-datashard-async_replication::import_test [GOOD] >> ydb-tests-functional-sqs-messaging::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/functional/limits/import_test |78.3%| [TS] {RESULT} ydb/tests/datashard/vector_index/large/import_test |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/import_test >> ydb-tests-functional-cms::import_test [GOOD] >> ydb-tests-sql::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/functional/cms/import_test |78.3%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |78.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/import_test >> ydb-tests-datashard-async_replication::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/datashard/async_replication/import_test |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/import_test >> ydb-tests-functional-sqs-messaging::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/functional/sqs/messaging/import_test >> ydb-tests-functional-blobstorage::import_test [GOOD] |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/sql/import_test >> ydb-tests-sql::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/sql/import_test >> ydb-tests-functional-sqs-large::import_test [GOOD] |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/import_test >> ydb-tests-functional-blobstorage::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/functional/blobstorage/import_test >> ydb-tests-functional-suite_tests::import_test [GOOD] >> ydb-tests-fq-yds::import_test [GOOD] |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/import_test >> ydb-tests-functional-sqs-large::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/functional/sqs/large/import_test |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yds/import_test >> ydb-tests-fq-yds::import_test [GOOD] 
|78.3%| [TS] {RESULT} ydb/tests/fq/yds/import_test |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/import_test >> ydb-tests-functional-suite_tests::import_test [GOOD] >> ydb-tests-olap::import_test [GOOD] >> ydb-tests-olap-delete::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/functional/suite_tests/import_test |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/delete/import_test >> ydb-tests-olap-delete::import_test [GOOD] |78.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/import_test >> ydb-tests-olap::import_test [GOOD] |78.3%| [TS] {RESULT} ydb/tests/olap/delete/import_test |78.4%| [TS] {RESULT} ydb/tests/olap/import_test |78.3%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp >> ydb-tests-fq-multi_plane::import_test [GOOD] >> ydb-tests-olap-data_quotas::import_test [GOOD] |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/multi_plane/import_test >> ydb-tests-fq-multi_plane::import_test [GOOD] |78.4%| [TS] {RESULT} ydb/tests/fq/multi_plane/import_test >> ydb-tests-functional-tenants::import_test [GOOD] >> ydb-tests-compatibility::import_test [GOOD] |78.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |78.4%| [AR] {RESULT} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/import_test >> ydb-tests-olap-data_quotas::import_test [GOOD] |78.4%| [TS] {RESULT} ydb/tests/olap/data_quotas/import_test |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/import_test >> ydb-tests-functional-tenants::import_test [GOOD] |78.4%| [TS] {RESULT} ydb/tests/functional/tenants/import_test |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/compatibility/import_test >> ydb-tests-compatibility::import_test [GOOD] |78.4%| [TS] {RESULT} ydb/tests/compatibility/import_test >> ydb-tests-functional-config::import_test [GOOD] |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/import_test >> ydb-tests-functional-config::import_test [GOOD] |78.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |78.4%| [TS] {RESULT} ydb/tests/functional/config/import_test |78.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |78.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a |78.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a >> ydb-tests-datashard-select::import_test [GOOD] |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/import_test >> ydb-tests-datashard-select::import_test [GOOD] |78.4%| [TS] {RESULT} ydb/tests/datashard/select/import_test >> ydb-tests-sql-large::import_test [GOOD] |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/sql/large/import_test >> ydb-tests-sql-large::import_test [GOOD] |78.4%| [TS] {RESULT} ydb/tests/sql/large/import_test >> ydb-tests-datashard-copy_table::import_test [GOOD] >> ydb-tests-datashard-dml::import_test [GOOD] |78.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/datashard/dml/import_test >> ydb-tests-datashard-dml::import_test [GOOD] |78.4%| [TS] {RESULT} ydb/tests/datashard/dml/import_test |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/import_test >> ydb-tests-datashard-copy_table::import_test [GOOD] |78.4%| [TS] {RESULT} ydb/tests/datashard/copy_table/import_test |78.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator_ext_blobs.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator_ext_blobs.cpp >> ydb-tests-datashard-parametrized_queries::import_test [GOOD] |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/import_test >> ydb-tests-datashard-parametrized_queries::import_test [GOOD] |78.4%| [TS] {RESULT} ydb/tests/datashard/parametrized_queries/import_test >> ydb-tests-datashard-ttl::import_test [GOOD] >> ydb-tests-datashard-vector_index-medium::import_test [GOOD] |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/import_test >> ydb-tests-datashard-ttl::import_test [GOOD] |78.4%| [TS] {RESULT} ydb/tests/datashard/ttl/import_test |78.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/import_test >> ydb-tests-datashard-vector_index-medium::import_test [GOOD] |78.4%| [TS] {RESULT} ydb/tests/datashard/vector_index/medium/import_test |78.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp |78.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |78.4%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |78.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |78.4%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/tx/schemeshard/schemeshard__operation.cpp |78.5%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |78.5%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |78.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |78.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |78.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |78.5%| [LD] {RESULT} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |78.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |78.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |78.5%| [LD] {RESULT} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut >> TComputeScheduler::ResourceWeight [GOOD] >> TKqpScanData::ArrowToUnboxedValueConverter [GOOD] >> TComputeScheduler::QueryLimits [GOOD] >> TKqpScanData::FailOnUnsupportedPgType [GOOD] >> TKqpScanData::EmptyColumns [GOOD] >> TComputeScheduler::TTotalLimits [GOOD] >> TKqpScanData::EmptyColumnsAndNonEmptyArrowBatch [GOOD] >> TKqpScanData::DifferentNumberOfInputAndResultColumns |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::UnboxedValueSize [GOOD] >> TBalanceCoverageBuilderTest::TestComplexSplitWithDuplicates [GOOD] >> 
TBalanceCoverageBuilderTest::TestSplitWithMergeBack [GOOD] >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeOne [GOOD] >> TKqpScanData::DifferentNumberOfInputAndResultColumns [GOOD] >> TBalanceCoverageBuilderTest::TestZeroTracks [GOOD] >> TBalanceCoverageBuilderTest::TestSimpleSplit [GOOD] >> TBalanceCoverageBuilderTest::TestOneSplit [GOOD] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::ArrowToUnboxedValueConverter [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::ResourceWeight [GOOD] Test command err: 510 500 1510 1500 990 1000 1000 1000 >> TBalanceCoverageBuilderTest::TestEmpty [GOOD] >> TBalanceCoverageBuilderTest::TestComplexSplit [GOOD] >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeAll [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::QueryLimits [GOOD] Test command err: 800 800 800 800 |78.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/balance_coverage/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::TTotalLimits [GOOD] Test command err: 1610 1600 1610 1600 |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::FailOnUnsupportedPgType [GOOD] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::EmptyColumns [GOOD] |78.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestComplexSplitWithDuplicates [GOOD] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::UnboxedValueSize [GOOD] |78.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSplitWithMergeBack [GOOD] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::EmptyColumnsAndNonEmptyArrowBatch [GOOD] |78.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeOne [GOOD] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::DifferentNumberOfInputAndResultColumns [GOOD] |78.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSimpleSplit [GOOD] |78.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestEmpty [GOOD] |78.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestZeroTracks [GOOD] |78.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestOneSplit [GOOD] |78.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeAll [GOOD] |78.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestComplexSplit [GOOD] |78.5%| [TA] $(B)/ydb/core/kqp/runtime/ut/test-results/unittest/{meta.json ... results_accumulator.log} |78.5%| [TA] $(B)/ydb/core/tx/balance_coverage/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |78.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |78.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |78.5%| [TA] {RESULT} $(B)/ydb/core/kqp/runtime/ut/test-results/unittest/{meta.json ... results_accumulator.log} |78.5%| [TA] {RESULT} $(B)/ydb/core/tx/balance_coverage/ut/test-results/unittest/{meta.json ... results_accumulator.log} |78.5%| [LD] {RESULT} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |78.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut |78.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut |78.5%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut >> TTxLocatorTest::TestAllocateAllByPieces >> TTxLocatorTest::TestAllocateAll >> TTxLocatorTest::TestImposibleSize >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx >> TTxLocatorTest::TestZeroRange >> TTxLocatorTest::Boot >> TTxLocatorTest::TestAllocateAllByPieces [GOOD] >> TTxLocatorTest::TestAllocateAll [GOOD] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestImposibleSize [GOOD] >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx [GOOD] >> TTxLocatorTest::TestZeroRange [GOOD] >> TTxLocatorTest::Boot [GOOD] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator/ut/unittest |78.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots >> TTxLocatorTest::TestWithReboot |78.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |78.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestAllocateAllByPieces [GOOD] Test command err: 2025-06-30T09:16:37.474823Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-30T09:16:37.474935Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-30T09:16:37.475045Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-30T09:16:37.475440Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.475523Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-30T09:16:37.477808Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.477832Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.477842Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.477856Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-30T09:16:37.477878Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.477894Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-30T09:16:37.477923Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-30T09:16:37.478052Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#8796093022207 2025-06-30T09:16:37.478137Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.478144Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.478157Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 8796093022207 2025-06-30T09:16:37.478163Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 0 to# 8796093022207 expected SUCCESS 2025-06-30T09:16:37.478842Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:78:2111] requested range size#8796093022207 2025-06-30T09:16:37.478917Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.478927Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.478938Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8796093022207 Reserved to# 17592186044414 2025-06-30T09:16:37.478944Z node 1 :TX_ALLOCATOR DEBUG: 
txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:78:2111] TEvAllocateResult from# 8796093022207 to# 17592186044414 expected SUCCESS 2025-06-30T09:16:37.478991Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:82:2115] requested range size#8796093022207 2025-06-30T09:16:37.479026Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479034Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479045Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 17592186044414 Reserved to# 26388279066621 2025-06-30T09:16:37.479049Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:82:2115] TEvAllocateResult from# 17592186044414 to# 26388279066621 expected SUCCESS 2025-06-30T09:16:37.479089Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:86:2119] requested range size#8796093022207 2025-06-30T09:16:37.479119Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479128Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479138Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 26388279066621 Reserved to# 35184372088828 2025-06-30T09:16:37.479141Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:86:2119] TEvAllocateResult from# 26388279066621 to# 35184372088828 expected SUCCESS 2025-06-30T09:16:37.479180Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:90:2123] requested range size#8796093022207 2025-06-30T09:16:37.479208Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479217Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479228Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 35184372088828 Reserved to# 43980465111035 2025-06-30T09:16:37.479233Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:90:2123] TEvAllocateResult from# 35184372088828 to# 43980465111035 expected SUCCESS 2025-06-30T09:16:37.479275Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:94:2127] requested range size#8796093022207 2025-06-30T09:16:37.479308Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK 
StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479317Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479326Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 43980465111035 Reserved to# 52776558133242 2025-06-30T09:16:37.479331Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:94:2127] TEvAllocateResult from# 43980465111035 to# 52776558133242 expected SUCCESS 2025-06-30T09:16:37.479377Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:98:2131] requested range size#8796093022207 2025-06-30T09:16:37.479418Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479426Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479435Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 52776558133242 Reserved to# 61572651155449 2025-06-30T09:16:37.479439Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:98:2131] TEvAllocateResult from# 52776558133242 to# 61572651155449 expected SUCCESS 2025-06-30T09:16:37.479479Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:102:2135] requested range size#8796093022207 2025-06-30T09:16:37.479509Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479516Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479524Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 61572651155449 Reserved to# 70368744177656 2025-06-30T09:16:37.479529Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:102:2135] TEvAllocateResult from# 61572651155449 to# 70368744177656 expected SUCCESS 2025-06-30T09:16:37.479569Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:106:2139] requested range size#8796093022207 2025-06-30T09:16:37.479595Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479604Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479612Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 70368744177656 Reserved to# 79164837199863 2025-06-30T09:16:37.479617Z 
node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:106:2139] TEvAllocateResult from# 70368744177656 to# 79164837199863 expected SUCCESS 2025-06-30T09:16:37.479667Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:110:2143] requested range size#8796093022207 2025-06-30T09:16:37.479702Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479711Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.479720Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Suc ... node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:158:2191] TEvAllocateResult from# 184717953466347 to# 193514046488554 expected SUCCESS 2025-06-30T09:16:37.481053Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:162:2195] requested range size#8796093022207 2025-06-30T09:16:37.481089Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:25:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481100Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:25:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481108Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 193514046488554 Reserved to# 202310139510761 2025-06-30T09:16:37.481113Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:162:2195] TEvAllocateResult from# 193514046488554 to# 202310139510761 expected SUCCESS 2025-06-30T09:16:37.481175Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:166:2199] requested range size#8796093022207 2025-06-30T09:16:37.481210Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:26:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481219Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:26:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481229Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 202310139510761 Reserved to# 211106232532968 2025-06-30T09:16:37.481233Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:166:2199] TEvAllocateResult from# 202310139510761 to# 211106232532968 expected SUCCESS 2025-06-30T09:16:37.481303Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:170:2203] requested range size#8796093022207 2025-06-30T09:16:37.481329Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:27:1:24576:76:0] Status# OK StatusFlags# { Valid } 
ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481338Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:27:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481349Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 211106232532968 Reserved to# 219902325555175 2025-06-30T09:16:37.481353Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:170:2203] TEvAllocateResult from# 211106232532968 to# 219902325555175 expected SUCCESS 2025-06-30T09:16:37.481413Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:174:2207] requested range size#8796093022207 2025-06-30T09:16:37.481437Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:28:1:24576:75:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481448Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:28:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481457Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 219902325555175 Reserved to# 228698418577382 2025-06-30T09:16:37.481460Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:174:2207] TEvAllocateResult from# 219902325555175 to# 228698418577382 expected SUCCESS 2025-06-30T09:16:37.481525Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:178:2211] requested range size#8796093022207 2025-06-30T09:16:37.481549Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:29:1:24576:73:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481558Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:29:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481567Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 228698418577382 Reserved to# 237494511599589 2025-06-30T09:16:37.481570Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:178:2211] TEvAllocateResult from# 228698418577382 to# 237494511599589 expected SUCCESS 2025-06-30T09:16:37.481645Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:182:2215] requested range size#8796093022207 2025-06-30T09:16:37.481673Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:30:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481682Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:30:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481691Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 237494511599589 Reserved to# 246290604621796 2025-06-30T09:16:37.481695Z node 1 
:TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:182:2215] TEvAllocateResult from# 237494511599589 to# 246290604621796 expected SUCCESS 2025-06-30T09:16:37.481763Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:186:2219] requested range size#8796093022207 2025-06-30T09:16:37.481790Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:31:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481799Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:31:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481809Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 246290604621796 Reserved to# 255086697644003 2025-06-30T09:16:37.481814Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:186:2219] TEvAllocateResult from# 246290604621796 to# 255086697644003 expected SUCCESS 2025-06-30T09:16:37.481883Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:190:2223] requested range size#8796093022207 2025-06-30T09:16:37.481921Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:32:1:24576:75:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481931Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:32:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.481939Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 255086697644003 Reserved to# 263882790666210 2025-06-30T09:16:37.481943Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:190:2223] TEvAllocateResult from# 255086697644003 to# 263882790666210 expected SUCCESS 2025-06-30T09:16:37.482019Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:194:2227] requested range size#8796093022207 2025-06-30T09:16:37.482065Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:33:1:24576:77:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.482075Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:33:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.482084Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 263882790666210 Reserved to# 272678883688417 2025-06-30T09:16:37.482087Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:194:2227] TEvAllocateResult from# 263882790666210 to# 272678883688417 expected SUCCESS 2025-06-30T09:16:37.482162Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:198:2231] requested range size#8796093022207 2025-06-30T09:16:37.482192Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# 
[72057594046447617:2:34:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.482202Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:34:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.482211Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 272678883688417 Reserved to# 281474976710624 2025-06-30T09:16:37.482214Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:198:2231] TEvAllocateResult from# 272678883688417 to# 281474976710624 expected SUCCESS 2025-06-30T09:16:37.482285Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:202:2235] requested range size#31 2025-06-30T09:16:37.482314Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:35:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.482322Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:35:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.482331Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 281474976710624 Reserved to# 281474976710655 2025-06-30T09:16:37.482335Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:202:2235] TEvAllocateResult from# 281474976710624 to# 281474976710655 expected SUCCESS 2025-06-30T09:16:37.482407Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:206:2239] requested range size#1 2025-06-30T09:16:37.482421Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 281474976710655 Reserved to# 0 2025-06-30T09:16:37.482425Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:206:2239] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestZeroRange [GOOD] Test command err: 2025-06-30T09:16:37.560435Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-30T09:16:37.560510Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-30T09:16:37.560584Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-30T09:16:37.560910Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.560976Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-30T09:16:37.562547Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.562561Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.562568Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.562577Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-30T09:16:37.562591Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.562602Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-30T09:16:37.562618Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-30T09:16:37.562694Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#0 2025-06-30T09:16:37.562778Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.562788Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.562796Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 0 2025-06-30T09:16:37.562800Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 0 to# 0 expected SUCCESS ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestAllocateAll [GOOD] Test command err: 2025-06-30T09:16:37.511024Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-30T09:16:37.511100Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-30T09:16:37.511177Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-30T09:16:37.511470Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.511532Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-30T09:16:37.513097Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.513110Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.513116Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.513126Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-30T09:16:37.513140Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.513151Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-30T09:16:37.513168Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-30T09:16:37.513244Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#281474976710655 2025-06-30T09:16:37.513298Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.513303Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.513311Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 281474976710655 2025-06-30T09:16:37.513315Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 0 to# 281474976710655 expected SUCCESS 2025-06-30T09:16:37.513799Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:78:2111] requested range size#1 2025-06-30T09:16:37.513821Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 281474976710655 Reserved to# 0 2025-06-30T09:16:37.513825Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:78:2111] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::Boot [GOOD] Test command err: 2025-06-30T09:16:37.584600Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-30T09:16:37.584699Z 
node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-30T09:16:37.584792Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-30T09:16:37.585182Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585248Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-30T09:16:37.587409Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.587428Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.587437Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.587452Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-30T09:16:37.587471Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.587486Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-30T09:16:37.587508Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestImposibleSize [GOOD] Test command err: 2025-06-30T09:16:37.552165Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-30T09:16:37.552268Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-30T09:16:37.552383Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-30T09:16:37.552762Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.552840Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-30T09:16:37.555118Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.555142Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.555152Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.555167Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-30T09:16:37.555188Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.555203Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-30T09:16:37.555229Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-30T09:16:37.555341Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#281474976710656 2025-06-30T09:16:37.555377Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 0 Reserved to# 0 2025-06-30T09:16:37.555990Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE 2025-06-30T09:16:37.556054Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:77:2110] requested range size#123456 2025-06-30T09:16:37.556125Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.556134Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.556148Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 123456 2025-06-30T09:16:37.556153Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:77:2110] TEvAllocateResult from# 0 to# 123456 expected SUCCESS 2025-06-30T09:16:37.556192Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:81:2114] requested range size#281474976587200 2025-06-30T09:16:37.556217Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved 
from# 123456 Reserved to# 0 2025-06-30T09:16:37.556222Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:81:2114] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE 2025-06-30T09:16:37.556261Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:84:2117] requested range size#246912 2025-06-30T09:16:37.556299Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.556308Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.556318Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 123456 Reserved to# 370368 2025-06-30T09:16:37.556323Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:84:2117] TEvAllocateResult from# 123456 to# 370368 expected SUCCESS 2025-06-30T09:16:37.556362Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:88:2121] requested range size#281474976340288 2025-06-30T09:16:37.556370Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 370368 Reserved to# 0 2025-06-30T09:16:37.556374Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:88:2121] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx [GOOD] Test command err: 2025-06-30T09:16:37.583144Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-30T09:16:37.583223Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-30T09:16:37.583304Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-30T09:16:37.583590Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.583651Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-30T09:16:37.585212Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585225Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585232Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585241Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-30T09:16:37.585255Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585266Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-30T09:16:37.585282Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-30T09:16:37.585467Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:81:2115] requested range size#100000 2025-06-30T09:16:37.585536Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:83:2117] requested range size#100000 2025-06-30T09:16:37.585607Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:85:2119] requested range size#100000 2025-06-30T09:16:37.585638Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:87:2121] requested range size#100000 2025-06-30T09:16:37.585680Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:89:2123] requested range size#100000 2025-06-30T09:16:37.585717Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585740Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585754Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:91:2125] requested range size#100000 2025-06-30T09:16:37.585775Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585789Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:79:2113] requested range 
size#100000 2025-06-30T09:16:37.585808Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585835Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585847Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585862Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#100000 2025-06-30T09:16:37.585884Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585904Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585911Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585921Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:75:2109] requested range size#100000 2025-06-30T09:16:37.585945Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:77:2111] requested range size#100000 2025-06-30T09:16:37.585966Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.585981Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 100000 2025-06-30T09:16:37.585988Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:81:2115] TEvAllocateResult from# 0 to# 100000 2025-06-30T09:16:37.586004Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.586014Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.586022Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.586030Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 100000 Reserved to# 200000 2025-06-30T09:16:37.586050Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:83:2117] TEvAllocateResult from# 100000 to# 200000 2025-06-30T09:16:37.586069Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:71:0] Status# OK StatusFlags# { Valid } 
ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.586084Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 200000 Reserved to# 300000 2025-06-30T09:16:37.586088Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:85:2119] TEvAllocateResult from# 200000 to# 300000 2025-06-30T09:16:37.586098Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.586107Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 300000 Reserved to# 400000 2025-06-30T09:16:37.586112Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:87:2121] TEvAllocateResult from# 300000 to# 400000 2025-06-30T09:16:37.586126Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.586134Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 400000 Reserved to# 500000 2025-06-30T09:16:37.586138Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:89:2123] TEvAllocateResult from# 400000 to# 500000 2025-06-30T09:16:37.586149Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 500000 Reserved to# 600000 2025-06-30T09:16:37.586152Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:91:2125] TEvAllocateResult from# 500000 to# 600000 2025-06-30T09:16:37.586165Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.586172Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 600000 Reserved to# 700000 2025-06-30T09:16:37.586176Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:79:2113] TEvAllocateResult from# 600000 to# 700000 2025-06-30T09:16:37.586190Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.586200Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.586209Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 700000 Reserved to# 800000 2025-06-30T09:16:37.586213Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 700000 to# 800000 2025-06-30T09:16:37.586226Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.586233Z node 1 :TX_ALLOCATOR DEBUG: 
txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 800000 Reserved to# 900000 2025-06-30T09:16:37.586236Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:75:2109] TEvAllocateResult from# 800000 to# 900000 2025-06-30T09:16:37.586252Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 900000 Reserved to# 1000000 2025-06-30T09:16:37.586255Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:77:2111] TEvAllocateResult from# 900000 to# 1000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-30T09:16:37.587085Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 720575 ... Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595015Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8600000 Reserved to# 8700000 2025-06-30T09:16:37.595022Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:406:2439] TEvAllocateResult from# 8600000 to# 8700000 2025-06-30T09:16:37.595030Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:92:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595037Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8700000 Reserved to# 8800000 2025-06-30T09:16:37.595041Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:408:2441] TEvAllocateResult from# 8700000 to# 8800000 2025-06-30T09:16:37.595054Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:92:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595061Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8800000 Reserved to# 8900000 2025-06-30T09:16:37.595065Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:410:2443] TEvAllocateResult from# 8800000 to# 8900000 2025-06-30T09:16:37.595081Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8900000 Reserved to# 9000000 2025-06-30T09:16:37.595085Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:412:2445] TEvAllocateResult from# 8900000 to# 9000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-30T09:16:37.595532Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:450:2483] requested range size#100000 2025-06-30T09:16:37.595584Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:452:2485] requested range size#100000 2025-06-30T09:16:37.595623Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate 
Sender# [1:434:2467] requested range size#100000 2025-06-30T09:16:37.595663Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:93:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595685Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:436:2469] requested range size#100000 2025-06-30T09:16:37.595702Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:93:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595735Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:438:2471] requested range size#100000 2025-06-30T09:16:37.595771Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:94:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595794Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:440:2473] requested range size#100000 2025-06-30T09:16:37.595810Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:94:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595833Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:95:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595848Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:442:2475] requested range size#100000 2025-06-30T09:16:37.595868Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:95:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595887Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:96:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595906Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:444:2477] requested range size#100000 2025-06-30T09:16:37.595923Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:96:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595939Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:97:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595955Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:446:2479] requested range size#100000 2025-06-30T09:16:37.595971Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:97:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.595989Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:98:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.596004Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 
HANDLE TEvAllocate Sender# [1:448:2481] requested range size#100000 2025-06-30T09:16:37.596046Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9000000 Reserved to# 9100000 2025-06-30T09:16:37.596050Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:450:2483] TEvAllocateResult from# 9000000 to# 9100000 2025-06-30T09:16:37.596059Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:98:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.596065Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:99:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.596085Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9100000 Reserved to# 9200000 2025-06-30T09:16:37.596090Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:452:2485] TEvAllocateResult from# 9100000 to# 9200000 2025-06-30T09:16:37.596099Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:99:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.596117Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9200000 Reserved to# 9300000 2025-06-30T09:16:37.596121Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:434:2467] TEvAllocateResult from# 9200000 to# 9300000 2025-06-30T09:16:37.596129Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:100:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.596144Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9300000 Reserved to# 9400000 2025-06-30T09:16:37.596148Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:436:2469] TEvAllocateResult from# 9300000 to# 9400000 2025-06-30T09:16:37.596155Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:100:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.596170Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9400000 Reserved to# 9500000 2025-06-30T09:16:37.596173Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:438:2471] TEvAllocateResult from# 9400000 to# 9500000 2025-06-30T09:16:37.596182Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:101:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.596193Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9500000 Reserved to# 9600000 2025-06-30T09:16:37.596197Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:440:2473] TEvAllocateResult from# 9500000 to# 
9600000 2025-06-30T09:16:37.596238Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9600000 Reserved to# 9700000 2025-06-30T09:16:37.596242Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:442:2475] TEvAllocateResult from# 9600000 to# 9700000 2025-06-30T09:16:37.596249Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:101:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.596255Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:102:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.596268Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9700000 Reserved to# 9800000 2025-06-30T09:16:37.596272Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:444:2477] TEvAllocateResult from# 9700000 to# 9800000 2025-06-30T09:16:37.596280Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:102:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.596297Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9800000 Reserved to# 9900000 2025-06-30T09:16:37.596301Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:446:2479] TEvAllocateResult from# 9800000 to# 9900000 2025-06-30T09:16:37.596312Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9900000 Reserved to# 10000000 2025-06-30T09:16:37.596317Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:448:2481] TEvAllocateResult from# 9900000 to# 10000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS >> TTxLocatorTest::TestWithReboot [GOOD] |78.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |78.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |78.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |78.6%| [LD] {RESULT} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestWithReboot [GOOD] Test command err: 2025-06-30T09:16:37.964344Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-30T09:16:37.964508Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-30T09:16:37.964661Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 
72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-30T09:16:37.965275Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.965429Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-30T09:16:37.968759Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.968800Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.968815Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.968838Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-30T09:16:37.968872Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.968897Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-30T09:16:37.968939Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-30T09:16:37.969316Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:81:2115] requested range size#100000 2025-06-30T09:16:37.969456Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:83:2117] requested range size#100000 2025-06-30T09:16:37.969562Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:85:2119] requested range size#100000 2025-06-30T09:16:37.969603Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:87:2121] requested range size#100000 2025-06-30T09:16:37.969702Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:89:2123] requested range size#100000 2025-06-30T09:16:37.969739Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.969771Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.969789Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:91:2125] requested range size#100000 2025-06-30T09:16:37.969815Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.969834Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate 
Sender# [1:79:2113] requested range size#100000 2025-06-30T09:16:37.969857Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.969896Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.969914Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.969930Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#100000 2025-06-30T09:16:37.969959Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.969984Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.969992Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970005Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:75:2109] requested range size#100000 2025-06-30T09:16:37.970030Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:77:2111] requested range size#100000 2025-06-30T09:16:37.970064Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970080Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 100000 2025-06-30T09:16:37.970085Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:81:2115] TEvAllocateResult from# 0 to# 100000 2025-06-30T09:16:37.970101Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970110Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970116Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970125Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 100000 Reserved to# 200000 2025-06-30T09:16:37.970128Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:83:2117] TEvAllocateResult from# 100000 to# 200000 2025-06-30T09:16:37.970140Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:71:0] 
Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970147Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 200000 Reserved to# 300000 2025-06-30T09:16:37.970150Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:85:2119] TEvAllocateResult from# 200000 to# 300000 2025-06-30T09:16:37.970158Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970164Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 300000 Reserved to# 400000 2025-06-30T09:16:37.970167Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:87:2121] TEvAllocateResult from# 300000 to# 400000 2025-06-30T09:16:37.970178Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970184Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 400000 Reserved to# 500000 2025-06-30T09:16:37.970187Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:89:2123] TEvAllocateResult from# 400000 to# 500000 2025-06-30T09:16:37.970195Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 500000 Reserved to# 600000 2025-06-30T09:16:37.970198Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:91:2125] TEvAllocateResult from# 500000 to# 600000 2025-06-30T09:16:37.970207Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970212Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 600000 Reserved to# 700000 2025-06-30T09:16:37.970215Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:79:2113] TEvAllocateResult from# 600000 to# 700000 2025-06-30T09:16:37.970224Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970231Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970236Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 700000 Reserved to# 800000 2025-06-30T09:16:37.970239Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 700000 to# 800000 2025-06-30T09:16:37.970249Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:37.970255Z node 1 
:TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 800000 Reserved to# 900000 2025-06-30T09:16:37.970258Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:75:2109] TEvAllocateResult from# 800000 to# 900000 2025-06-30T09:16:37.970270Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 900000 Reserved to# 1000000 2025-06-30T09:16:37.970272Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:77:2111] TEvAllocateResult from# 900000 to# 1000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-30T09:16:37.971033Z node 1 :TABLET_MAIN NOTICE: tablet_sys.cpp:1849: Tablet: 7205759404 ... e.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9300000 Reserved to# 9400000 2025-06-30T09:16:38.143966Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:626:2554] TEvAllocateResult from# 9300000 to# 9400000 2025-06-30T09:16:38.143973Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:9:1:24576:78:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:38.143980Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9400000 Reserved to# 9500000 2025-06-30T09:16:38.143982Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:628:2556] TEvAllocateResult from# 9400000 to# 9500000 2025-06-30T09:16:38.143993Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9500000 Reserved to# 9600000 2025-06-30T09:16:38.143996Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:630:2558] TEvAllocateResult from# 9500000 to# 9600000 2025-06-30T09:16:38.144003Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:9:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:38.144007Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:10:1:24576:78:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:38.144012Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9600000 Reserved to# 9700000 2025-06-30T09:16:38.144015Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:632:2560] TEvAllocateResult from# 9600000 to# 9700000 2025-06-30T09:16:38.144025Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9700000 Reserved to# 9800000 2025-06-30T09:16:38.144027Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:634:2562] TEvAllocateResult from# 9700000 to# 9800000 2025-06-30T09:16:38.144035Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:10:0:0:71:0] Status# OK StatusFlags# { Valid } 
ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:38.144043Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9800000 Reserved to# 9900000 2025-06-30T09:16:38.144046Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:636:2564] TEvAllocateResult from# 9800000 to# 9900000 2025-06-30T09:16:38.144053Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:11:1:24576:72:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:38.144060Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:38.144066Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9900000 Reserved to# 10000000 2025-06-30T09:16:38.144069Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:638:2566] TEvAllocateResult from# 9900000 to# 10000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-30T09:16:38.144381Z node 1 :TABLET_MAIN NOTICE: tablet_sys.cpp:1849: Tablet: 72057594046447617 Type: TxAllocator, EReason: ReasonPill, SuggestedGeneration: 0, KnownGeneration: 11 Marker# TSYS31 2025-06-30T09:16:38.144591Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:811: Tablet: 72057594046447617 HandleStateStorageInfoResolve, KnownGeneration: 11 Promote Marker# TSYS16 2025-06-30T09:16:38.144696Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:421: TabletId# 72057594046447617 TTabletReqRebuildHistoryGraph::ProcessKeyEntry, LastBlobID: [72057594046447617:11:11:0:0:71:0] Snap: 11:1 for 72057594046447617 Marker# TRRH04 2025-06-30T09:16:38.144708Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:11:0:0:71:0], refs: [[72057594046447617:11:11:1:24576:72:0],] for 72057594046447617 2025-06-30T09:16:38.144731Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:1:0:0:42:0], refs: [[72057594046447617:11:1:1:28672:1483:0],] for 72057594046447617 2025-06-30T09:16:38.144736Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:2:0:0:69:0], refs: [[72057594046447617:11:2:1:24576:76:0],] for 72057594046447617 2025-06-30T09:16:38.144740Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:3:0:0:71:0], refs: [[72057594046447617:11:3:1:24576:78:0],] for 72057594046447617 2025-06-30T09:16:38.144744Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:4:0:0:71:0], refs: [[72057594046447617:11:4:1:24576:75:0],] for 72057594046447617 2025-06-30T09:16:38.144748Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id 
[72057594046447617:11:5:0:0:71:0], refs: [[72057594046447617:11:5:1:24576:78:0],] for 72057594046447617 2025-06-30T09:16:38.144758Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:6:0:0:71:0], refs: [[72057594046447617:11:6:1:24576:78:0],] for 72057594046447617 2025-06-30T09:16:38.144763Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:7:0:0:71:0], refs: [[72057594046447617:11:7:1:24576:78:0],] for 72057594046447617 2025-06-30T09:16:38.144767Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:8:0:0:71:0], refs: [[72057594046447617:11:8:1:24576:75:0],] for 72057594046447617 2025-06-30T09:16:38.144771Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:9:0:0:71:0], refs: [[72057594046447617:11:9:1:24576:78:0],] for 72057594046447617 2025-06-30T09:16:38.144778Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:10:0:0:71:0], refs: [[72057594046447617:11:10:1:24576:78:0],] for 72057594046447617 2025-06-30T09:16:38.144804Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:625: TabletId# 72057594046447617 TTabletReqRebuildHistoryGraph::BuildHistory - Process generation 11 from 1 with 11 steps Marker# TRRH09 2025-06-30T09:16:38.144808Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:1:1:28672:1483:0],] for 72057594046447617 2025-06-30T09:16:38.144812Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:2:1:24576:76:0],] for 72057594046447617 2025-06-30T09:16:38.144814Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:3:1:24576:78:0],] for 72057594046447617 2025-06-30T09:16:38.144818Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:4:1:24576:75:0],] for 72057594046447617 2025-06-30T09:16:38.144821Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:5:1:24576:78:0],] for 72057594046447617 2025-06-30T09:16:38.144824Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:6:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:6:1:24576:78:0],] 2025-06-30T09:16:38.144828Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:7:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:7:1:24576:78:0],] 2025-06-30T09:16:38.144831Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: 
[[72057594046447617:11:8:1:24576:75:0],] for 72057594046447617, Gc+: [[72057594046447617:11:8:1:24576:75:0],] 2025-06-30T09:16:38.144834Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:9:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:9:1:24576:78:0],] 2025-06-30T09:16:38.144837Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:10:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:10:1:24576:78:0],] 2025-06-30T09:16:38.144839Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:11:1:24576:72:0],] for 72057594046447617, Gc+: [[72057594046447617:11:11:1:24576:72:0],] 2025-06-30T09:16:38.144871Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:12:0:0:0:0:0] Marker# TSYS01 2025-06-30T09:16:38.145043Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:38.145465Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-30T09:16:38.145492Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-30T09:16:38.145584Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 12, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-30T09:16:38.145590Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:1:1:28672:1639:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:38.145599Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-30T09:16:38.145608Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 12:0 Marker# TSYS28 >> TSchemeShardTestExtSubdomainReboots::Fake [GOOD] >> TSchemeShardTestExtSubdomainReboots::CreateForceDrop-AlterDatabaseCreateHiveFirst-true >> TSchemeShardTestExtSubdomainReboots::CreateExternalSubdomainWithoutHive-AlterDatabaseCreateHiveFirst-false >> TSchemeShardTestExtSubdomainReboots::CreateForceDrop-AlterDatabaseCreateHiveFirst-false >> TSchemeShardTestExtSubdomainReboots::SchemeLimits-AlterDatabaseCreateHiveFirst-true |78.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |78.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows >> TSchemeShardTestExtSubdomainReboots::CreateExternalSubdomain-AlterDatabaseCreateHiveFirst-false >> TSchemeShardTestExtSubdomainReboots::SchemeLimits-AlterDatabaseCreateHiveFirst-false >> TSchemeShardTestExtSubdomainReboots::CreateExternalSubdomainWithoutHive-AlterDatabaseCreateHiveFirst-true >> TSchemeShardTestExtSubdomainReboots::AlterForceDrop-AlterDatabaseCreateHiveFirst-false >> 
TSchemeShardTestExtSubdomainReboots::CreateExternalSubdomain-AlterDatabaseCreateHiveFirst-true |78.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |78.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |78.6%| [TA] $(B)/ydb/core/tx/tx_allocator/ut/test-results/unittest/{meta.json ... results_accumulator.log} |78.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::Fake [GOOD] |78.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/security/ut/ydb-core-security-ut |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/ut/ydb-core-security-ut |78.6%| [TA] {RESULT} $(B)/ydb/core/tx/tx_allocator/ut/test-results/unittest/{meta.json ... results_accumulator.log} |78.6%| [LD] {RESULT} $(B)/ydb/core/security/ut/ydb-core-security-ut |78.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |78.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup >> GraphShard::NormalizeAndDownsample1 [GOOD] >> GraphShard::NormalizeAndDownsample2 [GOOD] >> GraphShard::NormalizeAndDownsample3 [GOOD] >> GraphShard::NormalizeAndDownsample4 [GOOD] >> GraphShard::NormalizeAndDownsample5 [GOOD] >> GraphShard::NormalizeAndDownsample6 [GOOD] >> GraphShard::CheckHistogramToPercentileConversions [GOOD] >> GraphShard::CreateGraphShard >> GraphShard::CreateGraphShard [GOOD] >> TTxDataShardUploadRows::TestUploadRows >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataSplitThenPublish >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataPublishThenSplit >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflow >> TTxDataShardUploadRows::TestUploadShadowRows >> TTicketParserTest::AuthenticationWithUserAccount |78.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |78.6%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut >> TTicketParserTest::LoginGood >> TTicketParserTest::AccessServiceAuthenticationOk >> TTicketParserTest::LoginRefreshGroupsWithError >> TTicketParserTest::TicketFromCertificateWithValidationGood >> TTicketParserTest::NebiusAuthenticationUnavailable >> TTicketParserTest::BulkAuthorizationRetryError >> TTicketParserTest::TicketFromCertificateCheckIssuerGood >> TTicketParserTest::AuthorizationRetryError >> TTicketParserTest::LoginBad ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/graph/shard/ut/unittest >> GraphShard::CreateGraphShard [GOOD] Test command err: [100,200,300,400,500,600,700,800,900,1000] [[1,2,3,4,5,6,7,8,9,10],[1,2,3,4,5,6,7,8,9,10]] [100,200,300,400,500,520,540,560,580,600] [[1,2,3,4,5,6.5,8.5,10.5,12.5,14.5]] [100,200,300,400,500,600] [[1,2,3,4,5,10.5]] [100,350,600] [[1,3,10]] [100,200,300,400,500,520,540,560,580,600] [[1,2,3,4,5,6.5,8.5,10,12,14.5]] [600] [[8]] p50=32 p75=192 p90=512 p99=972.8 Leader for 
TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:16:39.788853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:39.788871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:39.788875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:39.788878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:39.788887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:39.788890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:39.788896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:39.788909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:39.788987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:39.789037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:39.798335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:16:39.798356Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:39.800599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:39.800635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:39.800662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:39.801764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:39.801805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:39.801879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:39.801915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, 
LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:39.802361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:39.802386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:39.802574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:39.802582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:39.802598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:39.802603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:39.802607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:39.802619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:16:39.803499Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:16:39.816188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:39.816248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:39.816292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:39.816298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:39.816342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:39.816355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:39.817105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 
2025-06-30T09:16:39.817146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:39.817193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:39.817204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:39.817210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:39.817216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:39.817687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:39.817697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:39.817706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:39.818091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:39.818101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:39.818108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:39.818116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:39.818869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:39.819351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:39.819390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:39.819579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:39.819604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } 
Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:39.819618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:39.819683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:16:39.819691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:39.819726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:39.819739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomain ... ubDomainState::TPropose ProgressState leave, operationId 102:1, at tablet# 72057594046678944 2025-06-30T09:16:39.856256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 102 ready parts: 2/2 2025-06-30T09:16:39.856277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:39.856514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-30T09:16:39.856531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-30T09:16:39.856577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:39.856589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:39.856592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:711: TTxOperationPlanStep Execute operation part is already done, operationId: 102:0 2025-06-30T09:16:39.856596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:1, at tablet# 72057594046678944 2025-06-30T09:16:39.856639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:1 128 -> 240 2025-06-30T09:16:39.856645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:1, at tablet# 72057594046678944 2025-06-30T09:16:39.856658Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-30T09:16:39.856671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[1:410:2376], EffectiveACLVersion: 0, SubdomainVersion: 3, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 72075186234409549, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:16:39.856953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:39.856958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:16:39.856981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:39.856985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-30T09:16:39.857028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:1, at schemeshard: 72057594046678944 2025-06-30T09:16:39.857033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 102:1, ProgressState, NeedSyncHive: 0 2025-06-30T09:16:39.857036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:1 240 -> 240 2025-06-30T09:16:39.857106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:16:39.857113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:16:39.857116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:16:39.857119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-30T09:16:39.857123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-30T09:16:39.857132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/2, is published: true 2025-06-30T09:16:39.857415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:1, at schemeshard: 72057594046678944 2025-06-30T09:16:39.857422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:1 ProgressState 2025-06-30T09:16:39.857430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:1 progress is 2/2 2025-06-30T09:16:39.857433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-30T09:16:39.857437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:1 progress is 2/2 2025-06-30T09:16:39.857446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-30T09:16:39.857448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/2, is published: true 2025-06-30T09:16:39.857452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-30T09:16:39.857455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:16:39.857458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:16:39.857473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-30T09:16:39.857476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:1 2025-06-30T09:16:39.857478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:1 2025-06-30T09:16:39.857486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-30T09:16:39.857675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-30T09:16:39.857920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:16:39.857925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:16:39.857964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:16:39.857976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:16:39.857979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:573:2500] TestWaitNotification: OK eventTxId 102 2025-06-30T09:16:39.858027Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, 
record: Path: "/MyRoot/db1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:16:39.858062Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/db1" took 40us result status StatusSuccess 2025-06-30T09:16:39.858136Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/db1" PathDescription { Self { Name: "db1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 GraphShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> IncrementalBackup::SimpleRestoreBackupCollection+WithIncremental >> IncrementalBackup::BackupRestore >> IncrementalBackup::SimpleBackup >> IncrementalBackup::SimpleRestore >> TTicketParserTest::AuthenticationWithUserAccount [GOOD] >> TTicketParserTest::AuthenticationUnsupported >> TTicketParserTest::LoginGood [GOOD] >> TTicketParserTest::LoginGoodWithGroups >> TTicketParserTest::AccessServiceAuthenticationOk [GOOD] >> TTicketParserTest::AccessServiceAuthenticationApiKeyOk >> TTicketParserTest::NebiusAuthenticationUnavailable [GOOD] >> TTicketParserTest::NebiusAuthorizationRetryError >> TTicketParserTest::LoginBad [GOOD] >> TTicketParserTest::BulkAuthorizationWithRequiredPermissions >> TTicketParserTest::TicketFromCertificateCheckIssuerGood [GOOD] >> TTicketParserTest::TicketFromCertificateCheckIssuerBad >> TTxDataShardUploadRows::TestUploadRows [GOOD] >> TTxDataShardUploadRows::TestUploadRowsDropColumnRace >> TTicketParserTest::TicketFromCertificateWithValidationGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersGood >> TTicketParserTest::LoginGoodWithGroups [GOOD] >> TTicketParserTest::LoginRefreshGroupsGood >> TTicketParserTest::AuthenticationUnsupported [GOOD] >> 
TTicketParserTest::AuthenticationUnknown >> TTicketParserTest::AccessServiceAuthenticationApiKeyOk [GOOD] >> TTicketParserTest::AuthenticationUnavailable >> TTicketParserTest::BulkAuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::BulkAuthorizationWithUserAccount >> TxUsage::Sinks_Oltp_WriteToTopic_1_Table >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflow [GOOD] >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetry >> TxUsage::WriteToTopic_Demo_20_RestartNo_Table >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query >> TxUsage::WriteToTopic_Demo_11_Table >> TxUsage::WriteToTopic_Demo_18_RestartNo_Table >> TxUsage::WriteToTopic_Demo_27_Table >> TxUsage::WriteToTopic_Demo_13_Table >> BasicUsage::ReadWithoutConsumerWithRestarts >> TTxDataShardUploadRows::TestUploadShadowRows [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowData >> TxUsage::ReadRuleGeneration >> TTicketParserTest::AuthenticationUnknown [GOOD] >> TTicketParserTest::Authorization >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataPublishThenSplit [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish >> TTicketParserTest::AuthenticationUnavailable [GOOD] >> TTicketParserTest::AuthenticationRetryError >> TTicketParserTest::BulkAuthorizationWithUserAccount [GOOD] >> TTicketParserTest::BulkAuthorizationWithUserAccount2 >> TTicketParserTest::TicketFromCertificateCheckIssuerBad [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationBad >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersBad >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataSplitThenPublish [GOOD] >> TTxDataShardUploadRows::UploadRowsToReplicatedTable >> TTicketParserTest::Authorization [GOOD] >> TTicketParserTest::AuthorizationModify >> TTxDataShardUploadRows::TestUploadRowsDropColumnRace [GOOD] >> TTxDataShardUploadRows::TestUploadRowsLocks >> TTicketParserTest::BulkAuthorizationWithUserAccount2 [GOOD] >> TTicketParserTest::BulkAuthorizationUnavailable >> TTicketParserTest::AuthorizationModify [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationBad [GOOD] >> TTicketParserTest::NebiusAuthorizationWithRequiredPermissions >> IncrementalBackup::SimpleRestore [GOOD] >> IncrementalBackup::SimpleBackupBackupCollection+WithIncremental >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetry [GOOD] >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersBad [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDefaultGroupGood >> TTicketParserTest::BulkAuthorizationUnavailable [GOOD] >> IncrementalBackup::SimpleRestoreBackupCollection+WithIncremental [FAIL] >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::AuthorizationModify [GOOD] Test command err: 2025-06-30T09:16:40.301706Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668854588520016:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.301733Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/004405/r3tmp/tmpYHAhiQ/pdisk_1.dat 2025-06-30T09:16:40.361734Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13759, node 1 2025-06-30T09:16:40.382946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.382962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.382964Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.383002Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8926 2025-06-30T09:16:40.404637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.404669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.406174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:40.434348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:16:40.437233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:40.463126Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:40.463144Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:40.463147Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:40.463459Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-30T09:16:40.463478Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [534d3e3fe330] Connect to grpc://localhost:16610 2025-06-30T09:16:40.464083Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [534d3e3fe330] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-30T09:16:40.469705Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [534d3e3fe330] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:40.469816Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-30T09:16:40.470112Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [534d3e3feb70] Connect to grpc://localhost:22952 2025-06-30T09:16:40.470247Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [534d3e3feb70] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-30T09:16:40.475607Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [534d3e3feb70] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-30T09:16:40.475789Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-30T09:16:40.904496Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668852492518513:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.904526Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004405/r3tmp/tmpg9MwPT/pdisk_1.dat 2025-06-30T09:16:40.920442Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.920701Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521668852492518489:2080] 1751275000904358 != 1751275000904361 TServer::EnableGrpc on GrpcPort 12559, node 2 2025-06-30T09:16:40.932287Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.932302Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.932304Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.932357Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1639 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.012207Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.012254Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.012738Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:41.013265Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:41.015328Z node 2 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (8E120919): Token is not supported 2025-06-30T09:16:41.429970Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521668857398573694:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.430004Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004405/r3tmp/tmpIIIerk/pdisk_1.dat 2025-06-30T09:16:41.446498Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13211, node 3 2025-06-30T09:16:41.456673Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:41.456689Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:41.456700Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:41.456752Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20797 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.534980Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.535015Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.535594Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16: ... ER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:42.038540Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:42.038559Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [534d3c7d33b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "XXXXXXXX" type: "ydb.database" } resource_path { id: "XXXXXXXX" type: "resource-manager.folder" } } 2025-06-30T09:16:42.038802Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [534d3c7d33b0] Status 16 Access Denied 2025-06-30T09:16:42.038845Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.read now has a permanent error "Access Denied" retryable:0 2025-06-30T09:16:42.038852Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'Access Denied' 2025-06-30T09:16:42.038920Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:42.038927Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:42.038928Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:42.038930Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:42.038952Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [534d3c7d33b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "XXXXXXXX" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 
2025-06-30T09:16:42.039192Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [534d3c7d33b0] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:42.039223Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-30T09:16:42.039252Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:42.039331Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:42.039339Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:42.039340Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:42.039343Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:42.039356Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [534d3c7d33b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "XXXXXXXX" type: "resource-manager.folder" } } 2025-06-30T09:16:42.039562Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [534d3c7d33b0] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:42.039587Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-30T09:16:42.039596Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:42.039669Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:42.039677Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:42.039678Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:42.039680Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(monitoring.view) 2025-06-30T09:16:42.039691Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [534d3c7d33b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "monitoring.view" resource_path { id: "gizmo" type: "iam.gizmo" } } 2025-06-30T09:16:42.039915Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [534d3c7d33b0] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:42.039936Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission monitoring.view now has a valid subject "user1@as" 2025-06-30T09:16:42.039942Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:42.425875Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521668862037874087:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:42.425897Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004405/r3tmp/tmpRszopj/pdisk_1.dat 2025-06-30T09:16:42.447290Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23313, node 5 2025-06-30T09:16:42.452683Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:42.452697Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:42.452698Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:42.452741Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9588 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:42.531021Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:42.531054Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:42.531873Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:42.532030Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:16:42.533984Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:42.533997Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:42.534001Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:42.534012Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:42.534047Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [534d3d042330] Connect to grpc://localhost:19424 2025-06-30T09:16:42.534282Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [534d3d042330] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:42.536412Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [534d3d042330] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:42.536468Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-30T09:16:42.536496Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:42.536679Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:42.536686Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:42.536688Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:42.536695Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:42.536701Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-30T09:16:42.536724Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [534d3d042330] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:42.536932Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [534d3d042330] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:42.537272Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [534d3d042330] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:42.537296Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-30T09:16:42.537503Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [534d3d042330] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:42.537542Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 
2025-06-30T09:16:42.537559Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as >> TTxDataShardUploadRows::TestUploadShadowRowsShadowData [GOOD] >> TTicketParserTest::NebiusAuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::NebiusAuthorizationUnavailable >> TTxDataShardUploadRows::UploadRowsToReplicatedTable [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::BulkAuthorizationUnavailable [GOOD] Test command err: 2025-06-30T09:16:40.487059Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668854052478227:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.495506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043f3/r3tmp/tmpwuf9CL/pdisk_1.dat 2025-06-30T09:16:40.565805Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18743, node 1 2025-06-30T09:16:40.583924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.583944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.583947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.583989Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3734 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:16:40.635621Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.635655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.636576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:40.636635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:16:40.643227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:40.694816Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:16:40.694987Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:40.694992Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:40.695391Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (5DAB89DE) () has now permanent error message 'Token is not in correct format' 2025-06-30T09:16:40.695396Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Token is not in correct format 2025-06-30T09:16:40.695403Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (5DAB89DE): Token is not in correct format 2025-06-30T09:16:41.009564Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668855673897805:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.009612Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043f3/r3tmp/tmpF1Yr2k/pdisk_1.dat 2025-06-30T09:16:41.032698Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:41.034410Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521668855673897784:2080] 1751275001009439 != 1751275001009442 TServer::EnableGrpc on GrpcPort 32078, node 2 2025-06-30T09:16:41.046724Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:41.046756Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:41.046759Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:41.046817Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29040 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.115913Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.115949Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.116459Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:41.118436Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:41.118491Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:41.119534Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:41.119555Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:41.119558Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:41.119588Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-30T09:16:41.119614Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [136dbcf02330] Connect to grpc://localhost:24114 2025-06-30T09:16:41.120552Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [136dbcf02330] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-30T09:16:41.123947Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [136dbcf02330] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: 
"resource-manager.folder" } permission_denied_error { message: "Access Denied" } } } } 2025-06-30T09:16:41.124089Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-30T09:16:41.124128Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:41.124604Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:41.124618Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:41.124620Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:41.124636Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-30T09:16:41.124689Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [136dbcf02330] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-30T09:16:41.125465Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [136dbcf02330] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } } } 2025-06-30T09:16:41.125558Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-30T09:16:41.125571Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'something.write for folder_id aaaa1234 - Access Denied' 2025-06-30T09:16:41.512233Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActo ... CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:16:42.152468Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:42.152517Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:42.153062Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:42.153727Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:16:42.156243Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:42.156261Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:42.156275Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:42.156312Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read somewhere.sleep something.list something.write something.eat) 2025-06-30T09:16:42.156335Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [136dbcf12070] Connect to grpc://localhost:3519 2025-06-30T09:16:42.156575Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [136dbcf12070] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "somewhere.sleep" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.list" ...(truncated) } 2025-06-30T09:16:42.159474Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [136dbcf12070] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } items { permission: "somewhere.sleep" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } items { permission: "something.list" r...(truncated) } 2025-06-30T09:16:42.159608Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.read access denied for subject "user1@as" 2025-06-30T09:16:42.159616Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission somewhere.sleep access denied for subject "user1@as" 2025-06-30T09:16:42.159621Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.list access denied for subject "user1@as" 2025-06-30T09:16:42.159625Z node 4 :TICKET_PARSER TRACE: 
ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.eat access denied for subject "user1@as" 2025-06-30T09:16:42.159630Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-30T09:16:42.159688Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [136dbceff0f0] Connect to grpc://localhost:26275 2025-06-30T09:16:42.159862Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [136dbceff0f0] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-30T09:16:42.174891Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [136dbceff0f0] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-30T09:16:42.175504Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-30T09:16:42.547056Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521668860619760510:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:42.547093Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043f3/r3tmp/tmp11LI5c/pdisk_1.dat 2025-06-30T09:16:42.559288Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:42.559522Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7521668860619760492:2080] 1751275002546953 != 1751275002546956 TServer::EnableGrpc on GrpcPort 3470, node 5 2025-06-30T09:16:42.569030Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:42.569043Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:42.569045Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:42.569085Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23151 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:16:42.650831Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:42.650870Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:42.651323Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:42.651787Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:42.653531Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:42.653547Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:42.653551Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:42.653576Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-30T09:16:42.653595Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [136dbcef33b0] Connect to grpc://localhost:11397 2025-06-30T09:16:42.653846Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [136dbcef33b0] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-30T09:16:42.656166Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [136dbcef33b0] Status 14 Service Unavailable 2025-06-30T09:16:42.656234Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-30T09:16:42.656245Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-06-30T09:16:42.656251Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:42.656266Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-30T09:16:42.656359Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [136dbcef33b0] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-30T09:16:42.657147Z node 5 :GRPC_CLIENT DEBUG: 
grpc_service_client.h:109: [136dbcef33b0] Status 1 CANCELLED 2025-06-30T09:16:42.657215Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" retryable: 1 2025-06-30T09:16:42.657221Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "CANCELLED" retryable: 1 2025-06-30T09:16:42.657225Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' >> TTicketParserTest::TicketFromCertificateWithValidationDefaultGroupGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadShadowRowsShadowData [GOOD] Test command err: 2025-06-30T09:16:40.557202Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:16:40.557290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:16:40.557309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004932/r3tmp/tmpIPihxO/pdisk_1.dat 2025-06-30T09:16:40.671723Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:16:40.672671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:40.693522Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.694916Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275000158916 != 1751275000158920 2025-06-30T09:16:40.743642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.743690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.755488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:40.835496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:40.855861Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:16:40.855966Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:16:40.867568Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:16:40.867620Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:16:40.867832Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:16:40.867842Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:16:40.867850Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:16:40.867915Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:16:40.867935Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:16:40.867950Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:16:40.878337Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:16:40.883978Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:16:40.884079Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:16:40.884113Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:16:40.884119Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:16:40.884125Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:16:40.884132Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:40.884324Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:16:40.884351Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:16:40.884364Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:16:40.884372Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:40.884383Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:16:40.884389Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:40.884413Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:16:40.884535Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:16:40.884592Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:16:40.884615Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:16:40.885002Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:16:40.895372Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:16:40.895422Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:16:41.055453Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-30T09:16:41.056635Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:16:41.056662Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:41.056950Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:16:41.056964Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:16:41.056977Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:16:41.057056Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:16:41.057099Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:16:41.057242Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:16:41.057260Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:16:41.057388Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:16:41.057502Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:41.057888Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:16:41.057897Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:41.058057Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:16:41.058072Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:41.058390Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:41.058404Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:16:41.058411Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:16:41.058431Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:16:41.058444Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:16:41.058457Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:41.059466Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:16:41.059882Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:16:41.059916Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:16:41.060122Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:16:41.066851Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:41.066888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: ... ndencies 2025-06-30T09:16:42.935085Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2000/281474976715664 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-30T09:16:42.935093Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715665] at 72075186224037888 2025-06-30T09:16:42.935097Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-30T09:16:42.935099Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:16:42.935102Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit MakeScanSnapshot 2025-06-30T09:16:42.935107Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit MakeScanSnapshot 2025-06-30T09:16:42.935111Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-30T09:16:42.935115Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit MakeScanSnapshot 2025-06-30T09:16:42.935119Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit WaitForStreamClearance 2025-06-30T09:16:42.935123Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit WaitForStreamClearance 2025-06-30T09:16:42.935131Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:99: Requested stream clearance from [2:906:2717] for [0:281474976715665] at 72075186224037888 2025-06-30T09:16:42.935135Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Continue 2025-06-30T09:16:42.935174Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287942, Sender [2:906:2717], Recipient [2:629:2533]: NKikimrTx.TEvStreamClearancePending TxId: 281474976715665 2025-06-30T09:16:42.935178Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvTxProcessing::TEvStreamClearancePending 2025-06-30T09:16:42.935190Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:906:2717], Recipient [2:629:2533]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715665 Cleared: true 2025-06-30T09:16:42.935193Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-30T09:16:42.935201Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:629:2533], Recipient [2:629:2533]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:42.935204Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event 
TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:42.935208Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:16:42.935212Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:16:42.935216Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for WaitForStreamClearance 2025-06-30T09:16:42.935218Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit WaitForStreamClearance 2025-06-30T09:16:42.935222Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [0:281474976715665] at 72075186224037888 2025-06-30T09:16:42.935225Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-30T09:16:42.935228Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit WaitForStreamClearance 2025-06-30T09:16:42.935230Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit ReadTableScan 2025-06-30T09:16:42.935233Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit ReadTableScan 2025-06-30T09:16:42.935269Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Continue 2025-06-30T09:16:42.935274Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:16:42.935276Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-30T09:16:42.935279Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:16:42.935281Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:16:42.935370Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:913:2722], Recipient [2:629:2533]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-30T09:16:42.935378Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-30T09:16:42.935420Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 1 2025-06-30T09:16:42.935481Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:16:42.935485Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:42.935556Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715665, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-30T09:16:42.935580Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715665, PendingAcks: 0 2025-06-30T09:16:42.935584Z node 2 
:TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 0 2025-06-30T09:16:42.935614Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:899:2710], Recipient [2:629:2533]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046644480 ClientId: [2:899:2710] ServerId: [2:901:2712] } 2025-06-30T09:16:42.935618Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-30T09:16:42.935666Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:16:42.935670Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715665, at: 72075186224037888 2025-06-30T09:16:42.935701Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:629:2533], Recipient [2:629:2533]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:42.935707Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:42.935712Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:16:42.935716Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:16:42.935720Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for ReadTableScan 2025-06-30T09:16:42.935723Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit ReadTableScan 2025-06-30T09:16:42.935727Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715665] at 72075186224037888 error: , IsFatalError: 0 2025-06-30T09:16:42.935732Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-30T09:16:42.935735Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit ReadTableScan 2025-06-30T09:16:42.935738Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:16:42.935740Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-30T09:16:42.935748Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715665 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-30T09:16:42.935757Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is DelayComplete 2025-06-30T09:16:42.935759Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:16:42.935763Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:16:42.935766Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 
72075186224037888 on unit CompletedOperations 2025-06-30T09:16:42.935772Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-30T09:16:42.935774Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:16:42.935777Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715665] at 72075186224037888 has finished 2025-06-30T09:16:42.935780Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:42.935782Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-30T09:16:42.935784Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:16:42.935787Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:16:42.935793Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:42.935796Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-30T09:16:42.935801Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::UploadRowsToReplicatedTable [GOOD] Test command err: 2025-06-30T09:16:40.403525Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:16:40.403621Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:16:40.403640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004949/r3tmp/tmpvupPSf/pdisk_1.dat 2025-06-30T09:16:40.523776Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:16:40.524671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:40.542599Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.543814Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275000079546 != 1751275000079550 2025-06-30T09:16:40.588069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.588112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.598773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:40.677499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:40.699739Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:16:40.700057Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:16:40.700176Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:16:40.700265Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:16:40.713196Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:16:40.713528Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:16:40.713573Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:16:40.713838Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:16:40.713855Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:16:40.713865Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:16:40.713954Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:16:40.713993Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:16:40.714014Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:16:40.731015Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:16:40.737437Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:16:40.737555Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:16:40.737589Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:16:40.737597Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:16:40.737603Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:16:40.737610Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:40.737708Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:40.737718Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:40.737839Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:16:40.737868Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:16:40.737885Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:16:40.737896Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:40.737906Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:16:40.737913Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:16:40.737919Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:16:40.737926Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:16:40.737933Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:40.737961Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:40.737968Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:40.737977Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:16:40.738120Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:16:40.738129Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:16:40.738158Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:16:40.738222Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:16:40.738236Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:16:40.738259Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:16:40.738271Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:16:40.738277Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:16:40.738285Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:16:40.738291Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:16:40.738375Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:16:40.738381Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:16:40.738387Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:16:40.738392Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:16:40.738407Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:16:40.738413Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:16:40.738420Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:16:40.738425Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:16:40.738432Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:16:40.746970Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:16:40.747009Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:16:40.758530Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:16:40.758566Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:16:40.758575Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:16:40.758591Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... eartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:16:42.596513Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:272:2315], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:16:42.596583Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:16:42.596591Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004949/r3tmp/tmpgCP8j0/pdisk_1.dat 2025-06-30T09:16:42.679634Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-30T09:16:42.680259Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:42.694620Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:42.695238Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1751275002151137 != 1751275002151140 2025-06-30T09:16:42.737603Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:42.737660Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:42.748431Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:42.821823Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.834400Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:629:2533] 2025-06-30T09:16:42.834479Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:16:42.843638Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:16:42.843695Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:16:42.843926Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:16:42.843939Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:16:42.843949Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:16:42.844037Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:16:42.844070Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:16:42.844086Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [2:645:2533] in generation 1 2025-06-30T09:16:42.854468Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:16:42.854515Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:16:42.854560Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:16:42.854576Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:647:2543] 2025-06-30T09:16:42.854582Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:16:42.854587Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:16:42.854593Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:42.854733Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:16:42.854786Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:16:42.854804Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:16:42.854813Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:42.854824Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:16:42.854831Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:42.854856Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:625:2530], serverId# [2:636:2537], sessionId# [0:0:0] 2025-06-30T09:16:42.854993Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:16:42.855092Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:16:42.855121Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:16:42.855560Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:16:42.865980Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:16:42.866084Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:16:43.012005Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:662:2552], serverId# [2:664:2554], sessionId# [0:0:0] 2025-06-30T09:16:43.012196Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:16:43.012210Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:43.012247Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:16:43.012256Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:16:43.012269Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:16:43.012351Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:16:43.012387Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:16:43.012423Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:16:43.012434Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:16:43.012517Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:16:43.012607Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:43.013029Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:16:43.013039Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:43.013178Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:16:43.013187Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:43.013394Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:43.013402Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:16:43.013407Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:16:43.013420Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:16:43.013428Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:16:43.013437Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:43.013671Z node 2 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:16:43.013915Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:16:43.013949Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:16:43.013955Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:16:43.016335Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:698:2580], serverId# [2:699:2581], sessionId# [0:0:0] 2025-06-30T09:16:43.016362Z node 2 :TX_DATASHARD NOTICE: datashard__op_rows.cpp:168: Rejecting bulk upsert request on datashard: tablet# 72075186224037888, error# Can't execute bulk upsert at replicated table >> IncrementalBackup::SimpleBackup [GOOD] >> IncrementalBackup::MultiRestore >> TTicketParserTest::NebiusAuthorizationUnavailable [GOOD] >> TTxDataShardUploadRows::TestUploadRowsLocks [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadRowsLocks [GOOD] Test command err: 2025-06-30T09:16:40.549050Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:16:40.549146Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:16:40.549165Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004961/r3tmp/tmpZe7raB/pdisk_1.dat 2025-06-30T09:16:40.664672Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:16:40.665604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:40.684009Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.685220Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275000104088 != 1751275000104092 2025-06-30T09:16:40.729192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.729238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.740639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:40.822117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:40.843335Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:649:2544] 2025-06-30T09:16:40.843432Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:16:40.855637Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:650:2545] 2025-06-30T09:16:40.855696Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:16:40.857690Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:16:40.857744Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:16:40.857965Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:16:40.857977Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:16:40.857986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:16:40.858083Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:16:40.858126Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:16:40.858143Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:683:2544] in generation 1 2025-06-30T09:16:40.858301Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:16:40.858342Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:16:40.858509Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:16:40.858519Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:16:40.858527Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:16:40.858564Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:16:40.858591Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:16:40.858601Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:686:2545] in generation 1 2025-06-30T09:16:40.859213Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:654:2547] 2025-06-30T09:16:40.859268Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:16:40.861245Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:656:2549] 2025-06-30T09:16:40.861289Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:16:40.862883Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:16:40.862925Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:16:40.863116Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037891 2025-06-30T09:16:40.863127Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037891 2025-06-30T09:16:40.863136Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037891 2025-06-30T09:16:40.863187Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:16:40.863224Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:16:40.863237Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037891 persisting started state actor id [1:715:2547] in generation 1 2025-06-30T09:16:40.863311Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:16:40.863331Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:16:40.863455Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-30T09:16:40.863464Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-30T09:16:40.863471Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-30T09:16:40.863501Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-30T09:16:40.863517Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:16:40.863525Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:716:2549] in generation 1 2025-06-30T09:16:40.873941Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:16:40.880297Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:16:40.880400Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:16:40.880433Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:721:2584] 2025-06-30T09:16:40.880440Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:16:40.880446Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:16:40.880454Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:40.880582Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:16:40.880592Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:16:40.880605Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:16:40.880616Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:722:2585] 2025-06-30T09:16:40.880621Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:16:40.880626Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:16:40.880631Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:16:40.880741Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:16:40.880749Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037891 2025-06-30T09:16:40.880760Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037891 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:16:40.880769Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [1:723:2586] 2025-06-30T09:16:40.880774Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-06-30T09:16:40.880781Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2025-06-30T09:16:40.880786Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-30T09:16:40.880849Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 
72075186224037888 2025-06-30T09:16:40.880882Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:16:40.880904Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:16:40.880911Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-30T09:16:40.880921Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:16:40.880929Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:724:2587] 2025-06-30T09:16:40.880934Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-30T09:16:40.880939Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-30T09:16:40.880944Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: ... 2Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:16:43.227065Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:16:43.227231Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:43.227802Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:16:43.227822Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:43.228205Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:16:43.228226Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:43.228476Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:43.228489Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:16:43.228497Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:16:43.228521Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:16:43.228535Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:16:43.228554Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:43.229011Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 
72075186224037888 2025-06-30T09:16:43.229352Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:16:43.229373Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:16:43.229556Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:16:43.243685Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:43.243809Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:706:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:43.243824Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:43.245171Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:43.246657Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:16:43.289794Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:43.401214Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:16:43.401887Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:710:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:16:43.456300Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:780:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:16:43.538144Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz01zqbbd2bev14vvfg8c8j7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OTcyNWIwNTMtNGI3ZTRhNDItZDZkODZiNTgtMjBkYmQ2NGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:16:43.540391Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:811:2642], serverId# [3:812:2643], sessionId# [0:0:0] 2025-06-30T09:16:43.540566Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037888 2025-06-30T09:16:43.540631Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037888, row count=3 2025-06-30T09:16:43.551238Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:43.587229Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz01zqn33vgtee7rzhdsg9nj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OTYzNTIxOWQtZTkzZDQwZTYtOWUwZjc4MjEtOTVmMjVkYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:16:43.587960Z node 3 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715661, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] { items { uint32_value: 300 } } 2025-06-30T09:16:43.589498Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(36) Execute: at tablet# 72075186224037888 2025-06-30T09:16:43.601926Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(36) Complete: at tablet# 72075186224037888 2025-06-30T09:16:43.601969Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:43.601989Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-30T09:16:43.602284Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-30T09:16:43.602294Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:43.613092Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz01zqpj07zbpht72ev70fzx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OTYzNTIxOWQtZTkzZDQwZTYtOWUwZjc4MjEtOTVmMjVkYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:16:43.613869Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:5] at 72075186224037888 2025-06-30T09:16:43.613939Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:105;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=5; 2025-06-30T09:16:43.615563Z node 3 :TX_DATASHARD INFO: datashard_write_operation.cpp:720: Write transaction 5 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-30T09:16:43.615642Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-30T09:16:43.615700Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-30T09:16:43.615721Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:43.615799Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [3:869:2648], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:818:2648]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:869:2648].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-30T09:16:43.615910Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:862:2648], SessionActorId: [3:818:2648], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:818:2648]. isRollback=0 2025-06-30T09:16:43.616024Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1938: SessionId: ydb://session/3?node_id=3&id=OTYzNTIxOWQtZTkzZDQwZTYtOWUwZjc4MjEtOTVmMjVkYTg=, ActorId: [3:818:2648], ActorState: ExecuteState, TraceId: 01jz01zqpj07zbpht72ev70fzx, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:863:2648] from: [3:862:2648] 2025-06-30T09:16:43.616081Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [3:863:2648] TxId: 281474976715662. Ctx: { TraceId: 01jz01zqpj07zbpht72ev70fzx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OTYzNTIxOWQtZTkzZDQwZTYtOWUwZjc4MjEtOTVmMjVkYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-30T09:16:43.616171Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:6] at 72075186224037888 2025-06-30T09:16:43.616183Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:421: Skip empty write operation for [0:6] at 72075186224037888 2025-06-30T09:16:43.616214Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:43.616242Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=3&id=OTYzNTIxOWQtZTkzZDQwZTYtOWUwZjc4MjEtOTVmMjVkYTg=, ActorId: [3:818:2648], ActorState: ExecuteState, TraceId: 01jz01zqpj07zbpht72ev70fzx, Create QueryResponse for error on request, msg: ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAuthorizationUnavailable [GOOD] Test command err: 2025-06-30T09:16:40.625430Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668852812238604:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.625961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043cc/r3tmp/tmpx4omUY/pdisk_1.dat 2025-06-30T09:16:40.708116Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7400, node 1 2025-06-30T09:16:40.730878Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.730911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.738488Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.738499Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.738502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.738555Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:40.738851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16634 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:40.798027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:40.801981Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 3C782312D9C83177CE3C244396493B94336D436E61EDD5047EF8904AF8265DA9 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-30T09:16:41.494954Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668859656040247:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.495087Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043cc/r3tmp/tmpvNS0dw/pdisk_1.dat 2025-06-30T09:16:41.511052Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2826, node 2 2025-06-30T09:16:41.521117Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:41.521132Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:41.521138Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:41.521183Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28397 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.603176Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.603205Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.603715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:41.603893Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:16:41.605552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:41.606081Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 96F000B8E399B906E554C3EB63BAB5C98482D6624A45328135BED2671C009402 () has now permanent error message 'Cannot create token from certificate. Client`s certificate and server`s certificate have different issuers' 2025-06-30T09:16:41.606135Z node 2 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 96F000B8E399B906E554C3EB63BAB5C98482D6624A45328135BED2671C009402: Cannot create token from certificate. 
Client`s certificate and server`s certificate have different issuers 2025-06-30T09:16:42.340157Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521668862453587303:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:42.340183Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043cc/r3tmp/tmp4MSjIE/pdisk_1.dat 2025-06-30T09:16:42.358009Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17684, node 3 2025-06-30T09:16:42.368656Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:42.368672Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:42.368674Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:42.368726Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15958 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:42.446536Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:42.446561Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:42.447272Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:42.447612Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:16:42.448730Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 3B43AE01B6098DCA63EA5198374E2C319783CB5864ED1783B348A2252798E13B () has now permanent error message 'Cannot create token from certificate. 
Client certificate failed verification' 2025-06-30T09:16:42.448781Z node 3 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 3B43AE01B6098DCA63EA5198374E2C319783CB5864ED1783B348A2252798E13B: Cannot create token from certificate. Client certificate failed verification 2025-06-30T09:16:42.816786Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521668861864323331:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:42.816810Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043cc/r3tmp/tmpKzreKv/pdisk_1.dat 2025-06-30T09:16:42.833006Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-3 ... 5: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:42.923520Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:42.923531Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:42.923534Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:42.923758Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-30T09:16:42.923778Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [3170bd8d68b0] Connect to grpc://localhost:16688 2025-06-30T09:16:42.924310Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [3170bd8d68b0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } 0: "OK" 2025-06-30T09:16:42.926687Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [3170bd8d68b0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } } 2025-06-30T09:16:42.926765Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1219: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-30T09:16:42.926805Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:42.926989Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:42.926997Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: 
CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:42.926999Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:42.927007Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-30T09:16:42.927058Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [3170bd8d68b0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } 0: "OK" 2025-06-30T09:16:42.927656Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [3170bd8d68b0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } } 2025-06-30T09:16:42.927693Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1219: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-30T09:16:42.927705Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'something.write for aaaa1234 bbbb4554 - PERMISSION_DENIED' 2025-06-30T09:16:43.273608Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521668867635185689:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:43.273650Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043cc/r3tmp/tmpKeHI5f/pdisk_1.dat 2025-06-30T09:16:43.287514Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3554, node 5 2025-06-30T09:16:43.303013Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:43.303033Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:43.303036Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:43.303096Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6299 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:43.377657Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:43.377695Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:43.378218Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:43.380307Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:43.381028Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:43.382290Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:43.382302Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:43.382305Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:43.382351Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-30T09:16:43.382366Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [3170bd2e6070] Connect to grpc://localhost:24832 2025-06-30T09:16:43.382610Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [3170bd2e6070] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: 
"Service Unavailable" 2025-06-30T09:16:43.390819Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [3170bd2e6070] Status 14 Service Unavailable 2025-06-30T09:16:43.391023Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-30T09:16:43.391032Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-06-30T09:16:43.391039Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:43.391053Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-30T09:16:43.391171Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [3170bd2e6070] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } 2025-06-30T09:16:43.396879Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [3170bd2e6070] Status 1 CANCELLED 2025-06-30T09:16:43.397291Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" retryable: 1 2025-06-30T09:16:43.397309Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "CANCELLED" retryable: 1 2025-06-30T09:16:43.397318Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad [GOOD] >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish [GOOD] Test command err: 2025-06-30T09:16:40.558233Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:16:40.558327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:16:40.558346Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00493e/r3tmp/tmpj44vd5/pdisk_1.dat 2025-06-30T09:16:40.674931Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:16:40.675932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:40.694891Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.696355Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275000130240 != 1751275000130244 2025-06-30T09:16:40.743464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.743511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.755350Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:40.834162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:40.854880Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:16:40.855208Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:16:40.855330Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:16:40.855426Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:16:40.866469Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:16:40.866722Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:16:40.866775Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:16:40.866983Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:16:40.866995Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:16:40.867004Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:16:40.867076Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:16:40.867108Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:16:40.867123Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:16:40.877510Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:16:40.883249Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:16:40.883346Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:16:40.883374Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:16:40.883381Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:16:40.883386Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:16:40.883393Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:40.883475Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:40.883486Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:40.883592Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:16:40.883621Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:16:40.883636Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:16:40.883644Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:40.883652Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:16:40.883657Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:16:40.883666Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:16:40.883672Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:16:40.883678Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:40.883702Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:40.883708Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:40.883716Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:16:40.883804Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:16:40.883810Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:16:40.883831Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:16:40.883880Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:16:40.883892Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:16:40.883912Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:16:40.883921Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:16:40.883927Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:16:40.883934Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:16:40.883939Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:16:40.884007Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:16:40.884012Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:16:40.884017Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:16:40.884021Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:16:40.884033Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:16:40.884038Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:16:40.884043Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:16:40.884047Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:16:40.884055Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:16:40.884390Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:16:40.884404Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:16:40.894816Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:16:40.894858Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:16:40.894869Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:16:40.894886Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... line.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037889 is DelayComplete 2025-06-30T09:16:44.032732Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037889 executing on unit CompleteOperation 2025-06-30T09:16:44.032735Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037889 to execution unit CompletedOperations 2025-06-30T09:16:44.032739Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037889 on unit CompletedOperations 2025-06-30T09:16:44.032745Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037889 is Executed 2025-06-30T09:16:44.032761Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037889 executing on unit CompletedOperations 2025-06-30T09:16:44.032765Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715668] at 72075186224037889 has finished 2025-06-30T09:16:44.032769Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:44.032772Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-30T09:16:44.032776Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-30T09:16:44.032780Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-30T09:16:44.043677Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:16:44.043717Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:16:44.043730Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715668] at 72075186224037889 on unit CompleteOperation 2025-06-30T09:16:44.043758Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715668] from 72075186224037889 at tablet 72075186224037889 send result 
to client [2:1104:2881], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:16:44.043776Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:16:44.043948Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:1104:2881], Recipient [2:928:2732]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715668 Cleared: true 2025-06-30T09:16:44.043959Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-30T09:16:44.043988Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 3500} 2025-06-30T09:16:44.044002Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-30T09:16:44.044008Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-30T09:16:44.044070Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:928:2732], Recipient [2:928:2732]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:44.044076Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:44.044097Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-30T09:16:44.044107Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:16:44.044117Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [3500:281474976715668] at 72075186224037890 for WaitForStreamClearance 2025-06-30T09:16:44.044123Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit WaitForStreamClearance 2025-06-30T09:16:44.044131Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [3500:281474976715668] at 72075186224037890 2025-06-30T09:16:44.044139Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-06-30T09:16:44.044151Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit WaitForStreamClearance 2025-06-30T09:16:44.044158Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit ReadTableScan 2025-06-30T09:16:44.044163Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit ReadTableScan 2025-06-30T09:16:44.044281Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Continue 2025-06-30T09:16:44.044288Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:16:44.044294Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037890 2025-06-30T09:16:44.044299Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:52: TPlanQueueUnit at 72075186224037890 out-of-order limits 
exceeded 2025-06-30T09:16:44.044304Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-30T09:16:44.044592Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:1123:2898], Recipient [2:928:2732]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-30T09:16:44.044605Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-30T09:16:44.044675Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715668, MessageQuota: 1 2025-06-30T09:16:44.044928Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715668, Size: 54, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-30T09:16:44.044973Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715668, PendingAcks: 0 2025-06-30T09:16:44.044981Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715668, MessageQuota: 0 2025-06-30T09:16:44.045087Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-30T09:16:44.045095Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715668, at: 72075186224037890 2025-06-30T09:16:44.045136Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:928:2732], Recipient [2:928:2732]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:44.045142Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:16:44.045153Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-30T09:16:44.045159Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:16:44.045166Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [3500:281474976715668] at 72075186224037890 for ReadTableScan 2025-06-30T09:16:44.045171Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit ReadTableScan 2025-06-30T09:16:44.045178Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [3500:281474976715668] at 72075186224037890 error: , IsFatalError: 0 2025-06-30T09:16:44.045186Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-06-30T09:16:44.045194Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit ReadTableScan 2025-06-30T09:16:44.045200Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit CompleteOperation 2025-06-30T09:16:44.045204Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit CompleteOperation 2025-06-30T09:16:44.045269Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is DelayComplete 
2025-06-30T09:16:44.045274Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit CompleteOperation 2025-06-30T09:16:44.045279Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit CompletedOperations 2025-06-30T09:16:44.045283Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit CompletedOperations 2025-06-30T09:16:44.045291Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-06-30T09:16:44.045294Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit CompletedOperations 2025-06-30T09:16:44.045299Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715668] at 72075186224037890 has finished 2025-06-30T09:16:44.045304Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:44.045308Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-30T09:16:44.045313Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-30T09:16:44.045317Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-30T09:16:44.055758Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-30T09:16:44.055797Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-30T09:16:44.055810Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715668] at 72075186224037890 on unit CompleteOperation 2025-06-30T09:16:44.055835Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715668] from 72075186224037890 at tablet 72075186224037890 send result to client [2:1104:2881], exec latency: 0 ms, propose latency: 1 ms 2025-06-30T09:16:44.055850Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad [GOOD] Test command err: 2025-06-30T09:16:40.582454Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668851329127576:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.582481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043ee/r3tmp/tmpcShqYf/pdisk_1.dat TServer::EnableGrpc on GrpcPort 3462, node 1 2025-06-30T09:16:40.678847Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:16:40.682468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Unknown -> Disconnected 2025-06-30T09:16:40.682492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.683655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:40.690561Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.691632Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521668851329127557:2080] 1751275000582221 != 1751275000582224 2025-06-30T09:16:40.691816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.691818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.691820Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.691862Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32008 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:40.756570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:16:40.760253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:40.763921Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 737BB3E52FEB6A38DA0B3A6783AD015E4A19E60A4F278D4CC4AED77C30B0A185 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-30T09:16:41.561370Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668856635151775:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.561512Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043ee/r3tmp/tmpBuMuuK/pdisk_1.dat TServer::EnableGrpc on GrpcPort 27284, node 2 2025-06-30T09:16:41.595017Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:41.601383Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:41.601400Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:41.601402Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:41.601436Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62016 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.661405Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.661441Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.662452Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:41.680752Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:16:41.682730Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket CFD1BD090F7EC76B9020E9CFB1390634CC4AF4D83122499450418379A2E9F032 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-30T09:16:42.519451Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521668863573144150:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:42.519619Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043ee/r3tmp/tmpzeDpbn/pdisk_1.dat TServer::EnableGrpc on GrpcPort 25055, node 3 2025-06-30T09:16:42.535400Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:42.540191Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:42.540202Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:42.540204Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:42.540250Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3578 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:42.623408Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:42.623436Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:42.623768Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:16:42.624431Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:42.624739Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:42.625316Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 5BB4C4C22DCF3EC092C14DD42CEB3488685D5AA88A9941C322C2558A66921771 () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-30T09:16:42.625389Z node 3 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 5BB4C4C22DCF3EC092C14DD42CEB3488685D5AA88A9941C322C2558A66921771: Cannot create token from certificate. Client certificate failed verification 2025-06-30T09:16:43.123699Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521668865064964586:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:43.123732Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043ee/r3tmp/tmpRXuv46/pdisk_1.dat 2025-06-30T09:16:43.151135Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:43.151518Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521668865064964554:2080] 1751275003123412 != 1751275003123415 TServer::EnableGrpc on GrpcPort 3953, node 4 2025-06-30T09:16:43.163384Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:43.163400Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:43.163402Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:43.163459Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9826 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:16:43.225652Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:43.225705Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:43.226688Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:43.233820Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:43.235538Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:43.236586Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 0614304CA0516D53B0FB0BB16BA479A0DE435DDD9923842B84E538DFD29B03EE () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-30T09:16:43.918331Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521668867230939405:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:43.918406Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043ee/r3tmp/tmpIJaC4c/pdisk_1.dat 2025-06-30T09:16:43.953311Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:43.953653Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7521668867230939308:2080] 1751275003917550 != 1751275003917553 TServer::EnableGrpc on GrpcPort 17343, node 5 2025-06-30T09:16:43.971018Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:43.971037Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:43.971039Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:43.971095Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28577 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:44.020529Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:44.020563Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:44.021878Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:44.028736Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:44.030781Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:44.032042Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 19F63B05EFB16E90EC2EBE7E949A66B036D37A0F080934FEDB792FA7724B926A () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-30T09:16:44.032118Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 19F63B05EFB16E90EC2EBE7E949A66B036D37A0F080934FEDB792FA7724B926A: Cannot create token from certificate. 
Client certificate failed verification >> TxUsage::WriteToTopic_Demo_22_RestartNo_Table >> IncrementalBackup::BackupRestore [GOOD] >> IncrementalBackup::ComplexRestoreBackupCollection+WithIncremental >> IncrementalBackup::SimpleBackupBackupCollection+WithIncremental [GOOD] >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption [GOOD] |78.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |78.6%| [TS] {RESULT} ydb/core/graph/shard/ut/unittest |78.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |78.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |78.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption [GOOD] Test command err: 2025-06-30T09:16:40.683399Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:16:40.683507Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:16:40.683529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00494d/r3tmp/tmpjAOSvK/pdisk_1.dat 2025-06-30T09:16:40.807064Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:16:40.808091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:40.828826Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.830211Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275000197886 != 1751275000197890 2025-06-30T09:16:40.873667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.873710Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.885030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:40.961170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:40.983580Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:641:2542] 2025-06-30T09:16:40.983659Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:16:40.994852Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:16:40.994920Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:16:40.995173Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:16:40.995185Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:16:40.995194Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:16:40.995262Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:16:40.995323Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:16:40.995338Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:667:2542] in generation 1 2025-06-30T09:16:40.995768Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:644:2544] 2025-06-30T09:16:40.995821Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:16:40.997487Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:16:40.997532Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:16:40.997722Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:16:40.997732Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:16:40.997740Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:16:40.997789Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:16:40.997812Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:16:40.997826Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:675:2544] in generation 1 2025-06-30T09:16:41.008823Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:16:41.015325Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:16:41.015430Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:16:41.015464Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:678:2563] 2025-06-30T09:16:41.015470Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:16:41.015477Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:16:41.015484Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:16:41.015608Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:16:41.015618Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:16:41.015634Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:16:41.015664Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:679:2564] 2025-06-30T09:16:41.015668Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:16:41.015673Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:16:41.015677Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:16:41.015813Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 
2025-06-30T09:16:41.015847Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:16:41.015885Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:16:41.015894Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:41.015905Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:16:41.015911Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:16:41.015919Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-30T09:16:41.015931Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-30T09:16:41.015968Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:634:2538], serverId# [1:652:2548], sessionId# [0:0:0] 2025-06-30T09:16:41.015976Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:16:41.015981Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:16:41.015986Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-30T09:16:41.015991Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:16:41.016022Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:16:41.016090Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:16:41.016128Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:16:41.016272Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:635:2539], serverId# [1:660:2554], sessionId# [0:0:0] 2025-06-30T09:16:41.016404Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-30T09:16:41.016439Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:16:41.016453Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-30T09:16:41.016944Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:16:41.016965Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:16:41.029502Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:16:41.029564Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:16:41.029592Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-30T09:16:41.029602Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-30T09:16:41.177642Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:696:2575], serverId# [1:699:2578], sessionId# [0:0:0] 2025-06-30T09:16:41.177710Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2576], serverId# [1:700:2579], sessionId# [0:0:0] 2025-06-30T09:16:41.178994Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... t finished with status SCHEME_ERROR 2025-06-30T09:16:45.132909Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [3:62:2109] Handle TEvExecuteKqpTransaction 2025-06-30T09:16:45.132942Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [3:62:2109] TxId# 281474976715662 ProcessProposeKqpTransaction 2025-06-30T09:16:45.133221Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz01zs552e419vx0b0y3s8td, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YThhOGU0YzgtZTIxOWUzM2ItZjQ2ZTNhZGMtNDQ0NGYwY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:16:45.134203Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [3:1064:2858], Recipient [3:630:2534]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 3 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-30T09:16:45.134278Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-30T09:16:45.134293Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v8000/0 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v8000/18446744073709551615 ImmediateWriteEdgeReplied# v8000/18446744073709551615 2025-06-30T09:16:45.134305Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v8000/18446744073709551615 2025-06-30T09:16:45.134318Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckRead 2025-06-30T09:16:45.134342Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-30T09:16:45.134349Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:16:45.134355Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:16:45.134360Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:16:45.134378Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-30T09:16:45.134384Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-30T09:16:45.134388Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:16:45.134394Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:16:45.134399Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:16:45.134420Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 3 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-30T09:16:45.134489Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[3:1064:2858], 0} after executionsCount# 1 2025-06-30T09:16:45.134500Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[3:1064:2858], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:16:45.134520Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[3:1064:2858], 0} finished in read 
2025-06-30T09:16:45.134535Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-30T09:16:45.134540Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:16:45.134544Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:16:45.134549Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:16:45.134561Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-30T09:16:45.134565Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:16:45.134570Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-30T09:16:45.134576Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:16:45.134604Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-30T09:16:45.139110Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [3:1064:2858], Recipient [3:630:2534]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-30T09:16:45.139140Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 4 } } 2025-06-30T09:16:45.171253Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [3:62:2109] Handle TEvExecuteKqpTransaction 2025-06-30T09:16:45.171317Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [3:62:2109] TxId# 281474976715663 ProcessProposeKqpTransaction 2025-06-30T09:16:45.171717Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz01zs6m1n4h2yrs0wc2z2mh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzY1MGVjNzQtYzkxNTg5YmItMjE5MDhhYjctYjI4YmNlZTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:16:45.172724Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [3:1094:2882], Recipient [3:865:2693]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 8 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false RangesSize: 1 2025-06-30T09:16:45.172843Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-30T09:16:45.172859Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037889 CompleteEdge# v6000/281474976710759 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v5000/18446744073709551615 ImmediateWriteEdgeReplied# v5000/18446744073709551615 2025-06-30T09:16:45.172869Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037889 changed HEAD read to non-repeatable v8000/18446744073709551615 2025-06-30T09:16:45.172888Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CheckRead 2025-06-30T09:16:45.172916Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-30T09:16:45.172923Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CheckRead 2025-06-30T09:16:45.172931Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-30T09:16:45.172935Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-30T09:16:45.172961Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037889 2025-06-30T09:16:45.172984Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-30T09:16:45.172989Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-30T09:16:45.172994Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit ExecuteRead 2025-06-30T09:16:45.172999Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit ExecuteRead 2025-06-30T09:16:45.173017Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 8 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2025-06-30T09:16:45.173147Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[3:1094:2882], 0} after executionsCount# 1 2025-06-30T09:16:45.173159Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[3:1094:2882], 0} sends rowCount# 2, bytes# 64, quota rows left# 32765, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:16:45.173202Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[3:1094:2882], 0} finished in read 2025-06-30T09:16:45.173214Z node 3 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-30T09:16:45.173218Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit ExecuteRead 2025-06-30T09:16:45.173223Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit CompletedOperations 2025-06-30T09:16:45.173227Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CompletedOperations 2025-06-30T09:16:45.173240Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-30T09:16:45.173244Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CompletedOperations 2025-06-30T09:16:45.173249Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037889 has finished 2025-06-30T09:16:45.173254Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-30T09:16:45.173312Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-30T09:16:45.173761Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [3:1094:2882], Recipient [3:865:2693]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-30T09:16:45.173776Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 4 } } |78.6%| [TA] $(B)/ydb/core/tx/datashard/ut_upload_rows/test-results/unittest/{meta.json ... results_accumulator.log} |78.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless >> VDiskRestart::Simple [GOOD] |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |78.6%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_upload_rows/test-results/unittest/{meta.json ... results_accumulator.log} |78.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |78.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental [GOOD] |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> VDiskRestart::Simple [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental [GOOD] Test command err: 2025-06-30T09:16:41.230634Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:16:41.230756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:16:41.230779Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048e9/r3tmp/tmp3uuOzO/pdisk_1.dat 2025-06-30T09:16:41.341814Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:16:41.342201Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:560:2485], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.342215Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.342221Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:16:41.342236Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:557:2483], Recipient [1:375:2369]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-30T09:16:41.342242Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:16:41.362825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-30T09:16:41.362889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.362939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:16:41.362946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:16:41.363001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:16:41.363013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:41.363026Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.363234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.363266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:16:41.363272Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.363276Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-30T09:16:41.363319Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.363327Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.363339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.363351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:16:41.363358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:41.363363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:41.363389Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.363470Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.363476Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-30T09:16:41.363498Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.363503Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.363510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.363516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:16:41.363521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:41.363531Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.363586Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.363591Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 
2025-06-30T09:16:41.363607Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.363612Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.363618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.363623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.363629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-30T09:16:41.363634Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.363640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:41.364353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:41.364482Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.364492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:41.364538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:16:41.364867Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877760, Sender [1:565:2490], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:567:2491] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-30T09:16:41.364880Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5055: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-30T09:16:41.364886Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5794: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-30T09:16:41.364922Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269091328, Sender [1:371:2365], Recipient [1:375:2369]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-30T09:16:41.364999Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:569:2493], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.365005Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-30T09:16:41.365009Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:16:41.365041Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124996, Sender [1:557:2483], Recipient [1:375:2369]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-30T09:16:41.365047Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4973: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-30T09:16:41.365060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.365067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:16:41.365072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.382089Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 273285138, Sender [1:45:2092], Recipient [1:375:2369]: ... awX1: 629 RawX2: 12884904421 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-30T09:16:46.685409Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 281474976715662, tablet: 72075186224037888, partId: 1 2025-06-30T09:16:46.685440Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480, message: Source { RawX1: 629 RawX2: 12884904421 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-30T09:16:46.685450Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 281474976715662:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-30T09:16:46.685464Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976715662:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 629 RawX2: 12884904421 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-30T09:16:46.685480Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715662:1, shardIdx: 72057594046644480:1, shard: 72075186224037888, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:46.685485Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-30T09:16:46.685491Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715662:1, datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-30T09:16:46.685497Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715662:1, datashard: 72075186224037888, at schemeshard: 72057594046644480 2025-06-30T09:16:46.685503Z node 3 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 281474976715662:1 129 -> 240 2025-06-30T09:16:46.685545Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:46.685680Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-30T09:16:46.685686Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:46.685692Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715662:1 2025-06-30T09:16:46.685707Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:894:2698] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-30T09:16:46.685712Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:629:2533] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-30T09:16:46.685734Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-06-30T09:16:46.685741Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:16:46.685773Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037889 state Ready 2025-06-30T09:16:46.685780Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-30T09:16:46.685810Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [3:374:2368], Recipient [3:374:2368]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:46.685814Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:46.685820Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-30T09:16:46.685828Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 281474976715662:1ProgressState, operation type TxCopyTable 2025-06-30T09:16:46.685832Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:46.685837Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1061: Set barrier, OperationId: 281474976715662:1, name: CopyTableBarrier, done: 1, blocked: 1, parts count: 2 2025-06-30T09:16:46.685841Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 281474976715662, done: 1, blocked: 1 2025-06-30T09:16:46.685850Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 281474976715662:1 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 281474976715662 Name: 
CopyTableBarrier }, at tablet# 72057594046644480 2025-06-30T09:16:46.685853Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715662:1 240 -> 240 2025-06-30T09:16:46.685914Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:46.685918Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715662:1 2025-06-30T09:16:46.685929Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [3:374:2368], Recipient [3:374:2368]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:46.685932Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:46.685935Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-30T09:16:46.685939Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715662:1 ProgressState 2025-06-30T09:16:46.685948Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:46.685952Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715662:1 progress is 2/2 2025-06-30T09:16:46.685955Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-06-30T09:16:46.685958Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715662:1 progress is 2/2 2025-06-30T09:16:46.685961Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-06-30T09:16:46.685965Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715662, ready parts: 2/2, is published: true 2025-06-30T09:16:46.685973Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:864:2678] message: TxId: 281474976715662 2025-06-30T09:16:46.685978Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-06-30T09:16:46.685983Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715662:0 2025-06-30T09:16:46.685986Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715662:0 2025-06-30T09:16:46.685996Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 2 2025-06-30T09:16:46.686000Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715662:1 2025-06-30T09:16:46.686002Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715662:1 2025-06-30T09:16:46.686017Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove 
txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 11] was 3 2025-06-30T09:16:46.686021Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-30T09:16:46.686100Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:46.686108Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:864:2678] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-30T09:16:46.686185Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [3:878:2685], Recipient [3:374:2368]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:46.686189Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:46.686192Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:16:46.705678Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [3:978:2763], serverId# [3:979:2764], sessionId# [0:0:0] 2025-06-30T09:16:46.705736Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz01ztpz29h0esjfjeabt8gn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODBjNjJkMWQtYzMwYjcyMzctNDc2OGJhNTQtMzM2ZGZlZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } 2025-06-30T09:16:46.723084Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jz01ztqk4dkbpmd9gp003qm7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Nzc5MzNiZDYtMWYxOTgwZWEtNmVhNWU1ZmEtODJkZjQwOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } >> TxUsage::WriteToTopic_Demo_13_Table [GOOD] |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> TxUsage::WriteToTopic_Demo_13_Query >> VDiskBalancing::TestStopOneNode_Mirror3dc >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 >> IncrementalBackup::E2EBackupCollection |78.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat |78.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat |78.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |78.7%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental [GOOD] Test command err: 2025-06-30T09:16:41.151197Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:16:41.151294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:16:41.151311Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004908/r3tmp/tmpdNTHJl/pdisk_1.dat 2025-06-30T09:16:41.256230Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:16:41.256618Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:560:2485], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.256630Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.256636Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:16:41.256648Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:557:2483], Recipient [1:375:2369]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-30T09:16:41.256652Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:16:41.275571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-30T09:16:41.275671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.275745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:16:41.275754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:16:41.275832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:16:41.275851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:41.275872Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.276126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.276176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:16:41.276184Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.276191Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-30T09:16:41.276244Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.276254Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.276268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.276278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:16:41.276285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:41.276290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:41.276326Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.276412Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.276418Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-30T09:16:41.276437Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.276446Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.276453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.276458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:16:41.276463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:41.276473Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.276527Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.276532Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 
2025-06-30T09:16:41.276547Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.276551Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.276556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.276561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.276567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-30T09:16:41.276571Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.276577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:41.277210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:41.277346Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.277356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:41.277415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:16:41.277709Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877760, Sender [1:565:2490], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:567:2491] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-30T09:16:41.277722Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5055: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-30T09:16:41.277729Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5794: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-30T09:16:41.277802Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269091328, Sender [1:371:2365], Recipient [1:375:2369]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-30T09:16:41.277888Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:569:2493], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.277893Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-30T09:16:41.277897Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:16:41.277933Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124996, Sender [1:557:2483], Recipient [1:375:2369]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-30T09:16:41.277938Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4973: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-30T09:16:41.277954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.277961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:16:41.277967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.294696Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 273285138, Sender [1:45:2092], Recipient [1:375:2369]: ... _TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:44.536383Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:16:44.536437Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269551620, Sender [2:700:2571], Recipient [2:374:2368]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 700 RawX2: 8589937163 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-30T09:16:44.536444Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-30T09:16:44.536455Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 700 RawX2: 8589937163 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-30T09:16:44.536466Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 281474976715662, tablet: 72075186224037888, partId: 0 2025-06-30T09:16:44.536498Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480, message: Source { RawX1: 700 RawX2: 8589937163 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-30T09:16:44.536508Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 281474976715662:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-30T09:16:44.536519Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976715662:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 700 RawX2: 8589937163 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-30T09:16:44.536534Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 
281474976715662:0, shardIdx: 72057594046644480:1, shard: 72075186224037888, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:44.536539Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-30T09:16:44.536546Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715662:0, datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-30T09:16:44.536552Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715662:0, datashard: 72075186224037888, at schemeshard: 72057594046644480 2025-06-30T09:16:44.536560Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715662:0 129 -> 240 2025-06-30T09:16:44.536596Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:44.536744Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-30T09:16:44.536751Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:44.536757Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715662:0 2025-06-30T09:16:44.536771Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [2:908:2707] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-30T09:16:44.536779Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [2:700:2571] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-30T09:16:44.536808Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-06-30T09:16:44.536818Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:16:44.536858Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037889 state Ready 2025-06-30T09:16:44.536867Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-30T09:16:44.536900Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [2:374:2368], Recipient [2:374:2368]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:44.536907Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:44.536916Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-30T09:16:44.536923Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 
281474976715662:0ProgressState, operation type TxCopyTable 2025-06-30T09:16:44.536928Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:44.536935Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1061: Set barrier, OperationId: 281474976715662:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-06-30T09:16:44.536941Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 281474976715662, done: 0, blocked: 1 2025-06-30T09:16:44.536953Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 281474976715662:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 281474976715662 Name: CopyTableBarrier }, at tablet# 72057594046644480 2025-06-30T09:16:44.536959Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715662:0 240 -> 240 2025-06-30T09:16:44.537035Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:44.537042Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715662:0 2025-06-30T09:16:44.537061Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [2:374:2368], Recipient [2:374:2368]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:44.537066Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:44.537071Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-30T09:16:44.537078Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715662:0 ProgressState 2025-06-30T09:16:44.537089Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:44.537096Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715662:0 progress is 1/1 2025-06-30T09:16:44.537101Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1 2025-06-30T09:16:44.537107Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715662:0 progress is 1/1 2025-06-30T09:16:44.537110Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1 2025-06-30T09:16:44.537116Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715662, ready parts: 1/1, is published: true 2025-06-30T09:16:44.537129Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:888:2690] message: TxId: 281474976715662 2025-06-30T09:16:44.537137Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1 2025-06-30T09:16:44.537144Z node 
2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715662:0 2025-06-30T09:16:44.537148Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715662:0 2025-06-30T09:16:44.537177Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 11] was 3 2025-06-30T09:16:44.537183Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 3 2025-06-30T09:16:44.537273Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:44.537285Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [2:888:2690] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-30T09:16:44.537446Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [2:897:2698], Recipient [2:374:2368]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:44.537454Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:44.537459Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:16:44.559240Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [2:986:2766], serverId# [2:987:2767], sessionId# [0:0:0] 2025-06-30T09:16:44.559304Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz01zrkvcyknzhr3yc59pqrt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQ4MTYyYzItZmNmODAxYmEtYjc5ZjNhMmEtYzA4OWFiOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }, { items { uint32_value: 5 } items { uint32_value: 50 } } >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD] >> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD] >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD] Test command err: RandomSeed# 301634065741778398 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:100:0] 2025-06-30T09:16:48.457459Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD] Test command err: RandomSeed# 17045376717806080804 SEND TEvPut with key [1:1:1:0:0:3201024:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:3201024:0] 2025-06-30T09:16:48.413836Z 3 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:194:17] ServerId# [1:304:64] TabletId# 72057594037932033 PipeClientId# [3:194:17] 2025-06-30T09:16:48.413890Z 8 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:229:17] ServerId# [1:309:69] TabletId# 72057594037932033 PipeClientId# [8:229:17] 2025-06-30T09:16:48.413933Z 6 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:215:17] ServerId# [1:307:67] TabletId# 72057594037932033 PipeClientId# [6:215:17] 2025-06-30T09:16:48.413947Z 5 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:208:17] ServerId# [1:306:66] TabletId# 72057594037932033 PipeClientId# [5:208:17] 2025-06-30T09:16:48.413968Z 4 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:201:17] ServerId# [1:305:65] TabletId# 72057594037932033 PipeClientId# [4:201:17] 2025-06-30T09:16:48.413982Z 2 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:187:17] ServerId# [1:303:63] TabletId# 72057594037932033 PipeClientId# [2:187:17] 2025-06-30T09:16:48.413999Z 7 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:222:17] ServerId# [1:308:68] TabletId# 72057594037932033 PipeClientId# [7:222:17] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag [GOOD] >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD] >> 
IncrementalBackup::ComplexRestoreBackupCollection+WithIncremental [FAIL] >> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false >> BasicUsage::ReadWithoutConsumerWithRestarts [GOOD] >> BasicUsage::ReadWithRestarts ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:16:48.996829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:48.996862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:48.996868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:48.996874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:48.996881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:48.996886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:48.996895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:48.996912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:48.997040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:48.997119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:49.015994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:16:49.016030Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:49.020927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:49.021021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:49.021071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 
2025-06-30T09:16:49.023341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:49.023445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:49.023596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:49.023692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:49.024549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:49.024618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:49.024980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:49.024997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:49.025035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:49.025046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:49.025053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:49.025081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.026887Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:16:49.054010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:49.054135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.054217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:49.054227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:49.054290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:49.054308Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:49.055507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:49.055569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:49.055656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.055672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:49.055680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:49.055688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:49.056384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.056403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:49.056413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:49.056942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.056958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.056967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:49.056978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:49.057906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:49.058473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:49.058532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at 
step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:49.058811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:49.058851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:49.058861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:49.058945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:16:49.058954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:49.058999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:49.059015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:16:49.059558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:49.059572Z node 1 :FLAT_TX_SCHEMESHARD ... 
_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.132747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.132755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.132760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 104:0, at tablet# 72057594046678944 2025-06-30T09:16:49.132768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 104 ready parts: 1/1 2025-06-30T09:16:49.132797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 104 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:49.133085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 msg type: 269090816 2025-06-30T09:16:49.133111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-30T09:16:49.133171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:49.133187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:49.133192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-30T09:16:49.133250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 104:0 128 -> 240 2025-06-30T09:16:49.133256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-30T09:16:49.133278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:16:49.133297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:619:2546], EffectiveACLVersion: 0, SubdomainVersion: 
2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 104 2025-06-30T09:16:49.133645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:49.133656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:16:49.133705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:49.133710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-30T09:16:49.133720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.133725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 104:0, ProgressState, NeedSyncHive: 0 2025-06-30T09:16:49.133729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 104:0 240 -> 240 2025-06-30T09:16:49.133937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:16:49.133950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:16:49.133954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:16:49.133958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-30T09:16:49.133963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-30T09:16:49.133976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-30T09:16:49.134758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.134779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-30T09:16:49.134793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part 
operation is done id#104:0 progress is 1/1 2025-06-30T09:16:49.134797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:16:49.134801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:16:49.134804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:16:49.134808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-30T09:16:49.134813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:16:49.134817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-30T09:16:49.134821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:0 2025-06-30T09:16:49.134857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:16:49.134970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-30T09:16:49.135450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-30T09:16:49.135463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-30T09:16:49.135552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-30T09:16:49.135569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-30T09:16:49.135574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:774:2654] TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-30T09:16:49.136212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeExclusive } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:49.136235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeExclusive } 2025-06-30T09:16:49.136240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, path /MyRoot/ServerLess0 2025-06-30T09:16:49.136271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, at schemeshard: 72057594046678944 2025-06-30T09:16:49.136278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 105:1, propose status:StatusPreconditionFailed, reason: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, at schemeshard: 72057594046678944 2025-06-30T09:16:49.136775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 105, response: Status: StatusPreconditionFailed Reason: "Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:49.136821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, operation: ALTER DATABASE, path: /MyRoot/ServerLess0 TestModificationResult got TxId: 105, wait until txId: 105 |78.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |78.7%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD] Test command err: RandomSeed# 9917314259527061446 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:2:0:0:100:0] 2025-06-30T09:16:48.743149Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:6349:836] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Start compaction Finish compaction >> TxUsage::WriteToTopic_Demo_20_RestartNo_Table [GOOD] >> TSchemeShardTestExtSubdomainReboots::CreateForceDrop-AlterDatabaseCreateHiveFirst-true [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartNo_Table [GOOD] >> TSchemeShardServerLess::TestServerlessComputeResourcesMode >> TSchemeShardTestExtSubdomainReboots::CreateForceDrop-AlterDatabaseCreateHiveFirst-false [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartNo_Table >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false [GOOD] |78.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |78.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots >> 
TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation >> TxUsage::WriteToTopic_Demo_20_RestartNo_Query >> TSchemeShardServerLess::TestServerlessComputeResourcesMode [GOOD] >> TSchemeShardServerLess::StorageBilling ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::CreateForceDrop-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:16:38.887411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:38.887439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.887445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:38.887451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:38.887457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:38.887462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:38.887471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.887500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:38.887622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:38.887722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:38.903909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { 
AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:16:38.903938Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:38.904035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.906905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:38.907701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:38.907737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:38.909559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:38.909613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:38.909721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.909787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:38.910714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.910794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:38.911048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:38.911057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.911064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:38.911071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:38.911077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:38.911102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:16:38.912416Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.930352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { 
WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:38.930440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.930501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:38.930509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:38.930556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:38.930575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:38.931364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.931405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:38.931462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.931471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:38.931475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:38.931479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:38.931850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.931861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:38.931866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:38.932253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.932262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.932267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:38.932274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:38.932790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:38.933180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:38.933218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:38.933395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.933419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
hemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:16:49.698271Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:16:49.698277Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-30T09:16:49.698282Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:16:49.698423Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:16:49.698435Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:16:49.698440Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:16:49.698449Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-30T09:16:49.698454Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:16:49.698464Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-30T09:16:49.700114Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.700144Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:16:49.700159Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:16:49.700165Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:16:49.700171Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:16:49.700174Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:16:49.700180Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-30T09:16:49.700186Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:16:49.700192Z node 41 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:16:49.700196Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:16:49.700213Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:16:49.700301Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:16:49.700311Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:16:49.700332Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:16:49.700384Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:16:49.700390Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:16:49.700403Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:16:49.700844Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:16:49.700956Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:16:49.701494Z node 41 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:16:49.701521Z node 41 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 1002 2025-06-30T09:16:49.701588Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1002: send EvNotifyTxCompletion 2025-06-30T09:16:49.701597Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1002 TestWaitNotification wait txId: 1003 2025-06-30T09:16:49.701613Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:16:49.701617Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:16:49.701724Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1002, at schemeshard: 72057594046678944 2025-06-30T09:16:49.701749Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 
1003, at schemeshard: 72057594046678944 2025-06-30T09:16:49.701759Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-30T09:16:49.701765Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [41:352:2341] 2025-06-30T09:16:49.701785Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:16:49.701789Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [41:352:2341] TestWaitNotification: OK eventTxId 1002 TestWaitNotification: OK eventTxId 1003 2025-06-30T09:16:49.701889Z node 41 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:16:49.701929Z node 41 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 60us result status StatusPathDoesNotExist 2025-06-30T09:16:49.701969Z node 41 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:16:49.702019Z node 41 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:16:49.702065Z node 41 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 47us result status StatusSuccess 2025-06-30T09:16:49.702181Z node 41 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TBlobStorageProxyTest::TestBlockPersistence ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:16:49.621657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:49.621689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:49.621695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:49.621702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:49.621709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:49.621713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:49.621723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:49.621740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:49.621866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:49.621952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:49.637876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:16:49.637908Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:49.641812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:49.641886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:49.641930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:49.645085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:49.645195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:49.645375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:49.645471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:49.646343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:49.646402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:49.646755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:49.646770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:49.646796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:49.646808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:49.646815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:49.646838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.648590Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:16:49.672143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:49.672233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.672305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:49.672313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:49.672372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:49.672387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:49.673776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:49.673857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:49.673936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.673948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:49.673953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:49.673959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:49.674717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.674753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:49.674765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:49.677061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.677080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.677086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-30T09:16:49.677093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:49.677799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:49.679504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:49.679573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:49.679824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:49.679873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:49.679883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:49.679982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:16:49.679993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:49.680036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:49.680051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:16:49.681161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:49.681173Z node 1 :FLAT_TX_SCHEMESHARD ... 
ated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-30T09:16:49.873339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:16:49.873575Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 7 TxId_Deprecated: 7 TabletID: 72075186234409548 2025-06-30T09:16:49.873622Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186234409547 2025-06-30T09:16:49.873636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-06-30T09:16:49.873662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 Forgetting tablet 72075186234409548 2025-06-30T09:16:49.873855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-30T09:16:49.873873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 Forgetting tablet 72075186234409547 2025-06-30T09:16:49.873941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:16:49.873945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:16:49.873965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:16:49.873996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-30T09:16:49.874106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:16:49.874113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:16:49.874122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:16:49.875055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-30T09:16:49.875072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186234409546 2025-06-30T09:16:49.875090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 
72057594046678944:7 2025-06-30T09:16:49.875094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186234409548 2025-06-30T09:16:49.875143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-30T09:16:49.875150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186234409547 2025-06-30T09:16:49.875231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:16:49.875482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-30T09:16:49.875566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-30T09:16:49.875574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-30T09:16:49.875653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-30T09:16:49.875671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-30T09:16:49.875677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:933:2793] TestWaitNotification: OK eventTxId 106 2025-06-30T09:16:49.875769Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0/dir/table0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:16:49.875804Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0/dir/table0" took 47us result status StatusPathDoesNotExist 2025-06-30T09:16:49.875853Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0/dir/table0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ServerLess0/dir/table0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:16:49.875915Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:16:49.875929Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 15us result status StatusPathDoesNotExist 2025-06-30T09:16:49.875947Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ServerLess0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:16:49.876001Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:16:49.876022Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 23us result status StatusSuccess 2025-06-30T09:16:49.876112Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SharedDB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 
MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186233409550 is deleted wait until 72075186233409551 is deleted wait until 72075186233409552 is deleted wait until 72075186233409553 is deleted 2025-06-30T09:16:49.876202Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409550 2025-06-30T09:16:49.876217Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409551 2025-06-30T09:16:49.876227Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409552 2025-06-30T09:16:49.876238Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409553 Deleted tabletId 72075186233409550 Deleted tabletId 72075186233409551 Deleted tabletId 72075186233409552 Deleted tabletId 72075186233409553 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::CreateForceDrop-AlterDatabaseCreateHiveFirst-false [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:16:38.931508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:38.931525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.931529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:38.931532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:38.931536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-30T09:16:38.931539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:38.931545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.931557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:38.931626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:38.931680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:38.941877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:16:38.941893Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:38.941965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.944038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:38.944721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:38.944744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:38.946140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:38.946174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:38.946260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.946299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:38.947101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.947137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:38.947330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:38.947337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.947343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:38.947347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, 
domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:38.947351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:38.947368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:16:38.948550Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.968007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:38.968074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.968127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:38.968134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:38.968177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:38.968189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:38.968831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.968875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:38.968929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.968939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:38.968944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do 
next state 2025-06-30T09:16:38.968950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:38.969359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.969368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:38.969372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:38.969715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.969727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.969733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:38.969739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:38.970370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:38.970784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:38.970817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:38.970986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.971008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
hemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:16:49.867879Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:16:49.867884Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-30T09:16:49.867889Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:16:49.867992Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:16:49.868002Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:16:49.868006Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:16:49.868014Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-30T09:16:49.868018Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:16:49.868027Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-30T09:16:49.868475Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:16:49.868485Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:16:49.868497Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:16:49.868502Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:16:49.868508Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:16:49.868512Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:16:49.868517Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-30T09:16:49.868522Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:16:49.868527Z node 41 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:16:49.868532Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:16:49.868542Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:16:49.868588Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:16:49.868595Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:16:49.868611Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:16:49.868644Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:16:49.868650Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:16:49.868659Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:16:49.868943Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:16:49.869011Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:16:49.869396Z node 41 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:16:49.869411Z node 41 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 1002 2025-06-30T09:16:49.869451Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1002: send EvNotifyTxCompletion 2025-06-30T09:16:49.869458Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1002 TestWaitNotification wait txId: 1003 2025-06-30T09:16:49.869473Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:16:49.869477Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:16:49.869551Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1002, at schemeshard: 72057594046678944 2025-06-30T09:16:49.869568Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 
1003, at schemeshard: 72057594046678944 2025-06-30T09:16:49.869577Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-30T09:16:49.869582Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [41:352:2341] 2025-06-30T09:16:49.869599Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:16:49.869603Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [41:352:2341] TestWaitNotification: OK eventTxId 1002 TestWaitNotification: OK eventTxId 1003 2025-06-30T09:16:49.869672Z node 41 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:16:49.869697Z node 41 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 35us result status StatusPathDoesNotExist 2025-06-30T09:16:49.869732Z node 41 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:16:49.869776Z node 41 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:16:49.869797Z node 41 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 23us result status StatusSuccess 2025-06-30T09:16:49.869875Z node 41 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation [GOOD] |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:16:50.083830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:50.083855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:50.083860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:50.083866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:50.083872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:50.083876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:50.083886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:50.083900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 
600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:50.084018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:50.084079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:50.095637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:16:50.095659Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:50.098151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:50.098196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:50.098227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:50.099752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:50.099821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:50.099931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:50.100005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:50.100698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:50.100746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:50.100957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:50.100980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:50.101007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:50.101015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:50.101021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:50.101043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.102424Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:16:50.134766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:50.134885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.134978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:50.134990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:50.135064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:50.135084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:50.136211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:50.136269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:50.136356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.136372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:50.136380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:50.136387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:50.136975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.136994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:50.137003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:50.137464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.137478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.137487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:50.137497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:50.138465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:50.139008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:50.139062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:50.139353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:50.139388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:50.139399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:50.139486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:16:50.139496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:50.139539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:50.139557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:16:50.140103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:50.140117Z node 1 :FLAT_TX_SCHEMESHARD ... 
hemeshard_impl.cpp:6085: Update domain reply, message: Origin: 72075186233409546 TxId: 106, at schemeshard: 72057594046678944 2025-06-30T09:16:50.253801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409546, partId: 0 2025-06-30T09:16:50.253815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 106:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 106 2025-06-30T09:16:50.253823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:796: [72057594046678944] TSyncHive, operationId 106:0, HandleReply TEvUpdateDomainReply, from hive: 72075186233409546 2025-06-30T09:16:50.253832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 106:0 138 -> 240 2025-06-30T09:16:50.254382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-30T09:16:50.254405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:16:50.254691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.254729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.254775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 106:0 ProgressState 2025-06-30T09:16:50.254794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-30T09:16:50.254799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-30T09:16:50.254806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-30T09:16:50.254810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-30T09:16:50.254816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-06-30T09:16:50.254823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-30T09:16:50.254829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-30T09:16:50.254834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 106:0 2025-06-30T09:16:50.254851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-30T09:16:50.255394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-30T09:16:50.255407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: 
tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-30T09:16:50.255515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-30T09:16:50.255536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-30T09:16:50.255542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:856:2736] TestWaitNotification: OK eventTxId 106 2025-06-30T09:16:50.255658Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:16:50.255697Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 53us result status StatusSuccess 2025-06-30T09:16:50.255816Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "ServerLess0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:50.255907Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186234409549 2025-06-30T09:16:50.255925Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72075186234409549 describe path "/MyRoot/ServerLess0" took 19us result status StatusSuccess 2025-06-30T09:16:50.255980Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "MyRoot/ServerLess0" PathId: 1 SchemeshardId: 72075186234409549 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/ServerLess0" } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186234409549, at schemeshard: 72075186234409549 2025-06-30T09:16:50.256052Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:16:50.256067Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 17us result status StatusSuccess 2025-06-30T09:16:50.256123Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "ServerLess0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 
Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:50.256194Z node 1 :HIVE INFO: tablet_helpers.cpp:1470: [72075186233409546] TEvRequestDomainInfo, 72057594046678944:3 >> TPartitionTests::DifferentWriteTxBatchingOptions >> TTicketParserTest::LoginRefreshGroupsWithError [GOOD] >> TTicketParserTest::NebiusAccessServiceAuthenticationOk ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:16:50.348962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:50.348991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:50.348997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:50.349003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:50.349011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:50.349016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:50.349026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:50.349044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:50.349174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:50.349247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:50.365727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:16:50.365758Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:50.369516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:50.369588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:50.369633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:50.371344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:50.371425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:50.371578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:50.371660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:50.372336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:50.372394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:50.372675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:50.372688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:50.372713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:50.372722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:50.372729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:50.372748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.374143Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:16:50.397681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:50.397769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.397842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:50.397852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:50.397911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:50.397930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:50.400441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:50.400500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:50.400582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.400595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:50.400602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:50.400609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:50.401323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.401342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:50.401349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:50.401798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.401813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.401820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-30T09:16:50.401828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:50.402620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:50.403203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:50.403250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:50.403469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:50.403506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:50.403514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:50.403601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:16:50.403610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:50.403646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:50.403659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:16:50.404209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:50.404221Z node 1 :FLAT_TX_SCHEMESHARD ... 
4 2025-06-30T09:16:50.471286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 104:0 128 -> 240 2025-06-30T09:16:50.471293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-30T09:16:50.471315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:16:50.471335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:619:2546], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 104 2025-06-30T09:16:50.471682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:50.471688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:16:50.471718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:50.471723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-30T09:16:50.471731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.471736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 104:0, ProgressState, NeedSyncHive: 0 2025-06-30T09:16:50.471740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 104:0 240 -> 240 2025-06-30T09:16:50.471935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:16:50.471947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:16:50.471951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:16:50.471955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 
72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-30T09:16:50.471960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-30T09:16:50.471974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-30T09:16:50.472519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.472535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-30T09:16:50.472554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:16:50.472559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:16:50.472566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:16:50.472569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:16:50.472575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-30T09:16:50.472581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:16:50.472588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-30T09:16:50.472593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:0 2025-06-30T09:16:50.472625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:16:50.472708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-30T09:16:50.473018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-30T09:16:50.473025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-30T09:16:50.473084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-30T09:16:50.473096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-30T09:16:50.473100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:774:2654] TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-30T09:16:50.473834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" 
OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "SharedDB" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:50.473861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "SharedDB" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } 2025-06-30T09:16:50.473865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, path /MyRoot/SharedDB 2025-06-30T09:16:50.473895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, at schemeshard: 72057594046678944 2025-06-30T09:16:50.473902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 105:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, at schemeshard: 72057594046678944 2025-06-30T09:16:50.474297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 105, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:50.474336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, operation: ALTER DATABASE, path: /MyRoot/SharedDB TestModificationResult got TxId: 105, wait until txId: 105 TestModificationResults wait txId: 106 2025-06-30T09:16:50.474863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeUnspecified } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:50.474881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 106:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeUnspecified } 2025-06-30T09:16:50.474885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 106:0, path /MyRoot/ServerLess0 2025-06-30T09:16:50.474901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 106:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: 
can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, at schemeshard: 72057594046678944 2025-06-30T09:16:50.474905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, at schemeshard: 72057594046678944 2025-06-30T09:16:50.475253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:50.475274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, operation: ALTER DATABASE, path: /MyRoot/ServerLess0 TestModificationResult got TxId: 106, wait until txId: 106 >> TTicketParserTest::NebiusAccessServiceAuthenticationOk [GOOD] >> TTicketParserTest::NebiusAuthenticationRetryError >> TPQTabletTests::Multiple_PQTablets_1 >> TPQTabletTests::Multiple_PQTablets_1 [GOOD] >> TPQTestInternal::TestPartitionedBlobSimpleTest [GOOD] >> TPQTestInternal::TestPartitionedBigTest >> TPQTabletTests::Single_PQTablet_And_Multiple_Partitions >> TPQTabletTests::Multiple_PQTablets_2 >> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_1_Table [GOOD] >> TPQTabletTests::Multiple_PQTablets_2 [GOOD] >> TPQTabletTests::Single_PQTablet_And_Multiple_Partitions [GOOD] >> TListAllTopicsTests::PlainList >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query [GOOD] >> TPQTest::TestSeveralOwners >> TPQTestInternal::TestPartitionedBigTest [GOOD] >> TPQTestInternal::TestToHex [GOOD] >> TPQUserInfoTest::UserDataDeprecatedSerializaion [GOOD] >> TPQUtilsTest::TLastCounter [GOOD] >> TPQTabletTests::ProposeTx_Unknown_Partition_1 >> TxUsage::ReadRuleGeneration [GOOD] >> TPQTabletTests::One_Tablet_For_All_Partitions >> TPQTabletTests::ProposeTx_Unknown_Partition_1 [GOOD] >> TCacheTest::CheckSystemViewAccess >> IncrementalBackup::E2EBackupCollection [FAIL] >> TPQTabletTests::One_Tablet_For_All_Partitions [GOOD] |78.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |78.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table >> TPQTabletTests::ProposeTx_Unknown_WriteId >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table >> TCacheTest::CheckSystemViewAccess [GOOD] >> TCacheTest::CookiesArePreserved >> TPQTabletTests::One_New_Partition_In_Another_Tablet >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table >> TPQTabletTests::One_New_Partition_In_Another_Tablet [GOOD] |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/unittest >> TPQUtilsTest::TLastCounter [GOOD] >> TPQTabletTests::ProposeTx_Unknown_WriteId [GOOD] >> 
TPQTabletTests::ProposeTx_Unknown_Partition_2 >> TCacheTest::CookiesArePreserved [GOOD] >> TPQTabletTests::Limit_On_The_Number_Of_Transactons >> TPQTabletTests::ProposeTx_Unknown_Partition_2 [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartNo_Table [GOOD] >> TTicketParserTest::BulkAuthorizationRetryError [GOOD] >> TTicketParserTest::BulkAuthorizationRetryErrorImmediately >> TTicketParserTest::AuthorizationRetryError [GOOD] >> TTicketParserTest::AuthorizationRetryErrorImmediately >> TPQTabletTests::Read_TEvTxCommit_After_Restart >> TBlobStorageProxyTest::TestBlockPersistence [GOOD] >> TBlobStorageProxyTest::TestCollectGarbage ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::CookiesArePreserved [GOOD] Test command err: 2025-06-30T09:16:52.133818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:16:52.133846Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-30T09:16:52.191532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2025-06-30T09:16:52.194410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-30T09:16:52.195531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-30T09:16:52.195668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TestModificationResult got TxId: 102, wait until txId: 102 2025-06-30T09:16:52.196554Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:216:2199], for# user1@builtin, access# DescribeSchema 2025-06-30T09:16:52.196658Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:222:2205], for# user1@builtin, access# 
2025-06-30T09:16:52.351880Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:16:52.351912Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-30T09:16:52.376395Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2025-06-30T09:16:52.377803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-30T09:16:52.378865Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 >> TPQTabletTests::Read_TEvTxCommit_After_Restart [GOOD] >> TPQTabletTests::Limit_On_The_Number_Of_Transactons [GOOD] >> TPQTabletTests::Non_Kafka_Transaction_Supportive_Partitions_Should_Not_Be_Deleted_After_Timeout >> TxUsage::WriteToTopic_Demo_22_RestartNo_Query >> TPQTest::TestSeveralOwners [GOOD] >> TPQTest::TestSourceIdDropByUserWrites >> TPQTabletTests::TEvReadSet_For_A_Non_Existent_Tablet [GOOD] >> TTicketParserTest::NebiusAuthorizationRetryError [GOOD] >> TTicketParserTest::NebiusAuthorizationRetryErrorImmediately >> TPQTabletTests::Non_Kafka_Transaction_Supportive_Partitions_Should_Not_Be_Deleted_After_Timeout [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/unittest >> TPQTabletTests::TEvReadSet_For_A_Non_Existent_Tablet [GOOD] Test command err: 2025-06-30T09:16:51.737740Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:16:51.738794Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:16:51.738869Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037927937] doesn't have tx info 2025-06-30T09:16:51.738889Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:16:51.738895Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-30T09:16:51.738901Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:16:51.738910Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:51.738921Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-30T09:16:51.742617Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:182:2195], now have 1 active actors on pipe 2025-06-30T09:16:51.742649Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-30T09:16:51.745455Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-30T09:16:51.746220Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-30T09:16:51.746253Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:51.746574Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: 
"/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-30T09:16:51.746608Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:16:51.746646Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:16:51.746725Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:16:51.746845Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:190:2201] 2025-06-30T09:16:51.747060Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-30T09:16:51.747067Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:190:2201] 2025-06-30T09:16:51.747077Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:16:51.747181Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-30T09:16:51.747202Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-30T09:16:51.747208Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-30T09:16:51.747257Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:16:51.747297Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:16:51.747360Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:16:51.747411Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:16:51.747445Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:192:2203] 2025-06-30T09:16:51.747609Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:1:Initializer] Initializing completed. 
2025-06-30T09:16:51.747614Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 2 [1:192:2203] 2025-06-30T09:16:51.747620Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 1, State: StateInit] SYNC INIT topic topic partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:16:51.747678Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-30T09:16:51.747689Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit request with generation 1 2025-06-30T09:16:51.747693Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit with generation 1 done 2025-06-30T09:16:51.747709Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:16:51.747725Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:16:51.747806Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-30T09:16:51.748690Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:16:51.748709Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:16:51.748739Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:16:51.748745Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-30T09:16:51.748820Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:205:2212], now have 1 active actors on pipe 2025-06-30T09:16:51.748936Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:208:2214], now have 1 active actors on pipe 2025-06-30T09:16:51.749148Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 181 RawX2: 4294969490 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Operations { PartitionId: 1 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Immediate: false } 2025-06-30T09:16:51.749163Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3431: [PQ: 72057594037927937] distributed transaction 2025-06-30T09:16:51.749178Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-30T09:16:51.749185Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-30T09:16:51.749190Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State 
UNKNOWN 2025-06-30T09:16:51.749196Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-30T09:16:51.749202Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-30T09:16:51.749211Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-30T09:16:51.749239Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 231 MaxStep: 30231 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Operations { PartitionId: 1 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 181 RawX2: 4294969490 } Partitions { } 2025-06-30T09:16:51.749255Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:16:51.750895Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:16:51.750928Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-30T09:16:51.750934Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-06-30T09:16:51.750940Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-0 ... 8 ReadSet: "\010\001" Seqno: 0 2025-06-30T09:16:53.335023Z node 6 :PERSQUEUE DEBUG: transaction.cpp:274: [TxId: 67890] Handle TEvReadSet 2025-06-30T09:16:53.335032Z node 6 :PERSQUEUE DEBUG: transaction.cpp:291: [TxId: 67890] Predicates 1/1 2025-06-30T09:16:53.335046Z node 6 :PERSQUEUE DEBUG: pqtablet_mock.cpp:72: Connected to tablet 72057594037927937 from tablet 72057594037950158 2025-06-30T09:16:53.335510Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:16:53.335524Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PLANNING 2025-06-30T09:16:53.335529Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PLANNING 2025-06-30T09:16:53.335535Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PLANNED 2025-06-30T09:16:53.335540Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from PLANNING to PLANNED 2025-06-30T09:16:53.335546Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4466: [PQ: 72057594037927937] TxQueue.size 1 2025-06-30T09:16:53.335552Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:837: [PQ: 72057594037927937] New ExecStep 100, ExecTxId 67890 2025-06-30T09:16:53.335564Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState CALCULATING 2025-06-30T09:16:53.335569Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from PLANNED to CALCULATING 2025-06-30T09:16:53.335584Z node 6 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 100, TxId 67890 2025-06-30T09:16:53.335620Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3537: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCalcPredicateResult Step 100, TxId 67890, Partition 0, Predicate 1 2025-06-30T09:16:53.335626Z node 6 :PERSQUEUE DEBUG: transaction.cpp:218: [TxId: 67890] Handle TEvTxCalcPredicateResult 
2025-06-30T09:16:53.335631Z node 6 :PERSQUEUE DEBUG: transaction.cpp:267: [TxId: 67890] Partition responses 1/1 2025-06-30T09:16:53.335636Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state CALCULATING 2025-06-30T09:16:53.335640Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State CALCULATING 2025-06-30T09:16:53.335646Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State CALCULATING FrontTxId 67890 2025-06-30T09:16:53.335653Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-30T09:16:53.335658Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState CALCULATED 2025-06-30T09:16:53.335663Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from CALCULATING to CALCULATED 2025-06-30T09:16:53.335668Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-30T09:16:53.335698Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: CALCULATED MinStep: 134 MaxStep: 30134 PredicatesReceived { TabletId: 72057594037950158 Predicate: true } PredicateRecipients: 72057594037950158 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 181 RawX2: 25769805970 } Partitions { } 2025-06-30T09:16:53.335711Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:16:53.337159Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:16:53.337173Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-30T09:16:53.337177Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State CALCULATED 2025-06-30T09:16:53.337182Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State CALCULATED FrontTxId 67890 2025-06-30T09:16:53.337188Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS 2025-06-30T09:16:53.337193Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from CALCULATED to WAIT_RS 2025-06-30T09:16:53.337204Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4027: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 
2025-06-30T09:16:53.337209Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4037: [PQ: 72057594037927937] Send TEvReadSet to tablet 72057594037950158 2025-06-30T09:16:53.337227Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4521: [PQ: 72057594037927937] HaveParticipantsDecision 1 2025-06-30T09:16:53.337237Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState EXECUTING 2025-06-30T09:16:53.337241Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from WAIT_RS to EXECUTING 2025-06-30T09:16:53.337246Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72057594037927937] Received 0, Expected 1 2025-06-30T09:16:53.337294Z node 6 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 100, TxId 67890 2025-06-30T09:16:53.337308Z node 6 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 67890 2025-06-30T09:16:53.337350Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:16:53.337554Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2931: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-30T09:16:53.337564Z node 6 :PERSQUEUE DEBUG: transaction.cpp:324: [TxId: 67890] Predicate acks 1/1 2025-06-30T09:16:53.337570Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state EXECUTING 2025-06-30T09:16:53.337575Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State EXECUTING 2025-06-30T09:16:53.337579Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State EXECUTING FrontTxId 67890 2025-06-30T09:16:53.337584Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72057594037927937] Received 0, Expected 1 2025-06-30T09:16:53.337590Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4366: [PQ: 72057594037927937] TxId 67890 status has not changed 2025-06-30T09:16:53.337664Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2931: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-30T09:16:53.337669Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2936: [PQ: 72057594037927937] Connected to tablet 72057594037950158 2025-06-30T09:16:53.338244Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:16:53.338265Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:16:53.338276Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3583: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCommitDone Step 100, TxId 67890, Partition 0 2025-06-30T09:16:53.338282Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state EXECUTING 2025-06-30T09:16:53.338286Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State EXECUTING 2025-06-30T09:16:53.338291Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State EXECUTING FrontTxId 67890 2025-06-30T09:16:53.338295Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-30T09:16:53.338303Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4224: [PQ: 72057594037927937] TxId: 67890 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-30T09:16:53.338309Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4555: [PQ: 
72057594037927937] complete TxId 67890 2025-06-30T09:16:53.338315Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4573: [PQ: 72057594037927937] delete partitions for TxId 67890 2025-06-30T09:16:53.338319Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState EXECUTED 2025-06-30T09:16:53.338327Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from EXECUTING to EXECUTED 2025-06-30T09:16:53.338332Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-30T09:16:53.338373Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: EXECUTED MinStep: 134 MaxStep: 30134 PredicatesReceived { TabletId: 72057594037950158 Predicate: true } PredicateRecipients: 72057594037950158 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 181 RawX2: 25769805970 } Partitions { } 2025-06-30T09:16:53.338388Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:16:53.339017Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:16:53.339028Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state EXECUTED 2025-06-30T09:16:53.339033Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State EXECUTED 2025-06-30T09:16:53.339037Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State EXECUTED FrontTxId 67890 2025-06-30T09:16:53.339043Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72057594037927937] TPersQueue::SendEvReadSetAckToSenders 2025-06-30T09:16:53.339053Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4048: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSetAck {TEvReadSet step# 100 txid# 67890 TabletSource# 72057594037950158 TabletDest# 72057594037927937 SetTabletConsumer# 72057594037927937 Flags# 0 Seqno# 0} 2025-06-30T09:16:53.339060Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS_ACKS 2025-06-30T09:16:53.339064Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from EXECUTED to WAIT_RS_ACKS 2025-06-30T09:16:53.339070Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 1/1 2025-06-30T09:16:53.339074Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72057594037927937] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-30T09:16:53.339078Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 1/1 2025-06-30T09:16:53.339083Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72057594037927937] add an TxId 67890 to the list for deletion 2025-06-30T09:16:53.339089Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState DELETING 2025-06-30T09:16:53.339095Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72057594037927937] delete key for TxId 67890 2025-06-30T09:16:53.339104Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:16:53.339955Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:16:53.339968Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state 
DELETING 2025-06-30T09:16:53.339972Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State DELETING 2025-06-30T09:16:53.339977Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72057594037927937] delete TxId 67890 |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> TxUsage::WriteToTopic_Demo_13_Query [GOOD] >> TBlobStorageProxyTest::TestCollectGarbage [GOOD] >> VectorIndexBuildTestReboots::BaseCase+Prefixed[TabletReboots] >> TTicketParserTest::AuthenticationRetryError [GOOD] >> TTicketParserTest::AuthenticationRetryErrorImmediately >> TxUsage::WriteToTopic_Demo_14_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/unittest >> TPQTabletTests::Non_Kafka_Transaction_Supportive_Partitions_Should_Not_Be_Deleted_After_Timeout [GOOD] Test command err: 2025-06-30T09:16:51.481294Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:16:51.482098Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:16:51.482153Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037927937] doesn't have tx info 2025-06-30T09:16:51.482168Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:16:51.482172Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-30T09:16:51.482177Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:16:51.482184Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:51.482192Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-30T09:16:51.487252Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:210:2214], now have 1 active actors on pipe 2025-06-30T09:16:51.487283Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-30T09:16:51.489118Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-30T09:16:51.489764Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 
3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-30T09:16:51.489795Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:51.489944Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-30T09:16:51.489979Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:16:51.490060Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:16:51.490144Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:218:2220] 2025-06-30T09:16:51.490287Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-30T09:16:51.490296Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:218:2220] 2025-06-30T09:16:51.490303Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:16:51.490416Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-30T09:16:51.490432Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-30T09:16:51.490436Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-30T09:16:51.490440Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer reinit request with generation 1 2025-06-30T09:16:51.490443Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer reinit with generation 1 done 2025-06-30T09:16:51.490488Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:16:51.490495Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:16:51.490523Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:16:51.490580Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:16:51.491137Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:16:51.491158Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:16:51.491244Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:225:2225], now have 1 active actors on pipe 2025-06-30T09:16:51.491360Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:228:2227], now have 1 active actors on pipe 2025-06-30T09:16:51.491504Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 181 RawX2: 4294969490 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-06-30T09:16:51.491513Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3431: [PQ: 72057594037927937] distributed transaction 2025-06-30T09:16:51.491531Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-30T09:16:51.491538Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-30T09:16:51.491542Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-30T09:16:51.491547Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-30T09:16:51.491551Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-30T09:16:51.491558Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write 
key for TxId 67890 2025-06-30T09:16:51.491579Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 134 MaxStep: 30134 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 181 RawX2: 4294969490 } Partitions { } 2025-06-30T09:16:51.491592Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:16:51.492216Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:16:51.492232Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-30T09:16:51.492236Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-06-30T09:16:51.492241Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-06-30T09:16:51.492284Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 181 RawX2: 4294969490 } TxId: 67891 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-06-30T09:16:51.492289Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3431: [PQ: 72057594037927937] distributed transaction 2025-06-30T09:16:51.492295Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 67891, WriteId (empty maybe) 2025-06-30T09:16:51.492298Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-30T09:16:51.492300Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67891, State UNKNOWN 2025-06-30T09:16:51.492303Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-30T09:16:51.492306Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67891, NewState PREPARING 2025-06-30T09:16:51.492309Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67891 2025-06-30T09:16:51.492326Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67891] save tx TxId: 67891 State: PREPARED MinStep: 136 MaxStep: 30136 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 181 RawX2: 4294969490 } Partitions { } 2025-06-30T09:16:51.492336Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE ... ion 0 user user reinit request with generation 6 2025-06-30T09:16:53.220114Z node 6 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 6 done 2025-06-30T09:16:53.220135Z node 6 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:16:53.220162Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-30T09:16:53.220184Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:16:53.220592Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:16:53.220610Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:16:53.220660Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:197:2206], now have 1 active actors on pipe 2025-06-30T09:16:53.220745Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:200:2208], now have 1 active actors on pipe 2025-06-30T09:16:53.220755Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-30T09:16:53.220760Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-30T09:16:53.220767Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2729: [PQ: 72057594037927937] partition {0, {0, 3}, 100000} for WriteId {0, 3} 2025-06-30T09:16:53.220787Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3630: [PQ: 72057594037927937] send TEvSubscribeLock for WriteId {0, 3} 2025-06-30T09:16:53.220803Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:16:53.221149Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:16:53.221231Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:16:53.221275Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:16:53.221303Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] bootstrapping {0, {0, 3}, 100000} [6:206:2213] 2025-06-30T09:16:53.221407Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDiskStatusStep 2025-06-30T09:16:53.221567Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitMetaStep 2025-06-30T09:16:53.221591Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitInfoRangeStep 2025-06-30T09:16:53.221628Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDataRangeStep 2025-06-30T09:16:53.221655Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDataStep 2025-06-30T09:16:53.221658Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-30T09:16:53.221662Z node 6 :PERSQUEUE INFO: partition_init.cpp:895: [topic:{0, {0, 3}, 100000}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:16:53.221665Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:{0, {0, 3}, 100000}:Initializer] Initializing completed. 
2025-06-30T09:16:53.221670Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] init complete for topic 'topic' partition {0, {0, 3}, 100000} generation 2 [6:206:2213] 2025-06-30T09:16:53.221675Z node 6 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] SYNC INIT topic topic partitition {0, {0, 3}, 100000} so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:16:53.221680Z node 6 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] Process pending events. Count 0 2025-06-30T09:16:53.221725Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] no data for compaction 2025-06-30T09:16:53.221750Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie -=[ 0wn3r ]=-|3d79aa93-8f7c3d79-e7269131-bfaf1674_0 generated for partition {0, {0, 3}, 100000} topic 'topic' owner -=[ 0wn3r ]=- 2025-06-30T09:16:53.221763Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] TPartition::ReplyOwnerOk. Partition: {0, {0, 3}, 100000} 2025-06-30T09:16:53.221772Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 4 2025-06-30T09:16:53.221818Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037927937] server disconnected, pipe [6:200:2208] destroyed 2025-06-30T09:16:53.221826Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:16:53.221846Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:218:2220], now have 1 active actors on pipe 2025-06-30T09:16:53.221897Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-30T09:16:53.221901Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-30T09:16:53.221905Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2729: [PQ: 72057594037927937] partition {0, {0, 0}, 100001} for WriteId {0, 0} 2025-06-30T09:16:53.221921Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:16:53.222288Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:16:53.222358Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:16:53.222396Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:16:53.222421Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateInit] bootstrapping {0, {0, 0}, 100001} [6:225:2225] 2025-06-30T09:16:53.222512Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitDiskStatusStep 2025-06-30T09:16:53.222644Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitMetaStep 2025-06-30T09:16:53.222676Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 
100001}:Initializer] Start initializing step TInitInfoRangeStep 2025-06-30T09:16:53.222702Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitDataRangeStep 2025-06-30T09:16:53.222720Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitDataStep 2025-06-30T09:16:53.222723Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-30T09:16:53.222726Z node 6 :PERSQUEUE INFO: partition_init.cpp:895: [topic:{0, {0, 0}, 100001}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:16:53.222728Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:{0, {0, 0}, 100001}:Initializer] Initializing completed. 2025-06-30T09:16:53.222733Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateInit] init complete for topic 'topic' partition {0, {0, 0}, 100001} generation 2 [6:225:2225] 2025-06-30T09:16:53.222756Z node 6 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateInit] SYNC INIT topic topic partitition {0, {0, 0}, 100001} so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:16:53.222762Z node 6 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] Process pending events. Count 0 2025-06-30T09:16:53.222797Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] no data for compaction 2025-06-30T09:16:53.222816Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie -=[ 0wn3r ]=-|adb302c3-e71c488b-191b8352-d2952d74_0 generated for partition {0, {0, 0}, 100001} topic 'topic' owner -=[ 0wn3r ]=- 2025-06-30T09:16:53.222826Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] TPartition::ReplyOwnerOk. Partition: {0, {0, 0}, 100001} 2025-06-30T09:16:53.222851Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 4 2025-06-30T09:16:53.222932Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5262: [PQ: 72057594037927937] send TEvPQ::TEvDeletePartition to partition {0, {0, 0}, 100001} 2025-06-30T09:16:53.222945Z node 6 :PERSQUEUE DEBUG: partition.cpp:3863: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] Handle TEvPQ::TEvDeletePartition 2025-06-30T09:16:53.223010Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:16:53.223015Z node 6 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from D0000100001(+) to D0000100002(-) 2025-06-30T09:16:53.223100Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] no data for compaction 2025-06-30T09:16:53.223108Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] no data for compaction 2025-06-30T09:16:53.223156Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1382: [PQ: 72057594037927937] Topic 'topic' counters. 
CacheSize 0 CachedBlobs 0 2025-06-30T09:16:53.223176Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5196: [PQ: 72057594037927937] Handle TEvPQ::TEvDeletePartitionDone {0, {0, 0}, 100001} 2025-06-30T09:16:53.223183Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4647: [PQ: 72057594037927937] delete WriteId {0, 0} 2025-06-30T09:16:53.223192Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:16:53.223522Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:16:53.397473Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:16:53.407724Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] no data for compaction |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestCollectGarbage [GOOD] |78.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |78.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots >> TTicketParserTest::BulkAuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::BulkAuthorization |78.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |78.7%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> TTicketParserTest::AuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::AuthorizationWithRequiredPermissions |78.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> TTicketParserTest::BulkAuthorization [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount2 >> VectorIndexBuildTestReboots::BaseCase-Prefixed[PipeResets] |78.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |78.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service >> TTicketParserTest::AuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount >> TTicketParserTest::NebiusAuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::NebiusAuthorization ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental [GOOD] Test command err: 2025-06-30T09:16:41.209143Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:16:41.209239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:16:41.209259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048de/r3tmp/tmpMMMqVz/pdisk_1.dat 2025-06-30T09:16:41.319210Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:16:41.319596Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:560:2485], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.319610Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.319616Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:16:41.319630Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:557:2483], Recipient [1:375:2369]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-30T09:16:41.319634Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:16:41.341736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-30T09:16:41.341827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.341891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:16:41.341901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:16:41.341982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:16:41.342001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:41.342021Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.342270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.342310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:16:41.342319Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.342325Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-30T09:16:41.342371Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.342379Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.342392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.342402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:16:41.342410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:41.342417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:41.342445Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.342524Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.342530Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-30T09:16:41.342548Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.342553Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.342558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.342563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:16:41.342568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:41.342578Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.342650Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.342655Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 
2025-06-30T09:16:41.342671Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.342676Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.342682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.342687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.342693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-30T09:16:41.342697Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.342704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:41.343440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:41.343586Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.343617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:41.343664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:16:41.343932Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877760, Sender [1:565:2490], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:567:2491] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-30T09:16:41.343942Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5055: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-30T09:16:41.343949Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5794: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-30T09:16:41.343987Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269091328, Sender [1:371:2365], Recipient [1:375:2369]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-30T09:16:41.344062Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:569:2493], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.344068Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-30T09:16:41.344072Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:16:41.344103Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124996, Sender [1:557:2483], Recipient [1:375:2369]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-30T09:16:41.344109Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4973: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-30T09:16:41.344124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.344130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:16:41.344135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.361469Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 273285138, Sender [1:45:2092], Recipient [1:375:2369]: ... meshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 6/7 2025-06-30T09:16:51.463957Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:6 progress is 6/7 2025-06-30T09:16:51.463959Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 6/7 2025-06-30T09:16:51.463962Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 6/7, is published: true 2025-06-30T09:16:51.463982Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [3:374:2368], Recipient [3:374:2368]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:51.463985Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:51.463988Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715668:4, at schemeshard: 72057594046644480 2025-06-30T09:16:51.463992Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715668:4 ProgressState 2025-06-30T09:16:51.463997Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:51.463999Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:4 progress is 7/7 2025-06-30T09:16:51.464002Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-06-30T09:16:51.464007Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:4 progress is 7/7 2025-06-30T09:16:51.464009Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-06-30T09:16:51.464012Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 7/7, is published: 
true 2025-06-30T09:16:51.464021Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:1198:2905] message: TxId: 281474976715668 2025-06-30T09:16:51.464025Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-06-30T09:16:51.464031Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:0 2025-06-30T09:16:51.464034Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715668:0 2025-06-30T09:16:51.464041Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 2 2025-06-30T09:16:51.464044Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:1 2025-06-30T09:16:51.464046Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715668:1 2025-06-30T09:16:51.464055Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 2 2025-06-30T09:16:51.464058Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:2 2025-06-30T09:16:51.464060Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715668:2 2025-06-30T09:16:51.464063Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-30T09:16:51.464066Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:3 2025-06-30T09:16:51.464068Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715668:3 2025-06-30T09:16:51.464081Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 3 2025-06-30T09:16:51.464084Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 3 2025-06-30T09:16:51.464087Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:4 2025-06-30T09:16:51.464089Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715668:4 2025-06-30T09:16:51.464095Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 21] was 3 2025-06-30T09:16:51.464098Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 3 2025-06-30T09:16:51.464101Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:5 
2025-06-30T09:16:51.464105Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715668:5 2025-06-30T09:16:51.464111Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 3 2025-06-30T09:16:51.464113Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 3 2025-06-30T09:16:51.464116Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:6 2025-06-30T09:16:51.464118Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715668:6 2025-06-30T09:16:51.464125Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 23] was 3 2025-06-30T09:16:51.464127Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 16] was 3 2025-06-30T09:16:51.464186Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:51.464203Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:51.464218Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:51.464229Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:51.464236Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:1198:2905] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715668 at schemeshard: 72057594046644480 2025-06-30T09:16:51.464306Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [3:1205:2911], Recipient [3:374:2368]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:51.464310Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:51.464313Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:16:51.479464Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037893, clientId# [3:1490:3136], serverId# [3:1491:3137], sessionId# [0:0:0] 2025-06-30T09:16:51.479514Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jz01zzc9dgrsrzdn6gxq0cj4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MmM3YmUyMGQtYWU4MGUyZDctZWRmYTExNC04NTkyMDQ5MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }, { items { uint32_value: 5 } items { uint32_value: 50 } } 2025-06-30T09:16:51.496187Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037895, clientId# [3:1519:3153], serverId# [3:1520:3154], sessionId# [0:0:0] 2025-06-30T09:16:51.496240Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jz01zzcsc1zkx1axx5yc14ke, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjdhNWQxM2EtOGVkNjNiYi1jODBlMzM4NC1hMjNhMzAzYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 11 } items { uint32_value: 101 } }, { items { uint32_value: 21 } items { uint32_value: 201 } }, { items { uint32_value: 31 } items { uint32_value: 301 } }, { items { uint32_value: 41 } items { uint32_value: 401 } }, { items { uint32_value: 51 } items { uint32_value: 501 } } 2025-06-30T09:16:51.516100Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037892, clientId# [3:1548:3170], serverId# [3:1549:3171], sessionId# [0:0:0] 2025-06-30T09:16:51.516164Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jz01zzd9b5mspkzpcz5d41em, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzU4YzgyNWUtNzA2NzUxYmUtMTU1Y2ViMzctMTRhZTcyY2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 12 } items { uint32_value: 102 } }, { items { uint32_value: 22 } items { uint32_value: 202 } }, { items { uint32_value: 32 } items { uint32_value: 302 } }, { items { uint32_value: 42 } items { uint32_value: 402 } }, { items { uint32_value: 52 } items { uint32_value: 502 } } 2025-06-30T09:16:51.534167Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [3:1577:3187], serverId# [3:1578:3188], sessionId# [0:0:0] 2025-06-30T09:16:51.534218Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jz01zzdy7ccnss0skf0z1xeq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmYzMDYyYzktZWFmMmJiMGUtMmU0Y2JkYmMtMzIyOGZiZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 13 } items { uint32_value: 103 } }, { items { uint32_value: 23 } items { uint32_value: 203 } }, { items { uint32_value: 33 } items { uint32_value: 303 } }, { items { uint32_value: 43 } items { uint32_value: 403 } }, { items { uint32_value: 53 } items { uint32_value: 503 } } |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> TTicketParserTest::AuthorizationWithUserAccount2 [GOOD] >> TTicketParserTest::BulkAuthorizationModify >> TTicketParserTest::AuthorizationWithUserAccount [GOOD] >> TTicketParserTest::AuthorizationUnavailable >> TTicketParserTest::NebiusAuthorization [GOOD] >> TTicketParserTest::NebiusAuthorizationModify |78.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |78.8%| [LD] {RESULT} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> BasicUsage::ReadWithRestarts [GOOD] >> BasicUsage::ConflictingWrites ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::E2EBackupCollection [FAIL] Test command err: 2025-06-30T09:16:41.183892Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:16:41.183962Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:16:41.183975Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048fa/r3tmp/tmp9JcX9V/pdisk_1.dat 2025-06-30T09:16:41.284056Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:16:41.284455Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:560:2485], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.284467Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.284474Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:16:41.284488Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:557:2483], Recipient [1:375:2369]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-30T09:16:41.284494Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:16:41.306315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-30T09:16:41.306407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.306483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:16:41.306493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:16:41.306566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:16:41.306581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:41.306601Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.306880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.306929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:16:41.306937Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.306943Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-30T09:16:41.306994Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.307003Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.307018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.307028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:16:41.307036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:41.307042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:41.307069Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.307130Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.307135Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-30T09:16:41.307154Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.307158Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.307164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.307170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:16:41.307175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:41.307186Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.307236Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.307240Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 
2025-06-30T09:16:41.307255Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.307259Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:41.307265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.307270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:16:41.307276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-30T09:16:41.307280Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:41.307287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:41.308043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:41.308131Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:41.308142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:41.308200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:16:41.308484Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877760, Sender [1:565:2490], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:567:2491] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-30T09:16:41.308494Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5055: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-30T09:16:41.308502Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5794: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-30T09:16:41.308537Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269091328, Sender [1:371:2365], Recipient [1:375:2369]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-30T09:16:41.308613Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:569:2493], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:16:41.308621Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-30T09:16:41.308626Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:16:41.308660Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124996, Sender [1:557:2483], Recipient [1:375:2369]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-30T09:16:41.308667Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4973: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-30T09:16:41.308681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.308688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:16:41.308694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-30T09:16:41.325857Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 273285138, Sender [1:45:2092], Recipient [1:375:2369]: ... d 281474976715668:0 240 -> 240 2025-06-30T09:16:51.201441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCreateRestoreOpControlPlane::TWaitCopyTableBarrier operationId: 281474976715668:2 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 281474976715668 Name: CopyTableBarrier }, at tablet# 72057594046644480 2025-06-30T09:16:51.201447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715668:2 1 -> 240 2025-06-30T09:16:51.201569Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:51.201577Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715668:0 2025-06-30T09:16:51.201582Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715668:2 2025-06-30T09:16:51.201610Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:51.201616Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:51.201624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-30T09:16:51.201632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715668:0 ProgressState 2025-06-30T09:16:51.201646Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:51.201653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:0 progress is 2/3 2025-06-30T09:16:51.201658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/3 2025-06-30T09:16:51.201663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is 
done id#281474976715668:0 progress is 2/3 2025-06-30T09:16:51.201667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/3 2025-06-30T09:16:51.201673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 2/3, is published: true 2025-06-30T09:16:51.201723Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:16:51.201729Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:16:51.201738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715668:2, at schemeshard: 72057594046644480 2025-06-30T09:16:51.201744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715668:2 ProgressState 2025-06-30T09:16:51.201751Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:16:51.201756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:2 progress is 3/3 2025-06-30T09:16:51.201764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 3/3 2025-06-30T09:16:51.201770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:2 progress is 3/3 2025-06-30T09:16:51.201774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 3/3 2025-06-30T09:16:51.201778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 3/3, is published: true 2025-06-30T09:16:51.201795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:1385:3054] message: TxId: 281474976715668 2025-06-30T09:16:51.201802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 3/3 2025-06-30T09:16:51.201811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:0 2025-06-30T09:16:51.201816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715668:0 2025-06-30T09:16:51.201852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 16] was 3 2025-06-30T09:16:51.201858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 3 2025-06-30T09:16:51.201865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:1 2025-06-30T09:16:51.201869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715668:1 2025-06-30T09:16:51.201874Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 2 2025-06-30T09:16:51.201879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:2 2025-06-30T09:16:51.201883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715668:2 2025-06-30T09:16:51.201889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 9] was 3 2025-06-30T09:16:51.202052Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:51.202082Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:16:51.202094Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [1:1385:3054] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715668 at schemeshard: 72057594046644480 2025-06-30T09:16:51.202240Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [1:1392:3060], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:51.202246Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:51.202249Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:16:51.223937Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [1:1463:3114], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:51.223969Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:51.223975Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:16:51.223989Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [1:1467:3118], Recipient [1:375:2369]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:51.223993Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:16:51.223997Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:16:51.370018Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:16:51.370062Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:16:51.370124Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:375:2369], Recipient [1:375:2369]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:16:51.370132Z node 1 :FLAT_TX_SCHEMESHARD 
TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:16:51.513122Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037893, clientId# [1:1499:3146], serverId# [1:1500:3147], sessionId# [0:0:0] 2025-06-30T09:16:51.513244Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jz01zzd263v52mkv4y2y5tk5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTE2OWEyMzctYTBhNTE1NDgtZmQ4OWRiZDctM2NkNWY4, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } assertion failed at ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:925, virtual void NKikimr::NTestSuiteIncrementalBackup::TTestCaseE2EBackupCollection::Execute_(NUnitTest::TTestContext &): (expected == actual) failed: ("{ items { uint32_value: 2 } items { uint32_value: 200 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }" != "{ items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }") , with diff: "{ items { uint32_value: (|1 } items { uint3)2(|_value:) (|10 )} (|}, { )items { uint32_value: 2(0| } items { uint32_value: 2)0 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }" TBackTrace::Capture()+28 (0x13DCD64C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+137 (0x13F87399) NKikimr::NTestSuiteIncrementalBackup::TTestCaseE2EBackupCollection::Execute_(NUnitTest::TTestContext&)+4673 (0x13CAA501) NKikimr::NTestSuiteIncrementalBackup::TCurrentTest::Execute()::'lambda'()::operator()() const+71 (0x13CAE427) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+126 (0x13F8924E) NKikimr::NTestSuiteIncrementalBackup::TCurrentTest::Execute()+421 (0x13CADC85) NUnitTest::TTestFactory::Execute()+803 (0x13F899C3) NUnitTest::RunMain(int, char**)+3021 (0x13F9B56D) ??+0 (0x7FF979052D90) __libc_start_main+128 (0x7FF979052E40) _start+41 (0x12CEE029) >> TTicketParserTest::BulkAuthorizationModify [GOOD] >> TTicketParserTest::NebiusAuthorizationModify [GOOD] >> TTicketParserTest::AuthenticationRetryErrorImmediately [GOOD] >> TTicketParserTest::AuthorizationUnavailable [GOOD] |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::BulkAuthorizationModify [GOOD] Test command err: 2025-06-30T09:16:40.364809Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668855510509394:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.364934Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043df/r3tmp/tmp8y1bJ6/pdisk_1.dat 2025-06-30T09:16:40.449246Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.451380Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for 
subscription [1:7521668855510509368:2080] 1751275000364474 != 1751275000364477 TServer::EnableGrpc on GrpcPort 17472, node 1 2025-06-30T09:16:40.466261Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.466275Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.466276Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.466315Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6545 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:16:40.515548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:40.517806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:16:40.519237Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-30T09:16:40.519259Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [12507f9fe5f0] Connect to grpc://localhost:8590 2025-06-30T09:16:40.519986Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507f9fe5f0] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-30T09:16:40.521674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.521699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.522609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:40.527610Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [12507f9fe5f0] Status 14 Service Unavailable 
2025-06-30T09:16:40.527738Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-30T09:16:40.527753Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:40.527768Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-30T09:16:40.527870Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507f9fe5f0] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-30T09:16:40.528496Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [12507f9fe5f0] Status 14 Service Unavailable 2025-06-30T09:16:40.528573Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-30T09:16:40.528585Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:41.369498Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-30T09:16:41.369545Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-30T09:16:41.369625Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:41.369739Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507f9fe5f0] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-30T09:16:41.370871Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [12507f9fe5f0] Status 14 Service Unavailable 2025-06-30T09:16:41.370977Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-30T09:16:41.370985Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:43.370127Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-30T09:16:43.370171Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-30T09:16:43.370375Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507f9fe5f0] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 
2025-06-30T09:16:43.383139Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [12507f9fe5f0] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:43.383266Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-30T09:16:45.365493Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668855510509394:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:45.365545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:16:52.832247Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668903878705296:2136];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043df/r3tmp/tmpUTpgrN/pdisk_1.dat 2025-06-30T09:16:52.842238Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:16:52.852389Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:52.852670Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521668903878705198:2080] 1751275012829529 != 1751275012829532 TServer::EnableGrpc on GrpcPort 28195, node 2 2025-06-30T09:16:52.868329Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:52.868344Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:52.868346Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:52.868398Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3767 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:16:52.943324Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:52.943372Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:52.943768Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at ... schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:55.739164Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:55.739318Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:55.744052Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:55.744080Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:55.744085Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:55.744102Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:55.744119Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(somewhere.sleep) 2025-06-30T09:16:55.744125Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.list) 2025-06-30T09:16:55.744130Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-30T09:16:55.744137Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.eat) 2025-06-30T09:16:55.744169Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [12507f823670] Connect to grpc://localhost:21599 2025-06-30T09:16:55.744909Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507f823670] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:55.745844Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507f823670] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "somewhere.sleep" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:55.745921Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507f823670] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.list" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 
2025-06-30T09:16:55.745981Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507f823670] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:55.746053Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507f823670] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.eat" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:55.747998Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [12507f823670] Status 16 Access Denied 2025-06-30T09:16:55.748069Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.list now has a permanent error "Access Denied" retryable:0 2025-06-30T09:16:55.748198Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [12507f823670] Status 16 Access Denied 2025-06-30T09:16:55.748244Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.read now has a permanent error "Access Denied" retryable:0 2025-06-30T09:16:55.748257Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [12507f823670] Status 16 Access Denied 2025-06-30T09:16:55.748268Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.eat now has a permanent error "Access Denied" retryable:0 2025-06-30T09:16:55.748344Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [12507f823670] Status 16 Access Denied 2025-06-30T09:16:55.748378Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission somewhere.sleep now has a permanent error "Access Denied" retryable:0 2025-06-30T09:16:55.748642Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [12507f823670] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:55.748696Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-06-30T09:16:55.748706Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-30T09:16:55.748991Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [12507c5f48b0] Connect to grpc://localhost:27550 2025-06-30T09:16:55.749119Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507c5f48b0] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-30T09:16:55.751114Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [12507c5f48b0] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-30T09:16:55.751222Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-30T09:16:56.115104Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521668921884429132:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:56.115129Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043df/r3tmp/tmpTTfqAc/pdisk_1.dat 2025-06-30T09:16:56.125765Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: 
Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20452, node 5 2025-06-30T09:16:56.137801Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:56.137820Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:56.137822Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:56.137876Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12292 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:56.218485Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:56.218522Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:56.218869Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:16:56.219569Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:56.221101Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:56.221126Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:56.221128Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:56.221165Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read) 2025-06-30T09:16:56.221186Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [12507bc70e30] Connect to grpc://localhost:14207 2025-06-30T09:16:56.221390Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507bc70e30] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-30T09:16:56.223266Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [12507bc70e30] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:56.223338Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:56.223507Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:56.223516Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:56.223517Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:56.223526Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-30T09:16:56.223566Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [12507bc70e30] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-30T09:16:56.223980Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [12507bc70e30] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:56.224037Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::AuthenticationRetryErrorImmediately [GOOD] Test command err: 2025-06-30T09:16:40.352172Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668851702837646:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.352200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043e9/r3tmp/tmp5rvSJH/pdisk_1.dat 2025-06-30T09:16:40.428049Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29703, node 1 2025-06-30T09:16:40.447084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.447115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.447118Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.447178Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:40.454233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.454264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.455422Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16236 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:40.500079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:16:40.503796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:40.506250Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-30T09:16:40.506279Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [531e7e5f10f0] Connect to grpc://localhost:15665 2025-06-30T09:16:40.506942Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [531e7e5f10f0] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-30T09:16:40.514850Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [531e7e5f10f0] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:40.515046Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:40.959357Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668855470996280:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.959390Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043e9/r3tmp/tmpvULa8W/pdisk_1.dat 2025-06-30T09:16:40.980365Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.980463Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521668855470996256:2080] 1751275000959224 != 1751275000959227 TServer::EnableGrpc on GrpcPort 29372, node 2 2025-06-30T09:16:40.991454Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.991470Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.991472Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.991524Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23682 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:16:41.064174Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.064206Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.064702Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:41.065485Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:16:41.067162Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:41.070452Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket ApiK****alid (AB5B5EA8) asking for AccessServiceAuthentication 2025-06-30T09:16:41.070477Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [531e7cf0a070] Connect to grpc://localhost:26289 2025-06-30T09:16:41.070679Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [531e7cf0a070] Request AuthenticateRequest { api_key: "ApiK****alid (AB5B5EA8)" } 2025-06-30T09:16:41.073758Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [531e7cf0a070] Response AuthenticateResponse { subject { user_account { id: "ApiKey-value-valid" } } } 2025-06-30T09:16:41.073877Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket ApiK****alid (AB5B5EA8) () has now valid token of ApiKey-value-valid@as 2025-06-30T09:16:41.521970Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521668857286823739:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.522002Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043e9/r3tmp/tmpaTel66/pdisk_1.dat 2025-06-30T09:16:41.539284Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:41.544141Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521668857286823720:2080] 1751275001521831 != 1751275001521834 TServer::EnableGrpc on GrpcPort 2025, node 3 2025-06-30T09:16:41.557677Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:41.557687Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:41.557690Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:41.557727Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30262 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.628704Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.628740Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.629116Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomai ... N: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:42.025431Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16329 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:16:42.094615Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:42.094653Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:42.095513Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:42.095647Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:16:42.098192Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-30T09:16:42.098239Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [531e7ce66330] Connect to grpc://localhost:18723 2025-06-30T09:16:42.098674Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [531e7ce66330] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-30T09:16:42.103334Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [531e7ce66330] Status 14 Service Unavailable 2025-06-30T09:16:42.103419Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:42.103429Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-30T09:16:42.103531Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [531e7ce66330] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-30T09:16:42.104190Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [531e7ce66330] Status 14 Service Unavailable 2025-06-30T09:16:42.104250Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:42.990363Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-30T09:16:42.990381Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-30T09:16:42.990448Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [531e7ce66330] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-30T09:16:42.990898Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:42.991519Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [531e7ce66330] Status 14 Service Unavailable 2025-06-30T09:16:42.991583Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:43.990851Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-30T09:16:43.990874Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for 
AccessServiceAuthentication 2025-06-30T09:16:43.990997Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [531e7ce66330] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-30T09:16:43.995335Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [531e7ce66330] Status 14 Service Unavailable 2025-06-30T09:16:43.995534Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:45.991737Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-30T09:16:45.991759Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-30T09:16:45.991936Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [531e7ce66330] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-30T09:16:45.994996Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [531e7ce66330] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:45.995104Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-30T09:16:46.990840Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7521668859868077478:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:46.991768Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:16:54.389223Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521668912605968384:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:54.389252Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043e9/r3tmp/tmpUg0l0g/pdisk_1.dat 2025-06-30T09:16:54.402817Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:54.403122Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7521668912605968354:2080] 1751275014389038 != 1751275014389041 TServer::EnableGrpc on GrpcPort 19634, node 5 2025-06-30T09:16:54.415984Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:54.415998Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:54.416001Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:54.416071Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25136 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:54.493428Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:54.493469Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:54.493976Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:54.494728Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:16:54.511292Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:54.519651Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-30T09:16:54.519702Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [531e7ce08330] Connect to grpc://localhost:16786 2025-06-30T09:16:54.519933Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [531e7ce08330] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-30T09:16:54.525898Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [531e7ce08330] Status 14 Service Unavailable 2025-06-30T09:16:54.526874Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:54.526895Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-30T09:16:54.526990Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [531e7ce08330] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-30T09:16:54.529089Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [531e7ce08330] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:54.529214Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-30T09:16:55.391552Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |78.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |78.8%| [LD] {RESULT} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/engine/ut/ydb-core-engine-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAuthorizationModify [GOOD] Test command err: 2025-06-30T09:16:40.324051Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668853755691690:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.324094Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043da/r3tmp/tmpbxyTtz/pdisk_1.dat 2025-06-30T09:16:40.395771Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.397704Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521668853755691666:2080] 1751275000323915 != 1751275000323918 TServer::EnableGrpc on GrpcPort 61373, node 1 2025-06-30T09:16:40.427045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.427079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.428148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Connecting -> Connected 2025-06-30T09:16:40.428700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.428707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.428720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.428754Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65404 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:16:40.472111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:40.474544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:40.475877Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:40.475890Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:40.475893Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:40.476201Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-30T09:16:40.476224Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [53833e3f08b0] Connect to grpc://localhost:15164 2025-06-30T09:16:40.476695Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [53833e3f08b0] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response 14: "Service Unavailable" 2025-06-30T09:16:40.481200Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [53833e3f08b0] Status 14 Service Unavailable 2025-06-30T09:16:40.481291Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:40.481297Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket 
**** (8E120919) asking for AccessServiceAuthentication 2025-06-30T09:16:40.481333Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [53833e3f08b0] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-30T09:16:40.482629Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [53833e3f08b0] Status 1 CANCELLED 2025-06-30T09:16:40.482685Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' 2025-06-30T09:16:40.955298Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668853818187581:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.955323Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043da/r3tmp/tmps78VDh/pdisk_1.dat 2025-06-30T09:16:40.971342Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4723, node 2 2025-06-30T09:16:40.985541Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.985555Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.985557Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.985602Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22385 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.059925Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.059965Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.060435Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:16:41.061630Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:41.062661Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:41.063733Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:41.063746Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:41.063749Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:41.063770Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-30T09:16:41.063783Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [53833ceca5f0] Connect to grpc://localhost:28041 2025-06-30T09:16:41.064101Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [53833ceca5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-06-30T09:16:41.068326Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [53833ceca5f0] Status 14 Service Unavailable 2025-06-30T09:16:41.068402Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-30T09:16:41.068419Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:41.068433Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-30T09:16:41.068527Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [53833ceca5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-06-30T09:16:41.069199Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [53833ceca5f0] Status 14 Service Unavailable 2025-06-30T09:16:41.069274Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-30T09:16:41.069284Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now ... 
db , DomainLoginOnly 1 2025-06-30T09:16:55.874674Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:55.874676Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:55.874681Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-30T09:16:55.874713Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [53833ce3c5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "XXXXXXXX" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "XXXXXXXX" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-30T09:16:55.875115Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [53833ce3c5f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-30T09:16:55.875180Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:55.875268Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:55.875276Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:55.875278Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:55.875283Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-30T09:16:55.875323Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [53833ce3c5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "XXXXXXXX" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "XXXXXXXX" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-30T09:16:55.875665Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [53833ce3c5f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-30T09:16:55.875715Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:55.875785Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:55.875793Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:55.875795Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:55.875800Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( monitoring.view) 2025-06-30T09:16:55.875828Z node 4 :GRPC_CLIENT 
DEBUG: grpc_service_client.h:120: [53833ce3c5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "monitoring.view" } container_id: "folder" iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "monitoring.view" } container_id: "folder" iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-30T09:16:55.876323Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [53833ce3c5f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-30T09:16:55.876368Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:56.211789Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521668921063648066:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:56.211965Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043da/r3tmp/tmp0seMTn/pdisk_1.dat 2025-06-30T09:16:56.222399Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:56.224460Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7521668921063648038:2080] 1751275016211604 != 1751275016211607 TServer::EnableGrpc on GrpcPort 16149, node 5 2025-06-30T09:16:56.237175Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:56.237189Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:56.237190Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:56.237228Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11839 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:16:56.315578Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:56.315614Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:56.316089Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:56.316681Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:56.318282Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:56.318296Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:56.318299Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:56.318326Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-30T09:16:56.318338Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [53833ddb30f0] Connect to grpc://localhost:26761 2025-06-30T09:16:56.318606Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [53833ddb30f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-30T09:16:56.320974Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [53833ddb30f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-30T09:16:56.321072Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:56.321289Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:56.321299Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:56.321302Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:56.321313Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-30T09:16:56.321403Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [53833ddb30f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } 
iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-30T09:16:56.322631Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [53833ddb30f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { account { user_account { id: "user1" } } } } } 2025-06-30T09:16:56.322711Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::AuthorizationUnavailable [GOOD] Test command err: 2025-06-30T09:16:40.473605Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668854709896025:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.474616Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043c6/r3tmp/tmpULHuUF/pdisk_1.dat 2025-06-30T09:16:40.534240Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14368, node 1 2025-06-30T09:16:40.553043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.553058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.553060Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.553109Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19240 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:16:40.612065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.612099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.613110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:40.615249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:40.616499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:40.617878Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:40.617916Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [108b3effd930] Connect to grpc://localhost:64024 2025-06-30T09:16:40.618661Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [108b3effd930] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:40.622880Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [108b3effd930] Status 14 Service Unavailable 2025-06-30T09:16:40.622978Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-30T09:16:40.622989Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:40.622996Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:40.623106Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [108b3effd930] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:40.623711Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [108b3effd930] Status 14 Service Unavailable 2025-06-30T09:16:40.623768Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-30T09:16:40.623774Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:41.471060Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:41.473655Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 
2025-06-30T09:16:41.473668Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:41.473767Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [108b3effd930] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:41.475797Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [108b3effd930] Status 14 Service Unavailable 2025-06-30T09:16:41.475841Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-30T09:16:41.475848Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:42.474207Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-30T09:16:42.474224Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:42.474296Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [108b3effd930] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:42.475884Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [108b3effd930] Status 14 Service Unavailable 2025-06-30T09:16:42.475931Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-30T09:16:42.475945Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:44.475724Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-30T09:16:44.475748Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:44.475865Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [108b3effd930] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:44.477546Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [108b3effd930] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:44.477600Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a valid subject "user1@as" 2025-06-30T09:16:44.477630Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-30T09:16:45.471740Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668854709896025:2229];send_to=[0:7307199536658146131:7762515]; 
2025-06-30T09:16:45.471795Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:16:52.901305Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668903409953958:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:52.901329Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043c6/r3tmp/tmpjLp1s8/pdisk_1.dat 2025-06-30T09:16:52.922180Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:52.922582Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521668903409953939:2080] 1751275012901211 != 1751275012901214 TServer::EnableGrpc on GrpcPort 19695, node 2 2025-06-30T09:16:52.946998Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:52.947015Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:52.947017Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:52.947081Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5309 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType ... 
55.881320Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [108b3d2c8b70] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:55.881382Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-30T09:16:55.881393Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-30T09:16:55.881677Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [108b3fddb930] Connect to grpc://localhost:6746 2025-06-30T09:16:55.881818Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [108b3fddb930] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-30T09:16:55.883940Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [108b3fddb930] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-30T09:16:55.884072Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-30T09:16:55.884349Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:55.884358Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:55.884360Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:55.884366Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-30T09:16:55.884411Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [108b3d2c8b70] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:55.890820Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [108b3d2c8b70] Status 16 Access Denied 2025-06-30T09:16:55.891028Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "Access Denied" retryable:0 2025-06-30T09:16:55.891044Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'Access Denied' 2025-06-30T09:16:55.891401Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:55.891427Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:55.891431Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:55.891443Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:55.891454Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-30T09:16:55.891521Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [108b3d2c8b70] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:55.891760Z node 4 :GRPC_CLIENT DEBUG: 
grpc_service_client.h:120: [108b3d2c8b70] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:55.894865Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [108b3d2c8b70] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:55.894967Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [108b3d2c8b70] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:55.895007Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-30T09:16:55.895057Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-06-30T09:16:55.895062Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-30T09:16:55.895129Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-30T09:16:56.205295Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521668924303309756:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:56.205320Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043c6/r3tmp/tmp5IPAoo/pdisk_1.dat 2025-06-30T09:16:56.219361Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12987, node 5 2025-06-30T09:16:56.230427Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:56.230443Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:56.230445Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:56.230498Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9411 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:16:56.309139Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:56.309186Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:56.309652Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:56.310179Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:16:56.312268Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:16:56.312286Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:56.312290Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:16:56.312302Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:56.312314Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-30T09:16:56.312329Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [108b3d298070] Connect to grpc://localhost:13850 2025-06-30T09:16:56.312538Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [108b3d298070] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:56.313426Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [108b3d298070] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:56.314945Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [108b3d298070] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-30T09:16:56.315027Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-30T09:16:56.315124Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [108b3d298070] Status 14 Service Unavailable 2025-06-30T09:16:56.315161Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "Service Unavailable" retryable:1 2025-06-30T09:16:56.315174Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:56.315180Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-30T09:16:56.315198Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-30T09:16:56.315259Z node 5 :GRPC_CLIENT 
DEBUG: grpc_service_client.h:120: [108b3d298070] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:56.315447Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [108b3d298070] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-30T09:16:56.316110Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [108b3d298070] Status 1 CANCELLED 2025-06-30T09:16:56.316119Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [108b3d298070] Status 1 CANCELLED 2025-06-30T09:16:56.316169Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1415: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" 2025-06-30T09:16:56.316187Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "CANCELLED" retryable:1 2025-06-30T09:16:56.316191Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' |78.8%| [TA] $(B)/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/{meta.json ... results_accumulator.log} >> TBSV::ShardsNotLeftInShardsToDelete >> TSchemeShardTestExtSubdomainReboots::AlterForceDrop-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardTestExtSubdomainReboots::AlterForceDrop-AlterDatabaseCreateHiveFirst-true |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::ShardsNotLeftInShardsToDelete [GOOD] >> TBSV::ShouldLimitBlockStoreVolumeDropRate >> TxUsage::WriteToTopic_Demo_19_RestartNo_Table [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::ShardsNotLeftInShardsToDelete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:16:57.400459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:57.400484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:57.400489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:57.400493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:57.400497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:57.400500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 
2025-06-30T09:16:57.400507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:57.400520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:57.400625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:57.400682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:57.413295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:16:57.413319Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:57.416590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:57.416659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:57.416705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:57.418321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:57.418378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:57.418477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:57.418528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:57.419491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:57.419542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:57.419831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:57.419846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:57.419876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:57.419885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:57.419893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:57.419920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:16:57.421584Z node 1 :HIVE 
INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:16:57.438645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:57.438720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:57.438793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:57.438800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:57.438837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:57.438849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:57.439641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:57.439679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:57.439740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:57.439748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:57.439752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:57.439757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:57.440344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:57.440369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:57.440381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:57.440839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress 
Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:57.440854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:57.440862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:57.440871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:57.441578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:57.442039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:57.442075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:57.442268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:57.442291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:57.442297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:57.442345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:16:57.442351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:57.442397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:57.442410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:16:57.442945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:57.442956Z node 1 :FLAT_TX_SCHEMESHARD ... 
7594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:57.465809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_bsv.cpp:40: TDropBlockStoreVolume TPropose, operationId: 102:0 HandleReply TEvOperationPlan, step: 5000003, at schemeshard: 72057594046678944 2025-06-30T09:16:57.465839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:16:57.465866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:16:57.465872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:16:57.465878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:16:57.465883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:16:57.465893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:57.465906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:16:57.465913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-30T09:16:57.465921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:16:57.465927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:16:57.465931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:16:57.465955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:16:57.465962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-30T09:16:57.465967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-30T09:16:57.465971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-30T09:16:57.466288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-30T09:16:57.466303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-30T09:16:57.466498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-30T09:16:57.466508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-30T09:16:57.466668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:16:57.466678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:16:57.466701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:57.466707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:57.466768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:16:57.466798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:57.466805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-30T09:16:57.466811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:16:57.466941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:16:57.466955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:16:57.466960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:16:57.466965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-30T09:16:57.466971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:16:57.467038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:16:57.467044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:16:57.467055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:16:57.467098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle 
TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:16:57.467108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:16:57.467113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:16:57.467118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-30T09:16:57.467123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:57.467131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-30T09:16:57.467188Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-06-30T09:16:57.467245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:57.467310Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 2025-06-30T09:16:57.467388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:16:57.467919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:16:57.468251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:16:57.468289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:16:57.468679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-30T09:16:57.468704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-30T09:16:57.468782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:16:57.468790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:16:57.468857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: 
NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:16:57.468876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:16:57.468881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:397:2375] TestWaitNotification: OK eventTxId 102 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-06-30T09:16:57.468939Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-30T09:16:57.468964Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 { Type { Kind: Struct Struct { Member { Name: "ShardsToDelete" Type { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "List" Type { Kind: List List { Item { Kind: Struct Struct { Member { Name: "ShardIdx" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } Member { Name: "Truncated" Type { Kind: Data Data { Scheme: 6 } } } } } } } } } } Value { Struct { Optional { Struct { } Struct { Bool: false } } } } } >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query >> TxUsage::WriteToTopic_Demo_20_RestartNo_Query [GOOD] >> TBSV::ShouldLimitBlockStoreVolumeDropRate [GOOD] >> TListAllTopicsTests::PlainList [GOOD] >> TListAllTopicsTests::RecursiveList |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TxUsage::WriteToTopic_Demo_21_RestartNo_Table >> TInterconnectTest::OldFormat >> KqpQueryService::TempTablesDrop >> KqpQueryService::TableSink_ReplaceFromSelectLargeOlap |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/describes_ut/unittest |78.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/describes_ut/unittest |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/describes_ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::ShouldLimitBlockStoreVolumeDropRate [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:16:57.880451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:57.880487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:57.880512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 
0.010000s 2025-06-30T09:16:57.880519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:57.880528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:57.880534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:57.880546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:57.880565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:57.880711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:57.880797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:57.898719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:16:57.898783Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:57.902732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:57.902838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:57.902885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:57.905003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:57.905088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:57.905236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:57.905325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:57.906171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:57.906224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:57.906547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:57.906563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:57.906594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:57.906605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a 
bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:57.906612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:57.906636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:16:57.908270Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:16:57.931622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:57.931725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:57.931807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:57.931819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:57.931874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:57.931890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:57.932921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:57.932978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:57.933062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:57.933077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:57.933085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:57.933092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:57.933678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-30T09:16:57.933693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:57.933701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:57.934150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:57.934164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:57.934172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:57.934180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:57.935068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:57.935645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:57.935703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:57.935937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:57.935970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:57.935983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:57.936050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:16:57.936059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:57.936100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:57.936115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 
72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:16:57.936671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:57.936684Z node 1 :FLAT_TX_SCHEMESHARD ... 223769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 129, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 129 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 129 at step: 5000028 2025-06-30T09:16:58.223917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000028, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:58.223936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 129 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000028 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:58.223946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_bsv.cpp:40: TDropBlockStoreVolume TPropose, operationId: 129:0 HandleReply TEvOperationPlan, step: 5000028, at schemeshard: 72057594046678944 2025-06-30T09:16:58.223973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 2 2025-06-30T09:16:58.224001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#129:0 progress is 1/1 2025-06-30T09:16:58.224006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-30T09:16:58.224012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#129:0 progress is 1/1 2025-06-30T09:16:58.224016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-30T09:16:58.224025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:58.224034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 1 2025-06-30T09:16:58.224040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 129, ready parts: 1/1, is published: false 2025-06-30T09:16:58.224047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-30T09:16:58.224052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 129:0 2025-06-30T09:16:58.224057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 129:0 2025-06-30T09:16:58.224083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 2 2025-06-30T09:16:58.224092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 129, publications: 2, subscribers: 0 2025-06-30T09:16:58.224097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 1], 54 2025-06-30T09:16:58.224101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 13], 18446744073709551615 2025-06-30T09:16:58.224545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-30T09:16:58.224560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569 2025-06-30T09:16:58.224577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 2025-06-30T09:16:58.224582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568 2025-06-30T09:16:58.224933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:24 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:16:58.224945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:23 hive 72057594037968897 at ss 72057594046678944 FAKE_COORDINATOR: Erasing txId 129 2025-06-30T09:16:58.224987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:58.224993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:58.225038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 13] 2025-06-30T09:16:58.225064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:58.225069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 129, path id: 1 2025-06-30T09:16:58.225075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 129, path id: 13 2025-06-30T09:16:58.225192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 13 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 129 2025-06-30T09:16:58.225204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 13 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 129 2025-06-30T09:16:58.225210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: 
Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 129 2025-06-30T09:16:58.225215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 13], version: 18446744073709551615 2025-06-30T09:16:58.225224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 1 2025-06-30T09:16:58.225286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:16:58.225292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 13], at schemeshard: 72057594046678944 2025-06-30T09:16:58.225301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:16:58.225337Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 24 TxId_Deprecated: 24 2025-06-30T09:16:58.225387Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 23 TxId_Deprecated: 23 2025-06-30T09:16:58.225420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 24 ShardOwnerId: 72057594046678944 ShardLocalIdx: 24, at schemeshard: 72057594046678944 2025-06-30T09:16:58.225486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 54 PathOwnerId: 72057594046678944, cookie: 129 2025-06-30T09:16:58.225494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 54 PathOwnerId: 72057594046678944, cookie: 129 2025-06-30T09:16:58.225499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 129 2025-06-30T09:16:58.225504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 54 2025-06-30T09:16:58.225508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:58.225519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 129, subscribers: 0 2025-06-30T09:16:58.225594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 23 ShardOwnerId: 72057594046678944 ShardLocalIdx: 23, at schemeshard: 72057594046678944 2025-06-30T09:16:58.225882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard 
Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-30T09:16:58.226378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:16:58.226399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-30T09:16:58.226414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-30T09:16:58.226429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 TestModificationResult got TxId: 129, wait until txId: 129 TestWaitNotification wait txId: 129 2025-06-30T09:16:58.226567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 129: send EvNotifyTxCompletion 2025-06-30T09:16:58.226575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 129 2025-06-30T09:16:58.226680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 129, at schemeshard: 72057594046678944 2025-06-30T09:16:58.226700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 129: got EvNotifyTxCompletionResult 2025-06-30T09:16:58.226705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 129: satisfy waiter [1:1686:3554] TestWaitNotification: OK eventTxId 129 |78.8%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/{meta.json ... 
results_accumulator.log} |78.8%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut >> TInterconnectTest::OldFormat [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnNew |78.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |78.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut >> TInterconnectTest::OldFormatSuppressVersionCheckOnNew [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnOld |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut >> TInterconnectTest::TestConnectAndDisconnect >> KqpQueryService::TempTablesDrop [GOOD] >> KqpQueryService::Tcl >> TInterconnectTest::TestNotifyUndelivered >> TInterconnectTest::TestBlobEvent220BytesPreSerialized >> TInterconnectTest::OldFormatSuppressVersionCheckOnOld [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheck >> TInterconnectTest::TestConnectAndDisconnect [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalTypeYdb [GOOD] >> TInterconnectTest::TestBlobEventPreSerialized >> TMiniKQLProtoTestYdb::TestExportListTypeYdb >> TxUsage::WriteToTopic_Demo_27_Table [GOOD] >> TMiniKQLProtoTestYdb::TestExportListTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportIntegralYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportEmptyOptionalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalNotEmptyYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportListYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantNotNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNotNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalYdbType [GOOD] >> TInterconnectTest::TestNotifyUndelivered [GOOD] >> TInterconnectTest::TestNotifyUndeliveredOnMissedActor >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsNotExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowPayload [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowPayloadNullKey [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeToInclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowManyShards [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitions [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByItems [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByBytes [GOOD] >> TMiniKQLEngineFlatTest::TestSomePushDown [GOOD] >> TMiniKQLEngineFlatTest::TestTakePushdown [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown >> TxUsage::WriteToTopic_Demo_27_Query >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown [GOOD] >> TMiniKQLProtoTestYdb::TestExportVoidTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportUuidTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportTupleTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportStructTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantTupleTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantStructTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVoidYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportStringYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportUuidYdb [GOOD] >> 
TMiniKQLProtoTestYdb::TestExportTupleYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportStructYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantYdb [GOOD] >> TInterconnectTest::TestBlobEvent220BytesPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizes >> TInterconnectTest::OldFormatSuppressVersionCheck [GOOD] >> TInterconnectTest::TestBlobEventPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventUpToMebibytes |78.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |78.8%| [LD] {RESULT} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalYdbType [GOOD] >> TInterconnectTest::TestNotifyUndeliveredOnMissedActor [GOOD] >> TInterconnectTest::TestPreSerializedBlobEventUpToMebibytes >> TxUsage::WriteToTopic_Demo_14_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table [GOOD] |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown [GOOD] >> KqpQueryService::Tcl [GOOD] >> KqpQueryService::TableSink_ReplaceFromSelectOlap >> TInterconnectTest::TestBlobEventDifferentSizes [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerialized >> TxUsage::WriteToTopic_Demo_14_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::OldFormatSuppressVersionCheck [GOOD] Test command err: 2025-06-30T09:16:58.983636Z node 4 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [4:22:2057] [node 3] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-30T09:16:59.396292Z node 5 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [5:20:2058] [node 6] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-30T09:16:59.804306Z node 8 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [8:22:2057] [node 7] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-30T09:16:59.804604Z node 7 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [7:20:2058] [node 8] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestExportVariantYdb [GOOD] >> TInterconnectTest::TestBlobEventUpToMebibytes [GOOD] >> TInterconnectTest::TestBlobEventsThroughSubChannels >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query >> TInterconnectTest::TestPreSerializedBlobEventUpToMebibytes [GOOD] >> TInterconnectTest::TestPingPongThroughSubChannel >> TInterconnectTest::TestBlobEventDifferentSizesPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw |78.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |78.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots >> TxUsage::WriteToTopic_Demo_22_RestartNo_Query [GOOD] >> 
TInterconnectTest::TestBlobEventsThroughSubChannels [GOOD] >> TInterconnectTest::TestPingPongThroughSubChannel [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Table |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestBlobEventsThroughSubChannels [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw [GOOD] |78.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestPingPongThroughSubChannel [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistWithoutColumns [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistSetPayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistSetPayloadNullValue >> TMiniKQLEngineFlatTest::TestPureProgram [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFromInclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFromExclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromIncTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothExcFromIncTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromExcTo >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistSetPayloadNullValue [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistErasePayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowExistChangePayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowExistErasePayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowManyShards [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortPushdownPk [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortPushdown [GOOD] >> TMiniKQLProgramBuilderTest::TestEraseRowDynamicKey [GOOD] >> TMiniKQLProgramBuilderTest::TestAcquireLocks [GOOD] >> TMiniKQLProgramBuilderTest::TestDiagnostics [GOOD] >> TTicketParserTest::LoginRefreshGroupsGood [GOOD] >> TTicketParserTest::LoginCheckRemovedUser >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromExcTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothExcFromExcTo [GOOD] >> TMiniKQLEngineFlatTest::TestMapsPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoOrderedTakePushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoAggregatedPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoPartialSortPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestMultiRSPerDestination [GOOD] >> TMiniKQLProgramBuilderTest::TestEraseRowStaticKey [GOOD] >> TMiniKQLProgramBuilderTest::TestEraseRowPartialDynamicKey [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectRow [GOOD] >> TMiniKQLProgramBuilderTest::TestUpdateRowDynamicKey [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectFromInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectFromExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectToInclusiveRange |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw [GOOD] >> KqpQueryService::TableSink_ReplaceFromSelectOlap [GOOD] >> TMiniKQLEngineFlatHostTest::ShardId [GOOD] >> TMiniKQLEngineFlatHostTest::Basic [GOOD] >> TMiniKQLEngineFlatTest::TestAbort [GOOD] >> TMiniKQLEngineFlatTest::TestCASBoth2Fail1 [GOOD] >> TMiniKQLEngineFlatTest::TestCASBoth2Fail2 [GOOD] >> TMiniKQLEngineFlatTest::TestCASBoth2Fail12 [GOOD] >> TMiniKQLEngineFlatTest::TestBug998 [GOOD] >> TMiniKQLEngineFlatTest::TestAcquireLocks [GOOD] >> TMiniKQLEngineFlatTest::NoMapPushdownMultipleConsumers [GOOD] >> TMiniKQLEngineFlatTest::NoMapPushdownNonPureLambda >> 
TMiniKQLProgramBuilderTest::TestSelectToInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectToExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectBothFromInclusiveToInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectBothFromExclusiveToExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestInvalidParameterName [GOOD] >> TMiniKQLProgramBuilderTest::TestInvalidParameterType [GOOD] >> TMiniKQLEngineFlatTest::NoMapPushdownNonPureLambda [GOOD] >> TMiniKQLEngineFlatTest::NoOrderedMapPushdown [GOOD] >> TMiniKQLEngineFlatTest::NoMapPushdownWriteToTable [GOOD] >> TMiniKQLEngineFlatTest::NoMapPushdownArgClosure [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestMultiRSPerDestination [GOOD] Test command err: PrepareShardPrograms (491): too many shard readsets (2 > 1), src tables: [200:301:0], dst tables: [200:301:0] Type { Kind: Struct } |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/engine/ut/unittest >> TMiniKQLProgramBuilderTest::TestDiagnostics [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table [GOOD] |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::NoMapPushdownArgClosure [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_ReplaceFromSelectOlap [GOOD] Test command err: Trying to start YDB, gRPC: 14443, MsgBus: 61409 2025-06-30T09:16:58.754901Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668932191599315:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:58.755116Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00417d/r3tmp/tmpCTXq9E/pdisk_1.dat 2025-06-30T09:16:58.847465Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:58.856456Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:58.856494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:58.857328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14443, node 1 2025-06-30T09:16:58.892576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:58.892589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:58.892592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:58.892675Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61409 TClient is connected to server localhost:61409 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:58.993313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:58.997493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:16:59.217538Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668936486567180:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:59.217536Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668936486567168:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:59.217561Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:59.218652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:59.221843Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668936486567182:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:16:59.310070Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668936486567233:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:16:59.375838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:59.472474Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668936486567472:2458] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-30T09:16:59.475404Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668936486567479:2463] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZjljZDU4NDQtNDk1NGRjZi1hY2IzZjA2Ny0zZjY4ZDY3\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-30T09:16:59.487879Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-30T09:16:59.488165Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521668936486567524:2325], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:17: Error: At function: KiReadTable!
:3:17: Error: Cannot find table 'db.[/Root/test/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:16:59.488772Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZjljZDU4NDQtNDk1NGRjZi1hY2IzZjA2Ny0zZjY4ZDY3, ActorId: [1:7521668936486567164:2288], ActorState: ExecuteState, TraceId: 01jz02076watew8w45c16r9d7s, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:16:59.501479Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521668936486567543:2332], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:17: Error: At function: KiReadTable!
:3:17: Error: Cannot find table 'db.[/Root/test/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:16:59.501600Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NmRkZDQ3ZWItMTlhNzdkMC0zNWU2NjFmOC1mZGU2YTlkNQ==, ActorId: [1:7521668936486567539:2329], ActorState: ExecuteState, TraceId: 01jz02077aev91kgny8wb1pygb, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 18527, MsgBus: 10450 2025-06-30T09:16:59.610858Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668935276044381:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:59.610882Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00417d/r3tmp/tmp4ej4uY/pdisk_1.dat 2025-06-30T09:16:59.622002Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18527, node 2 2025-06-30T09:16:59.636885Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:59.636897Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:59.636899Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:59.636943Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10450 TClient is connected to server localhost:10450 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:59.711159Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:59.711186Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:59.712258Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:59.714071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at sc ... 
0T09:17:01.594075Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:01.654952Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.654957Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.655026Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.655060Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.655075Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.655120Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.655150Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.655172Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.655221Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.655224Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.655365Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.655406Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.655518Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:17:01.656045Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=24;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656081Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=25;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656102Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=26;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656117Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=27;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656134Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=28;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656154Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=29;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656172Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=30;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656192Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=31;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656213Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=32;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656227Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=33;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656247Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=34;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656268Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=35;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656289Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=36;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656306Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=37;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656326Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=38;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656339Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=39;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656357Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=40;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037915; 2025-06-30T09:17:01.656372Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=41;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656385Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=42;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656401Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656420Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656441Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656464Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656510Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[3:7521668944869178014:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037910;local_tx_no=47;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903;receive=72075186224037909; 2025-06-30T09:17:01.656804Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/engine/ut/unittest >> TMiniKQLProgramBuilderTest::TestInvalidParameterType [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::ReassignGroupTest3dc >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains_and_one_small_node [GOOD] >> DSProxyCounters::PutGeneratedSubrequestBytes >> TBlobStorageProxySequenceTest::TestGivenBlock42MultiPut2ItemsStatuses [GOOD] >> TDSProxyGetTest::TestBlock42GetSpecific [GOOD] >> TDSProxyPatchTest::NaiveErrorOnGetItem_ErasureNone |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains_and_one_small_node [GOOD] >> TBlobStorageProxySequenceTest::TestGivenStripe42GetThenVGetResponsePartsNodata263451ThenGetOk >> TTicketParserTest::NebiusAuthenticationRetryError [GOOD] >> TTicketParserTest::NebiusAuthenticationRetryErrorImmediately >> TBlobStorageProxySequenceTest::TestGivenBlock42GetThenVGetResponseParts2523Nodata4ThenGetOk [GOOD] >> TDSProxyGetTest::TestBlock42GetIntervalsAllOk >> TDSProxyPatchTest::NaiveErrorOnGetItem_ErasureNone [GOOD] >> TDSProxyPatchTest::SecuredOk_ErasureMirror3dc >> DSProxyCounters::PutGeneratedSubrequestBytes [GOOD] >> TDSProxyFaultTolerancePatchTest::block42 >> TBlobStorageProxySequenceTest::TestGivenStripe42GetThenVGetResponsePartsNodata263451ThenGetOk [GOOD] >> TDSProxyGetTest::TestBlock42WipedErrorWithTwoBlobs |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TDSProxyPatchTest::SecuredOk_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_3_1_0_VdiskErrors >> TDSProxyGetTest::TestBlock42GetIntervalsAllOk [GOOD] >> 
TDSProxyPatchTest::MovedOk_ErasureNone >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_3_1_0_VdiskErrors [GOOD] |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TListAllTopicsTests::RecursiveList [GOOD] >> TListAllTopicsTests::ListLimitAndPaging >> TDSProxyPatchTest::MovedOk_ErasureNone [GOOD] >> TDSProxyPatchTest::SecuredErrorOnPut_ErasureMirror3dc |78.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |78.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |78.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_3_1_0_VdiskErrors [GOOD] Test command err: 2025-06-30T09:17:04.063227Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [3:83:2129] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-30T09:17:04.063292Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-30T09:17:04.063299Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-30T09:17:04.063304Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-30T09:17:04.063307Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-30T09:17:04.063311Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-30T09:17:04.063314Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-30T09:17:04.066081Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-30T09:17:04.066125Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-30T09:17:04.066132Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-30T09:17:04.066188Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-30T09:17:04.066194Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement 
record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-30T09:17:04.066197Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-30T09:17:04.066209Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-30T09:17:04.066255Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-30T09:17:04.066261Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-30T09:17:04.066264Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-30T09:17:04.066272Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:2:0] Marker# BPP01 2025-06-30T09:17:04.066296Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-30T09:17:04.066302Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 3 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-30T09:17:04.066305Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 3 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-30T09:17:04.066308Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 8 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-30T09:17:04.066311Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 8 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-30T09:17:04.066344Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:0:0] Marker# BPP01 2025-06-30T09:17:04.066352Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:2:0] Marker# BPP01 2025-06-30T09:17:04.066366Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-30T09:17:04.066373Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# 
OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-30T09:17:04.066417Z node 3 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.285 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.286 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.286 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 3.059 VDiskId# [0:1:0:1:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 3.099 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 3.14 VDiskId# [0:1:1:1:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 3.151 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 3.161 VDiskId# [0:1:2:1:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 3.207 VDiskId# [0:1:0:2:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 3.217 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 3.226 VDiskId# [0:1:1:2:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 3.249 VDiskId# [0:1:0:0:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 3.266 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:0:0] NodeId# 3 } TEvVPut{ TimestampMs# 3.267 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 3.296 VDiskId# [0:1:1:0:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 3.304 VDiskId# [0:1:2:2:0] NodeId# 3 Status# OK } ] } >> TDSProxyPatchTest::SecuredErrorOnPut_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_2_0_0_VdiskErrors >> TPartitionTests::DifferentWriteTxBatchingOptions [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_2_0_0_VdiskErrors [GOOD] >> TPartitionTests::FailedTxsDontBlock |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateAssignAlterIsAllowed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_2_0_0_VdiskErrors [GOOD] Test command err: 2025-06-30T09:17:04.681959Z node 4 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [4:83:2129] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-30T09:17:04.682068Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-30T09:17:04.682078Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-30T09:17:04.682085Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 
2025-06-30T09:17:04.682091Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-30T09:17:04.682097Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-30T09:17:04.682102Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-30T09:17:04.686466Z node 4 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-30T09:17:04.686545Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-30T09:17:04.686556Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-30T09:17:04.686667Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-30T09:17:04.686684Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-30T09:17:04.686728Z node 4 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-30T09:17:04.686756Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-30T09:17:04.686761Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-30T09:17:04.686808Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-30T09:17:04.686831Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-30T09:17:04.686846Z node 4 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-30T09:17:04.686909Z node 4 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.439 sample PartId# 
[72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 4 } TEvVPut{ TimestampMs# 0.439 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 4 } TEvVPut{ TimestampMs# 0.439 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 4 } TEvVPutResult{ TimestampMs# 4.826 VDiskId# [0:1:0:1:0] NodeId# 4 Status# ERROR } TEvVPut{ TimestampMs# 4.893 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 4 } TEvVPutResult{ TimestampMs# 4.977 VDiskId# [0:1:1:1:0] NodeId# 4 Status# OK } TEvVPutResult{ TimestampMs# 4.993 VDiskId# [0:1:2:1:0] NodeId# 4 Status# OK } TEvVPutResult{ TimestampMs# 5.038 VDiskId# [0:1:0:2:0] NodeId# 4 Status# ERROR } TEvVPut{ TimestampMs# 5.075 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 4 } TEvVPutResult{ TimestampMs# 5.119 VDiskId# [0:1:0:0:0] NodeId# 4 Status# OK } ] } |78.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |78.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |78.9%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TTicketParserTest::NebiusAuthenticationRetryErrorImmediately [GOOD] >> TTicketParserTest::NebiusAccessKeySignatureUnsupported |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table >> TxUsage::WriteToTopic_Demo_21_RestartNo_Table [GOOD] >> TxUsage::WriteToTopic_Demo_14_Query [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartNo_Query >> TTicketParserTest::NebiusAccessKeySignatureUnsupported [GOOD] |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TxUsage::WriteToTopic_Demo_16_Table |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAccessKeySignatureUnsupported [GOOD] Test command err: 2025-06-30T09:16:40.319843Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668855357458865:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.319869Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043c2/r3tmp/tmpIJwvrx/pdisk_1.dat 2025-06-30T09:16:40.388687Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21293, node 1 2025-06-30T09:16:40.425116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.425151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-30T09:16:40.427620Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.427633Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.427635Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.427687Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:40.429367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9206 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:40.476871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:16:40.599168Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 2025-06-30T09:16:40.603976Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:16:40.603992Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:40.604320Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****RmTw (2D5A7684) () has now retryable error message 'Security state is empty' 2025-06-30T09:16:40.604353Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:16:40.604356Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:40.604386Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****RmTw (2D5A7684) () has now retryable error message 'Security state is empty' 2025-06-30T09:16:40.604390Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-30T09:16:40.604393Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-30T09:16:40.604397Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket eyJh****RmTw (2D5A7684): Security state is empty 2025-06-30T09:16:41.321950Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:42.321366Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****RmTw (2D5A7684) 2025-06-30T09:16:42.321457Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:16:42.321470Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:42.321510Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****RmTw (2D5A7684) () has now retryable error message 'Security state is empty' 2025-06-30T09:16:42.321519Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-30T09:16:43.604881Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:16:45.320705Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668855357458865:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:45.320762Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:16:45.326724Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****RmTw (2D5A7684) 2025-06-30T09:16:45.326814Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:16:45.326821Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:45.327094Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****RmTw (2D5A7684) () has now valid token of 
user1 2025-06-30T09:16:45.327098Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-30T09:16:49.328586Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****RmTw (2D5A7684) 2025-06-30T09:16:49.328730Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****RmTw (2D5A7684) () has now valid token of user1 2025-06-30T09:16:50.870976Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668896311497182:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:50.871001Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043c2/r3tmp/tmplu1JIj/pdisk_1.dat 2025-06-30T09:16:50.883522Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:50.883744Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521668896311497162:2080] 1751275010870850 != 1751275010870853 TServer::EnableGrpc on GrpcPort 7400, node 2 2025-06-30T09:16:50.895513Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:50.895528Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:50.895543Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:50.895595Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7705 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:16:50.974942Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:50.974981Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:50.975472Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:50.975919Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:16:50.977913Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-30T09:16:50.977946Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [523d7ddfae30] Connect to grpc://localhost:7100 2025-06-30T09:16:50.978404Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [523d7ddfae30] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-30T09:16:50.980509Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [523d7ddfae30] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-30T09:16:50.980575Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:16:51.311457Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521668898876167462:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:51.311495Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=/ ... 
c_service_client.h:120: [523d7c3fab70] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-30T09:16:52.319407Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [523d7c3fab70] Status 14 Service Unavailable 2025-06-30T09:16:52.319503Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:53.318426Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket **** (8E120919) 2025-06-30T09:16:53.318448Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-30T09:16:53.318529Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [523d7c3fab70] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" 2025-06-30T09:16:53.319627Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [523d7c3fab70] Status 14 Service Unavailable 2025-06-30T09:16:53.319681Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-30T09:16:56.311788Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7521668898876167462:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:56.311845Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:16:56.319423Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket **** (8E120919) 2025-06-30T09:16:56.319440Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-30T09:16:56.319477Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [523d7c3fab70] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-30T09:16:56.320192Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [523d7c3fab70] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-30T09:16:56.320260Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:17:03.786220Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521668951540370979:2155];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043c2/r3tmp/tmp4wZlbg/pdisk_1.dat 2025-06-30T09:17:03.795542Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:17:03.813326Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:03.817845Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521668951540370839:2080] 1751275023780643 != 1751275023780646 TServer::EnableGrpc on GrpcPort 4710, node 4 2025-06-30T09:17:03.828318Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-30T09:17:03.828347Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:03.828349Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:03.828408Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8740 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:03.880622Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:03.880653Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:03.881027Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:03.882568Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:17:03.887166Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:17:03.888391Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-30T09:17:03.888413Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:17:03.888417Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-30T09:17:03.888429Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-30T09:17:03.888446Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [523d7cfec330] Connect to grpc://localhost:6326 2025-06-30T09:17:03.888655Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [523d7cfec330] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response 14: "Service Unavailable" 2025-06-30T09:17:03.894353Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [523d7cfec330] Status 14 Service Unavailable 2025-06-30T09:17:03.894805Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-30T09:17:03.894822Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-30T09:17:03.894877Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [523d7cfec330] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-30T09:17:03.895657Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [523d7cfec330] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-30T09:17:03.895788Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-30T09:17:04.780684Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043c2/r3tmp/tmprrVjae/pdisk_1.dat 2025-06-30T09:17:06.166147Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521668963496617128:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:06.166837Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:17:06.188749Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18104, node 5 2025-06-30T09:17:06.207009Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:06.207025Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:06.207027Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-30T09:17:06.207076Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63110 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:06.267025Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:06.267055Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:06.268507Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:06.271814Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:17:06.273979Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:17:06.278205Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:908: Ticket AKIA****MPLE (B3EDC139): Access key signature is not supported >> TBSVWithReboots::SimultaneousCreateDropNbs >> TGroupMapperTest::ReassignGroupTest3dc [GOOD] |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::ReassignGroupTest3dc [GOOD] |78.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |78.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |78.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep >> TBSVWithReboots::CreateAssignWithVersion |78.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query [GOOD] |78.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |78.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |78.9%| [LD] {RESULT} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut >> TDSProxyFaultTolerancePatchTest::block42 [GOOD] >> TDSProxyPatchTest::MovedError_ErasureNone >> TBSVWithReboots::Create >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table >> TDSProxyPatchTest::MovedError_ErasureNone [GOOD] >> TDSProxyPatchTest::SecuredErrorOnGet_ErasureMirror3dc >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query [GOOD] >> TSchemeShardMoveTest::MoveIndex >> TDSProxyPatchTest::SecuredErrorOnGet_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_0_0_0_VdiskErrors >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table >> TPartitionTests::FailedTxsDontBlock [GOOD] >> TPartitionTests::EndWriteTimestamp_DataKeysBody >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_0_0_0_VdiskErrors [GOOD] >> TSchemeShardMoveTest::Chain >> TPartitionTests::EndWriteTimestamp_DataKeysBody [GOOD] >> TSchemeShardMoveTest::MoveIndex [GOOD] >> TSchemeShardMoveTest::MoveIndexDoesNonExisted >> TPartitionTests::EndWriteTimestamp_FromMeta ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_0_0_0_VdiskErrors [GOOD] Test command err: 2025-06-30T09:17:08.970445Z node 26 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [26:83:2129] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-30T09:17:08.970532Z node 26 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-30T09:17:08.970541Z node 26 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-30T09:17:08.970547Z node 26 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-30T09:17:08.970552Z node 26 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] 
Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-30T09:17:08.970557Z node 26 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-30T09:17:08.970562Z node 26 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-30T09:17:08.976848Z node 26 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-30T09:17:08.976982Z node 26 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-30T09:17:08.977000Z node 26 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-30T09:17:08.977023Z node 26 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-30T09:17:08.977036Z node 26 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-30T09:17:08.977086Z node 26 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.424 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 26 } TEvVPut{ TimestampMs# 0.424 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 26 } TEvVPut{ TimestampMs# 0.424 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 26 } TEvVPutResult{ TimestampMs# 6.74 VDiskId# [0:1:0:1:0] NodeId# 26 Status# OK } TEvVPutResult{ TimestampMs# 6.819 VDiskId# [0:1:1:1:0] NodeId# 26 Status# OK } TEvVPutResult{ TimestampMs# 6.836 VDiskId# [0:1:2:1:0] NodeId# 26 Status# OK } ] } >> TSchemeShardMoveTest::MoveIndexDoesNonExisted [GOOD] >> TSchemeShardMoveTest::MoveMigratedTable >> TSchemeShardMoveTest::Chain [GOOD] >> TSchemeShardMoveTest::Index >> TPartitionTests::EndWriteTimestamp_FromMeta [GOOD] >> TPartitionTests::EndWriteTimestamp_HeadKeys >> TListAllTopicsTests::ListLimitAndPaging [GOOD] >> TMeteringSink::FlushPutEventsV1 [GOOD] >> TMeteringSink::FlushResourcesReservedV1 [GOOD] >> TMeteringSink::FlushStorageV1 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveIndexDoesNonExisted [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: 
[1:112:2142] 2025-06-30T09:17:09.062357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:09.062379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:09.062384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:09.062388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:09.062399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:09.062403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:09.062411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:09.062424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:09.062529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:09.062608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:09.074898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:09.074921Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:09.077848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:09.077900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:09.077923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:09.079644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:09.079704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:09.079792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:09.079838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:09.080403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:09.080435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:09.080711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:09.080726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:09.080756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:09.080765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:09.080772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:09.080795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.082043Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:09.098085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:09.098180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.098244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:09.098252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:09.098297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:09.098308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:09.099130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:09.099165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:09.099206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.099214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:09.099218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:09.099223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:09.099658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.099666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:09.099670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:09.100012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.100019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.100024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:09.100029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:09.100647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:09.101183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:09.101221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:09.101406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:09.101427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:09.101434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-30T09:17:09.101478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:09.101484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:09.101516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:09.101538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:09.101996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:09.102006Z node 1 :FLAT_TX_SCHEMESHARD ... } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:09.895319Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Sync" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:17:09.895345Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Sync" took 25us result status StatusSuccess 2025-06-30T09:17:09.895480Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Sync" PathDescription { Self { Name: "Sync" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: 
EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Sync" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value0" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 
TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:09.895532Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Async" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:17:09.895551Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Async" took 19us result status StatusSuccess 2025-06-30T09:17:09.895636Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Async" PathDescription { Self { Name: "Async" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 5 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Async" LocalPathId: 5 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: 
false } KeyColumnNames: "value1" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardMoveTest::Index [GOOD] >> TPartitionTests::EndWriteTimestamp_HeadKeys [GOOD] >> TSchemeShardMoveTest::MoveMigratedTable [GOOD] >> TSchemeShardMoveTest::MoveOldTableWithIndex ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/unittest >> TMeteringSink::FlushStorageV1 [GOOD] Test command err: 2025-06-30T09:16:51.996489Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668902838306926:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:51.996711Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001418/r3tmp/tmpx40U06/pdisk_1.dat 2025-06-30T09:16:52.079317Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:16:52.128425Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:52.130213Z node 1 
:CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521668902838306809:2080] 1751275011992021 != 1751275011992024 TServer::EnableGrpc on GrpcPort 26902, node 1 2025-06-30T09:16:52.162966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/001418/r3tmp/yandexdEm1Ru.tmp 2025-06-30T09:16:52.162979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/001418/r3tmp/yandexdEm1Ru.tmp 2025-06-30T09:16:52.163040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/001418/r3tmp/yandexdEm1Ru.tmp 2025-06-30T09:16:52.163089Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:52.170046Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:52.170075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:52.171891Z INFO: TTestServer started on Port 10699 GrpcPort 26902 2025-06-30T09:16:52.175169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10699 PQClient connected to localhost:26902 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:52.232164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:52.236004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-30T09:16:52.266183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:16:52.270276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:16:52.375333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:16:52.576188Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668907133274855:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:52.576274Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:52.586824Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668907133274894:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:52.586960Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668907133274892:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:52.586975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:52.592016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:52.599188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-30T09:16:52.599277Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668907133274897:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:16:52.635871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:52.671986Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668907133275016:2465] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:16:52.698270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:52.710184Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521668907133275025:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:16:52.717268Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=MmYyM2VhODctODg4YmJlZTAtYWY4MWExMWYtNzYzMjJkYzI=, ActorId: [1:7521668907133274850:2293], ActorState: ExecuteState, TraceId: 01jz0200eq2q593td4vhwh49rp, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:16:52.717825Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:16:52.735337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-30T09:16:52.788237Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz0200mv1939g1h6wqxesyw6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mjc1YmQxMzgtMmZjMWM3NTAtMWM1ZjE2YmQtY2RlNzU0MDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7521668907133275257:2612] 2025-06-30T09:16:52.997888Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:56.995502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668902838306926:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:56.995546Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:16:57.962498Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:186: new Create topic request 2025-06-30T ... 
state CALCULATING 2025-06-30T09:17:10.159207Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037896] TxId 281474976715677, State CALCULATING 2025-06-30T09:17:10.159210Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037896] TxId 281474976715677 State CALCULATING FrontTxId 281474976715677 2025-06-30T09:17:10.159214Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72075186224037896] Received 1, Expected 1 2025-06-30T09:17:10.159217Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037896] TxId 281474976715677, NewState CALCULATED 2025-06-30T09:17:10.159220Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037896] TxId 281474976715677 moved from CALCULATING to CALCULATED 2025-06-30T09:17:10.159223Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72075186224037896] write key for TxId 281474976715677 2025-06-30T09:17:10.159320Z node 3 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715677] save tx TxId: 281474976715677 State: CALCULATED MinStep: 1751275030190 MaxStep: 18446744073709551615 Step: 1751275030204 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 MaxSizeInPartition: 9223372036854775807 LifetimeSeconds: 64800 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 2097152 BurstSize: 2097152 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic3" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/dir2/topic3" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7521668954584207854 RawX2: 12884904032 } Partitions { Partition { PartitionId: 0 } } 2025-06-30T09:17:10.159353Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037896] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:17:10.159912Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:17:10.159921Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037896] Try execute txs with state CALCULATED 2025-06-30T09:17:10.159922Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037896] TxId 281474976715677, State CALCULATED 2025-06-30T09:17:10.159925Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037896] TxId 281474976715677 State CALCULATED FrontTxId 281474976715677 2025-06-30T09:17:10.159927Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037896] TxId 281474976715677, NewState WAIT_RS 2025-06-30T09:17:10.159929Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037896] TxId 281474976715677 moved from CALCULATED to WAIT_RS 2025-06-30T09:17:10.159940Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4027: [PQ: 72075186224037896] Send TEvTxProcessing::TEvReadSet to 0 receivers. 
Wait TEvTxProcessing::TEvReadSet from 0 senders. 2025-06-30T09:17:10.159944Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4521: [PQ: 72075186224037896] HaveParticipantsDecision 1 2025-06-30T09:17:10.159951Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037896] TxId 281474976715677, NewState EXECUTING 2025-06-30T09:17:10.159953Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037896] TxId 281474976715677 moved from WAIT_RS to EXECUTING 2025-06-30T09:17:10.159954Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037896] Received 0, Expected 1 2025-06-30T09:17:10.159963Z node 3 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72075186224037896, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1751275030204, TxId 281474976715677 2025-06-30T09:17:10.160002Z node 3 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:17:10.160248Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:17:10.160259Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037896, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:17:10.160276Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3583: [PQ: 72075186224037896] Handle TEvPQ::TEvTxCommitDone Step 1751275030204, TxId 281474976715677, Partition 0 2025-06-30T09:17:10.160280Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037896] Try execute txs with state EXECUTING 2025-06-30T09:17:10.160281Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037896] TxId 281474976715677, State EXECUTING 2025-06-30T09:17:10.160283Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037896] TxId 281474976715677 State EXECUTING FrontTxId 281474976715677 2025-06-30T09:17:10.160285Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037896] Received 1, Expected 1 2025-06-30T09:17:10.160293Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4224: [PQ: 72075186224037896] TxId: 281474976715677 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-30T09:17:10.160296Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4555: [PQ: 72075186224037896] complete TxId 281474976715677 2025-06-30T09:17:10.160343Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72075186224037896] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 MaxSizeInPartition: 9223372036854775807 LifetimeSeconds: 64800 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 2097152 BurstSize: 2097152 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic3" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/dir2/topic3" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } 
2025-06-30T09:17:10.160358Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037896] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:17:10.160366Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4573: [PQ: 72075186224037896] delete partitions for TxId 281474976715677 2025-06-30T09:17:10.160368Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037896] TxId 281474976715677, NewState EXECUTED 2025-06-30T09:17:10.160371Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037896] TxId 281474976715677 moved from EXECUTING to EXECUTED 2025-06-30T09:17:10.160374Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72075186224037896] write key for TxId 281474976715677 2025-06-30T09:17:10.160417Z node 3 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715677] save tx TxId: 281474976715677 State: EXECUTED MinStep: 1751275030190 MaxStep: 18446744073709551615 Step: 1751275030204 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 MaxSizeInPartition: 9223372036854775807 LifetimeSeconds: 64800 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 2097152 BurstSize: 2097152 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic3" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/dir2/topic3" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7521668954584207854 RawX2: 12884904032 } Partitions { Partition { PartitionId: 0 } } 2025-06-30T09:17:10.160440Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037896] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:17:10.162319Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:17:10.162329Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037896] Try execute txs with state EXECUTED 2025-06-30T09:17:10.162331Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037896] TxId 281474976715677, State EXECUTED 2025-06-30T09:17:10.162334Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037896] TxId 281474976715677 State EXECUTED FrontTxId 281474976715677 2025-06-30T09:17:10.162338Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037896] TPersQueue::SendEvReadSetAckToSenders 2025-06-30T09:17:10.162340Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037896] TxId 281474976715677, NewState WAIT_RS_ACKS 2025-06-30T09:17:10.162342Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037896] TxId 281474976715677 moved from EXECUTED to WAIT_RS_ACKS 2025-06-30T09:17:10.162345Z node 3 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715677] PredicateAcks: 0/0 2025-06-30T09:17:10.162347Z 
node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037896] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-30T09:17:10.162349Z node 3 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715677] PredicateAcks: 0/0 2025-06-30T09:17:10.162351Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037896] add an TxId 281474976715677 to the list for deletion 2025-06-30T09:17:10.162353Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037896] TxId 281474976715677, NewState DELETING 2025-06-30T09:17:10.162358Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037896] delete key for TxId 281474976715677 2025-06-30T09:17:10.162368Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037896] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:17:10.162670Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:17:10.162675Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037896] Try execute txs with state DELETING 2025-06-30T09:17:10.162677Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037896] TxId 281474976715677, State DELETING 2025-06-30T09:17:10.162680Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037896] delete TxId 281474976715677 >> TSubDomainTest::DeleteTableAndThenForceDeleteSubDomain ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::Index [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:17:09.582875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:09.582907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:09.582913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:09.582919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:09.582930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:09.582935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:09.582945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:09.582961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-30T09:17:09.583070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:09.583148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:09.597052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:09.597084Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:09.601113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:09.601194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:09.601233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:09.603262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:09.603350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:09.603483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:09.603549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:09.604206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:09.604250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:09.604546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:09.604562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:09.604591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:09.604602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:09.604609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:09.604631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.606094Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:09.629464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:09.629561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.629643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:09.629654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:09.629714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:09.629735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:09.630658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:09.630708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:09.630793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.630806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:09.630812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:09.630819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:09.631389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.631406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:09.631413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:09.631846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.631859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.631866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:09.631873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:09.632602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:09.633055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:09.633098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:09.633299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:09.633332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:09.633340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:09.633400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:09.633408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:09.633442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:09.633472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:09.633902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:09.633912Z node 1 :FLAT_TX_SCHEMESHARD ... 
2057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:10.538599Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove/Sync" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:17:10.538630Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove/Sync" took 32us result status StatusSuccess 2025-06-30T09:17:10.538800Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove/Sync" PathDescription { Self { Name: "Sync" PathId: 10 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 11 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 10 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 
ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Sync" LocalPathId: 10 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value0" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 10 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:10.538899Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove/Async" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:17:10.538922Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove/Async" took 25us result status StatusSuccess 2025-06-30T09:17:10.539050Z node 2 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove/Async" PathDescription { Self { Name: "Async" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 9 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 8 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Async" LocalPathId: 8 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value1" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { 
Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |78.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut >> TSchemeShardMoveTest::MoveOldTableWithIndex [GOOD] |78.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |78.9%| [LD] {RESULT} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Table [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/unittest >> TPartitionTests::EndWriteTimestamp_HeadKeys [GOOD] Test command err: 2025-06-30T09:16:50.921582Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:50.921617Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-30T09:16:50.926396Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-30T09:16:50.926477Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:16:50.926585Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:182:2195] 2025-06-30T09:16:50.926853Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-30T09:16:50.926889Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-30T09:16:50.926914Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-30T09:16:50.926936Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request Got KV request 2025-06-30T09:16:50.926968Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: 
d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-30T09:16:50.926984Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 1 size 684 so 0 eo 1 d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-30T09:16:50.927002Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-30T09:16:50.927005Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-30T09:16:50.927009Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-30T09:16:50.000000Z 2025-06-30T09:16:50.927012Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-30T09:16:50.927016Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [1:182:2195] 2025-06-30T09:16:50.927024Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-30T09:16:50.927029Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-30T09:16:50.927076Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:16:51.263565Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src1|43c778b8-15e96008-4a259357-15ed71a8_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src1 2025-06-30T09:16:51.263621Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 Got batch complete: 1 2025-06-30T09:16:52.322963Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create immediate tx with id = 3 and act no: 4 Create immediate tx with id = 6 and act no: 7 2025-06-30T09:16:52.323104Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-30T09:16:53.677885Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:16:53.677955Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Wait batch completion 2025-06-30T09:16:53.678005Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-30T09:16:53.678015Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-30T09:16:53.678067Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src1' seqNo 1 partNo 0 2025-06-30T09:16:53.678113Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src1' seqNo 1 partNo 0 FormedBlobsCount 0 NewHead: Offset 20 PartNo 0 PackedSize 84 count 1 nextOffset 21 batches 1 2025-06-30T09:16:53.678123Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-30T09:16:53.678129Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 20 PartNo 0 PackedSize 84 count 1 nextOffset 21 batches 1 2025-06-30T09:16:53.680199Z node 1 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-30T09:16:53.680240Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src1' seqNo 3 partNo 0 2025-06-30T09:16:53.680311Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1425: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob sourceId 'src1' seqNo 3 partNo 0 result is x0000000000_00000000000000000020_00000_0000000001_00000? size 70 2025-06-30T09:16:53.680325Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1117: [PQ: 72057594037927937, Partition: 0, State: StateIdle] writing blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 old key x0000000000_00000000000000000020_00000_0000000001_00000? new key d0000000000_00000000000000000020_00000_0000000001_00000? 
size 70 WTime 10139 2025-06-30T09:16:53.680338Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src1' seqNo 3 partNo 0 FormedBlobsCount 1 NewHead: Offset 50 PartNo 0 PackedSize 84 count 1 nextOffset 51 batches 1 2025-06-30T09:16:53.680345Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-30T09:16:53.680351Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 84 count 1 nextOffset 51 batches 1 2025-06-30T09:16:53.680355Z node 1 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-30T09:16:53.680405Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 50,1 HeadOffset 1 endOffset 1 curOffset 51 d0000000000_00000000000000000050_00000_0000000001_00000? size 70 WTime 10139 Got KV request Got batch complete: 5 Got KV request Got KV request Send disk status response with cookie: 0 Wait immediate tx complete 3 2025-06-30T09:16:53.700781Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 34 WriteNewSizeFromSupportivePartitions# 2 2025-06-30T09:16:53.700825Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:16:53.700847Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 1 is already written 2025-06-30T09:16:53.700854Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:16:53.700858Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 1 is already written 2025-06-30T09:16:53.700898Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=140, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 3 Wait immediate tx complete 6 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 6 2025-06-30T09:16:54.958365Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=140, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Create distr tx with id = 8 and act no: 9 Create immediate tx with id = 10 and act no: 11 Create distr tx with id = 12 and act no: 13 2025-06-30T09:16:54.958471Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 8 2025-06-30T09:16:54.958498Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 12 2025-06-30T09:16:56.248401Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=140, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:16:56.248467Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Wait batch completion 2025-06-30T09:16:56.248510Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-30T09:16:56.248521Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-30T09:16:56.248532Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-30T09:16:56.248547Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 51 PartNo 0 PackedSize 0 count 0 ... /rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src2' seqNo 10 partNo 0 2025-06-30T09:17:07.887293Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src2' seqNo 10 partNo 0 FormedBlobsCount 0 NewHead: Offset 70 PartNo 0 PackedSize 552 count 10 nextOffset 80 batches 1 2025-06-30T09:17:07.887352Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 70,10 HeadOffset 25 endOffset 25 curOffset 80 d0000000000_00000000000000000070_00000_0000000010_00000? size 299 WTime 11240 Got KV request Got batch complete: 10 Got KV request Got KV request Send disk status response with cookie: 0 2025-06-30T09:17:07.918033Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 170 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:17:07.918073Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:17:07.918092Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 70 is stored on disk 2025-06-30T09:17:07.918101Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-30T09:17:07.918105Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 71 is stored on disk 2025-06-30T09:17:07.918108Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:17:07.918112Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 72 is stored on disk 2025-06-30T09:17:07.918115Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:17:07.918119Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 73 is stored on disk 2025-06-30T09:17:07.918123Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:17:07.918141Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 5, partNo: 0, Offset: 74 is stored on disk 2025-06-30T09:17:07.918144Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:17:07.918150Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 6, partNo: 0, Offset: 75 is stored on disk 2025-06-30T09:17:07.918154Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:17:07.918157Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 76 is stored on disk 2025-06-30T09:17:07.918161Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:17:07.918168Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 77 is stored on disk 2025-06-30T09:17:07.918171Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-30T09:17:07.918210Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 78 is stored on disk 2025-06-30T09:17:07.918215Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:17:07.918219Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 79 is stored on disk 2025-06-30T09:17:07.918257Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=488, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Create distr tx with id = 8 and act no: 9 Create immediate tx with id = 10 and act no: 11 Create distr tx with id = 12 and act no: 13 2025-06-30T09:17:07.918314Z node 2 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 8 2025-06-30T09:17:07.918334Z node 2 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 12 2025-06-30T09:17:08.855371Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=488, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:17:08.855424Z node 2 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Wait batch completion 2025-06-30T09:17:08.855457Z node 2 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-30T09:17:08.855465Z node 2 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-30T09:17:08.855496Z node 2 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(ABORTED), reason=MinSeqNo violation failure on src2 Got batch complete: 3 2025-06-30T09:17:09.070089Z node 2 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 12 2025-06-30T09:17:09.070124Z node 2 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 12 2025-06-30T09:17:09.070146Z node 2 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 80 PartNo 0 PackedSize 0 count 0 nextOffset 80 batches 0, NewHead=Offset 80 PartNo 0 PackedSize 0 count 0 nextOffset 80 batches 0 Got KV request Send disk status response with cookie: 0 Wait immediate tx complete 10 2025-06-30T09:17:09.100846Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 1 2025-06-30T09:17:09.100887Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, 
State: StateIdle] need more data for compaction. cumulativeSize=488, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Got propose resutl: Origin: 72057594037927937 Status: ABORTED TxId: 10 Errors { Kind: BAD_REQUEST Reason: "MinSeqNo violation failure on src2" } Wait tx committed for tx 12 2025-06-30T09:17:09.340509Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:17:09.340542Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-30T09:17:09.344799Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [3:183:2196] 2025-06-30T09:17:09.345093Z node 3 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-30T09:17:09.000000Z 2025-06-30T09:17:09.345102Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 2 generation 0 [3:183:2196] 2025-06-30T09:17:09.840456Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:17:09.840479Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-30T09:17:09.844797Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [4:183:2196] 2025-06-30T09:17:09.845159Z node 4 :PERSQUEUE INFO: partition_init.cpp:895: [Root/PQ/rt3.dc1--account--topic:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:17:09.845174Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 2 generation 0 [4:183:2196] 2025-06-30T09:17:10.319213Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:17:10.319248Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-30T09:17:10.322856Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [5:183:2196] >>>> ADD BLOB 0 writeTimestamp=2025-06-30T09:17:10.313187Z >>>> ADD BLOB 1 writeTimestamp=2025-06-30T09:17:10.313196Z >>>> ADD BLOB 2 writeTimestamp=2025-06-30T09:17:10.313201Z >>>> ADD BLOB 3 writeTimestamp=2025-06-30T09:17:10.313207Z >>>> ADD BLOB 4 writeTimestamp=2025-06-30T09:17:10.313214Z >>>> ADD BLOB 5 writeTimestamp=2025-06-30T09:17:10.313220Z >>>> ADD BLOB 6 writeTimestamp=2025-06-30T09:17:10.313225Z >>>> ADD BLOB 7 writeTimestamp=2025-06-30T09:17:10.313229Z >>>> ADD BLOB 8 writeTimestamp=2025-06-30T09:17:10.313234Z >>>> ADD BLOB 9 writeTimestamp=2025-06-30T09:17:10.313243Z 2025-06-30T09:17:10.323398Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-30T09:17:10.000000Z 2025-06-30T09:17:10.323409Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 2 generation 0 [5:183:2196] >> TxUsage::WriteToTopic_Demo_23_RestartNo_Table >> TSubDomainTest::CreateTablet ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveOldTableWithIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:17:10.110661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:10.110687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:10.110693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:10.110699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:10.110711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:10.110716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:10.110726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:10.110758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:10.110867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:10.110936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:10.126828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:10.126855Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:10.130605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:10.130678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:10.130713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:10.132421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: 
TTxUpgradeSchema.Complete 2025-06-30T09:17:10.132505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:10.132625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:10.132698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:10.133500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:10.133548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:10.133837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:10.133849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:10.133874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:10.133885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:10.133892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:10.133915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:10.135767Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:10.161953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:10.162074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:10.162167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:10.162176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:10.162234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:10.162251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:10.163199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:10.163243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:10.163304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:10.163315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:10.163321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:10.163327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:10.167187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:10.167219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:10.167231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:10.175257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:10.175286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:10.175295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:10.175306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:10.176088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:10.179097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:10.179158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send 
Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:10.179395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:10.179434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:10.179443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:10.179518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:10.179528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:10.179567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:10.179598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:10.180926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:10.180938Z node 1 :FLAT_TX_SCHEMESHARD ... 
ESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-30T09:17:11.016106Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 329 RawX2: 8589936903 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-30T09:17:11.016120Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:2, shardIdx: 72057594046678944:2, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:11.016125Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-30T09:17:11.016131Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:2, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:17:11.016139Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:2 129 -> 240 2025-06-30T09:17:11.016287Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 332 RawX2: 8589936905 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-30T09:17:11.016295Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-30T09:17:11.016310Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 332 RawX2: 8589936905 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-30T09:17:11.016315Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-30T09:17:11.016326Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 332 RawX2: 8589936905 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-30T09:17:11.016333Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:11.016338Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:17:11.016343Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-30T09:17:11.016348Z 
node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240 2025-06-30T09:17:11.016737Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-30T09:17:11.016940Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:17:11.017269Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-30T09:17:11.017349Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-30T09:17:11.017356Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 102:2 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:11.017364Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 102:2 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 4], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-30T09:17:11.017376Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 2/3 2025-06-30T09:17:11.017380Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 2/3 2025-06-30T09:17:11.017385Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 2/3 2025-06-30T09:17:11.017388Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 2/3 2025-06-30T09:17:11.017393Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/3, is published: true 2025-06-30T09:17:11.017458Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:17:11.017508Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:17:11.017513Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:11.017519Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 102:0 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 2], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:17:11.017528Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 3/3 2025-06-30T09:17:11.017532Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 3/3 2025-06-30T09:17:11.017538Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 3/3 2025-06-30T09:17:11.017541Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 
3/3 2025-06-30T09:17:11.017546Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/3, is published: true 2025-06-30T09:17:11.017561Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:385:2351] message: TxId: 102 2025-06-30T09:17:11.017567Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 3/3 2025-06-30T09:17:11.017577Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:17:11.017583Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:17:11.017604Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:17:11.017610Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:17:11.017615Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:1 2025-06-30T09:17:11.017619Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:1 2025-06-30T09:17:11.017624Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-30T09:17:11.017629Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:17:11.017633Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:2 2025-06-30T09:17:11.017637Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:2 2025-06-30T09:17:11.017645Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-30T09:17:11.017649Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:17:11.017703Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:17:11.017709Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:17:11.017721Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:17:11.017728Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:17:11.017734Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: 
DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:17:11.017739Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:17:11.017745Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:17:11.018414Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:17:11.018426Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:486:2445] 2025-06-30T09:17:11.018443Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 3 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 >> TSchemeShardTestExtSubdomainReboots::SchemeLimits-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardTestExtSubdomainReboots::SchemeLimits-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSubDomainTest::DeleteTableAndThenForceDeleteSubDomain [GOOD] >> TSubDomainTest::DatashardRunAtOtherNodeWhenOneNodeIsStopped >> TTicketParserTest::LoginCheckRemovedUser [GOOD] >> TTicketParserTest::LoginEmptyTicketBad >> TSubDomainTest::CreateTableInsideAndForceDeleteSubDomain >> TTicketParserTest::LoginEmptyTicketBad [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query [GOOD] >> TSubDomainTest::StartAndStopTenanNode ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::SchemeLimits-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:16:38.873455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:38.873478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.873483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 
0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:38.873489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:38.873495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:38.873499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:38.873508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.873522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:38.873621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:38.873694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:38.886660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:16:38.886683Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:38.886801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.889835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:38.890831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:38.890870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:38.893014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:38.893069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:38.893197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.893274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:38.894278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.894332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:38.894606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:38.894618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.894627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:38.894635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:38.894641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:38.894670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:16:38.896025Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.917011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:38.917085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.917141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:38.917149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:38.917197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:38.917210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:38.917882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.917922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:38.917971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:16:38.917980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:38.917985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:38.917991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:38.918417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.918428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:38.918434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:38.918807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.918818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.918824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:38.918831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:38.919466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:38.919882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:38.919920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:38.920100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.920124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
ck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72075186233409546, cookie: 1005 2025-06-30T09:17:11.799246Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72075186233409546, cookie: 1005 2025-06-30T09:17:11.799250Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1005 2025-06-30T09:17:11.799255Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1005, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 2 2025-06-30T09:17:11.799260Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-30T09:17:11.799272Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 0/1, is published: true 2025-06-30T09:17:11.800305Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1005:4294967295 from tablet: 72075186233409546 to tablet: 72075186233409547 cookie: 0:1005 msg type: 269090816 2025-06-30T09:17:11.800352Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1005, partId: 4294967295, tablet: 72075186233409547 2025-06-30T09:17:11.800928Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1005 2025-06-30T09:17:11.801091Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestModificationResults wait txId: 1006 2025-06-30T09:17:11.801889Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpMkDir MkDir { Name: "B" } } TxId: 1006 TabletId: 72075186233409546 , at schemeshard: 72075186233409546 2025-06-30T09:17:11.801950Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/USER_0/B, operationId: 1006:0, at schemeshard: 72075186233409546 2025-06-30T09:17:11.801989Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72075186233409546, LocalPathId: 1], parent name: MyRoot/USER_0, child name: B, child id: [OwnerId: 72075186233409546, LocalPathId: 3], at schemeshard: 72075186233409546 2025-06-30T09:17:11.802005Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 0 2025-06-30T09:17:11.802030Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1006:0 type: TxMkDir target path: [OwnerId: 72075186233409546, LocalPathId: 3] source path: 2025-06-30T09:17:11.802045Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1006:1, 
propose status:StatusAccepted, reason: , at schemeshard: 72075186233409546 2025-06-30T09:17:11.802111Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-30T09:17:11.802126Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 1 2025-06-30T09:17:11.802682Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1006, response: Status: StatusAccepted TxId: 1006 SchemeshardId: 72075186233409546 PathId: 3, at schemeshard: 72075186233409546 2025-06-30T09:17:11.802729Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1006, database: /MyRoot/USER_0, subject: , status: StatusAccepted, operation: CREATE DIRECTORY, path: /MyRoot/USER_0/B 2025-06-30T09:17:11.802798Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-30T09:17:11.802805Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 1006, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-30T09:17:11.802840Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 1006, path id: [OwnerId: 72075186233409546, LocalPathId: 3] 2025-06-30T09:17:11.802861Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-30T09:17:11.802867Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [96:544:2476], at schemeshard: 72075186233409546, txId: 1006, path id: 1 2025-06-30T09:17:11.802874Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [96:544:2476], at schemeshard: 72075186233409546, txId: 1006, path id: 3 2025-06-30T09:17:11.802891Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1006:0, at schemeshard: 72075186233409546 2025-06-30T09:17:11.802899Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:63: MkDir::TPropose operationId# 1006:0 ProgressState, at schemeshard: 72075186233409546 2025-06-30T09:17:11.802908Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1006 ready parts: 1/1 2025-06-30T09:17:11.802936Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72075186233409547 message:Transaction { AffectedSet { TabletId: 72075186233409546 Flags: 2 } ExecLevel: 0 TxId: 1006 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409547 2025-06-30T09:17:11.803158Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-30T09:17:11.803178Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at 
schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-30T09:17:11.803183Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1006 2025-06-30T09:17:11.803189Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1006, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 6 2025-06-30T09:17:11.803195Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 6 2025-06-30T09:17:11.803356Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-30T09:17:11.803372Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-30T09:17:11.803377Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1006 2025-06-30T09:17:11.803382Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1006, pathId: [OwnerId: 72075186233409546, LocalPathId: 3], version: 2 2025-06-30T09:17:11.803388Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 2 2025-06-30T09:17:11.803402Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1006, ready parts: 0/1, is published: true 2025-06-30T09:17:11.804119Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1006:4294967295 from tablet: 72075186233409546 to tablet: 72075186233409547 cookie: 0:1006 msg type: 269090816 2025-06-30T09:17:11.804152Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1006, partId: 4294967295, tablet: 72075186233409547 2025-06-30T09:17:11.804352Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1006 2025-06-30T09:17:11.804383Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1006 TestModificationResult got TxId: 1006, wait until txId: 1006 TestModificationResults wait txId: 1007 2025-06-30T09:17:11.805139Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpMkDir MkDir { Name: "C" } } TxId: 1007 TabletId: 72075186233409546 , at schemeshard: 72075186233409546 2025-06-30T09:17:11.805189Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/USER_0/C, operationId: 1007:0, at schemeshard: 72075186233409546 
2025-06-30T09:17:11.805213Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1007:1, propose status:StatusResourceExhausted, reason: Check failed: path: '/MyRoot/USER_0/C', error: paths count limit exceeded, limit: 2, paths: 2, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155, at schemeshard: 72075186233409546 2025-06-30T09:17:11.805804Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1007, response: Status: StatusResourceExhausted Reason: "Check failed: path: \'/MyRoot/USER_0/C\', error: paths count limit exceeded, limit: 2, paths: 2, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" TxId: 1007 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:17:11.805848Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1007, database: /MyRoot/USER_0, subject: , status: StatusResourceExhausted, reason: Check failed: path: '/MyRoot/USER_0/C', error: paths count limit exceeded, limit: 2, paths: 2, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155, operation: CREATE DIRECTORY, path: /MyRoot/USER_0/C TestModificationResult got TxId: 1007, wait until txId: 1007 >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table >> TModifyUserTest::ModifyUser ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::SchemeLimits-AlterDatabaseCreateHiveFirst-false [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:16:38.958441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:38.958461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.958466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:38.958471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:38.958476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:38.958480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:38.958488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.958500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:38.958586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:38.958647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:38.971871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:16:38.971888Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:38.971983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.974375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:38.975362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:38.975395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:38.976812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:38.976851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:38.976949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.976999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:38.977747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.977790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:38.978000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:38.978008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.978016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:38.978022Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:38.978028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:38.978065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:16:38.979242Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.996559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:38.996615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.996662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:38.996670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:38.996717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:38.996729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:38.997264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.997295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:38.997343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.997351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:38.997355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:38.997360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:38.997696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.997706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:38.997712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:38.998018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.998028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.998047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:38.998054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:38.998713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:38.999096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:38.999126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:38.999300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.999325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
ck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72075186233409546, cookie: 1005 2025-06-30T09:17:11.897508Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72075186233409546, cookie: 1005 2025-06-30T09:17:11.897512Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1005 2025-06-30T09:17:11.897515Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1005, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 2 2025-06-30T09:17:11.897519Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-30T09:17:11.897527Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 0/1, is published: true 2025-06-30T09:17:11.898068Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1005:4294967295 from tablet: 72075186233409546 to tablet: 72075186233409547 cookie: 0:1005 msg type: 269090816 2025-06-30T09:17:11.898101Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1005, partId: 4294967295, tablet: 72075186233409547 2025-06-30T09:17:11.898339Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1005 2025-06-30T09:17:11.898453Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestModificationResults wait txId: 1006 2025-06-30T09:17:11.899050Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpMkDir MkDir { Name: "B" } } TxId: 1006 TabletId: 72075186233409546 , at schemeshard: 72075186233409546 2025-06-30T09:17:11.899087Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/USER_0/B, operationId: 1006:0, at schemeshard: 72075186233409546 2025-06-30T09:17:11.899113Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72075186233409546, LocalPathId: 1], parent name: MyRoot/USER_0, child name: B, child id: [OwnerId: 72075186233409546, LocalPathId: 3], at schemeshard: 72075186233409546 2025-06-30T09:17:11.899125Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 0 2025-06-30T09:17:11.899130Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1006:0 type: TxMkDir target path: [OwnerId: 72075186233409546, LocalPathId: 3] source path: 2025-06-30T09:17:11.899142Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1006:1, 
propose status:StatusAccepted, reason: , at schemeshard: 72075186233409546 2025-06-30T09:17:11.899191Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-30T09:17:11.899201Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 1 2025-06-30T09:17:11.899616Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1006, response: Status: StatusAccepted TxId: 1006 SchemeshardId: 72075186233409546 PathId: 3, at schemeshard: 72075186233409546 2025-06-30T09:17:11.899659Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1006, database: /MyRoot/USER_0, subject: , status: StatusAccepted, operation: CREATE DIRECTORY, path: /MyRoot/USER_0/B 2025-06-30T09:17:11.899697Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-30T09:17:11.899703Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 1006, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-30T09:17:11.899728Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 1006, path id: [OwnerId: 72075186233409546, LocalPathId: 3] 2025-06-30T09:17:11.899744Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-30T09:17:11.899749Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [96:544:2476], at schemeshard: 72075186233409546, txId: 1006, path id: 1 2025-06-30T09:17:11.899754Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [96:544:2476], at schemeshard: 72075186233409546, txId: 1006, path id: 3 2025-06-30T09:17:11.899768Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1006:0, at schemeshard: 72075186233409546 2025-06-30T09:17:11.899775Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:63: MkDir::TPropose operationId# 1006:0 ProgressState, at schemeshard: 72075186233409546 2025-06-30T09:17:11.899784Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1006 ready parts: 1/1 2025-06-30T09:17:11.899807Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72075186233409547 message:Transaction { AffectedSet { TabletId: 72075186233409546 Flags: 2 } ExecLevel: 0 TxId: 1006 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409547 2025-06-30T09:17:11.899985Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-30T09:17:11.900000Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at 
schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-30T09:17:11.900005Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1006 2025-06-30T09:17:11.900010Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1006, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 6 2025-06-30T09:17:11.900015Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 6 2025-06-30T09:17:11.900132Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-30T09:17:11.900143Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-30T09:17:11.900147Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1006 2025-06-30T09:17:11.900152Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1006, pathId: [OwnerId: 72075186233409546, LocalPathId: 3], version: 2 2025-06-30T09:17:11.900156Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 2 2025-06-30T09:17:11.900166Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1006, ready parts: 0/1, is published: true 2025-06-30T09:17:11.900716Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1006:4294967295 from tablet: 72075186233409546 to tablet: 72075186233409547 cookie: 0:1006 msg type: 269090816 2025-06-30T09:17:11.900743Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1006, partId: 4294967295, tablet: 72075186233409547 2025-06-30T09:17:11.900878Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1006 2025-06-30T09:17:11.900897Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1006 TestModificationResult got TxId: 1006, wait until txId: 1006 TestModificationResults wait txId: 1007 2025-06-30T09:17:11.901471Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpMkDir MkDir { Name: "C" } } TxId: 1007 TabletId: 72075186233409546 , at schemeshard: 72075186233409546 2025-06-30T09:17:11.901507Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/USER_0/C, operationId: 1007:0, at schemeshard: 72075186233409546 
2025-06-30T09:17:11.901526Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1007:1, propose status:StatusResourceExhausted, reason: Check failed: path: '/MyRoot/USER_0/C', error: paths count limit exceeded, limit: 2, paths: 2, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155, at schemeshard: 72075186233409546 2025-06-30T09:17:11.901930Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1007, response: Status: StatusResourceExhausted Reason: "Check failed: path: \'/MyRoot/USER_0/C\', error: paths count limit exceeded, limit: 2, paths: 2, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" TxId: 1007 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:17:11.901959Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1007, database: /MyRoot/USER_0, subject: , status: StatusResourceExhausted, reason: Check failed: path: '/MyRoot/USER_0/C', error: paths count limit exceeded, limit: 2, paths: 2, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155, operation: CREATE DIRECTORY, path: /MyRoot/USER_0/C TestModificationResult got TxId: 1007, wait until txId: 1007 >> BasicUsage::ConflictingWrites [GOOD] >> Describe::LocationWithKillTablets ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::LoginEmptyTicketBad [GOOD] Test command err: 2025-06-30T09:16:40.305674Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668852984400234:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.305704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043d6/r3tmp/tmpkju6PV/pdisk_1.dat 2025-06-30T09:16:40.366569Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.369428Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521668852984400214:2080] 1751275000305522 != 1751275000305525 TServer::EnableGrpc on GrpcPort 2340, node 1 2025-06-30T09:16:40.395895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.395911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.395913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:40.395981Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:40.409980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.410016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.414977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3623 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:40.441125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:40.444639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:16:40.522817Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:16:40.528843Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:16:40.528859Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:40.529395Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****4LYQ (7220CC01) () has now valid token of user1 2025-06-30T09:16:40.529400Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-30T09:16:40.873862Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668854469703972:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:40.873964Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043d6/r3tmp/tmpnoSGmK/pdisk_1.dat 2025-06-30T09:16:40.889881Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:40.890208Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521668854469703858:2080] 1751275000872089 != 1751275000872092 TServer::EnableGrpc on GrpcPort 11066, node 2 2025-06-30T09:16:40.903662Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:40.903679Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:40.903681Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-30T09:16:40.903741Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26842 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:40.977682Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:40.977724Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:40.978064Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:16:40.978912Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:41.022882Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:16:41.027103Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:16:41.027120Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:41.027362Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****Y3VA (C6D2C18A) () has now valid token of user1 2025-06-30T09:16:41.027368Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-30T09:16:41.384580Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521668855702264852:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.384603Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043d6/r3tmp/tmpo2GSVi/pdisk_1.dat 2025-06-30T09:16:41.400181Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7819, node 3 2025-06-30T09:16:41.410854Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:41.410881Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:41.410883Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:41.410937Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32064 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:16:41.488500Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.488539Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.488989Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:41.489582Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:16:41.535506Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:16:41.538441Z node 3 :TICKET_PARSER TRACE: t ... ode 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:16:41.538585Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****6Bag (C1E6198A) () has now valid token of user1 2025-06-30T09:16:41.538587Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-30T09:16:41.538753Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:16:42.386350Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:45.391517Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****6Bag (C1E6198A) 2025-06-30T09:16:45.391675Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****6Bag (C1E6198A) () has now valid token of user1 2025-06-30T09:16:46.385127Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7521668855702264852:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:46.385176Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:16:48.392676Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****6Bag (C1E6198A) 2025-06-30T09:16:48.392807Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****6Bag (C1E6198A) () has now valid token of user1 2025-06-30T09:16:51.539016Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:16:52.395651Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****6Bag (C1E6198A) 2025-06-30T09:16:52.395782Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****6Bag (C1E6198A) () has now valid token of user1 2025-06-30T09:16:56.395626Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs 2025-06-30T09:16:56.395647Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:56.397771Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****6Bag (C1E6198A) 2025-06-30T09:16:56.397921Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****6Bag (C1E6198A) () has now 
valid token of user1 2025-06-30T09:17:00.402653Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****6Bag (C1E6198A) 2025-06-30T09:17:00.402795Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****6Bag (C1E6198A) () has now valid token of user1 2025-06-30T09:17:01.835510Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521668945550372010:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:01.835533Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043d6/r3tmp/tmpgD2RWM/pdisk_1.dat 2025-06-30T09:17:01.857137Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:01.857400Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521668945550371992:2080] 1751275021835375 != 1751275021835378 TServer::EnableGrpc on GrpcPort 17644, node 4 2025-06-30T09:17:01.875033Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:01.875055Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:01.875057Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:01.875119Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12944 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:17:01.943117Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:01.943161Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:01.943577Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:01.944460Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:17:01.954459Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:17:01.983119Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:17:01.987099Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:17:01.987117Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:17:01.987363Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****QJ3A (6324FE45) () has now valid token of user1 2025-06-30T09:17:01.987367Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-30T09:17:01.990884Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:17:02.837774Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:05.838910Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****QJ3A (6324FE45) 2025-06-30T09:17:05.839020Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****QJ3A (6324FE45) () has now permanent error message 'User not found' 2025-06-30T09:17:06.835949Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7521668945550372010:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:06.836010Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:17:10.841745Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****QJ3A (6324FE45) 2025-06-30T09:17:12.184303Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521668990571920782:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:12.184439Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043d6/r3tmp/tmpQHHJjO/pdisk_1.dat TServer::EnableGrpc on GrpcPort 31757, node 5 2025-06-30T09:17:12.201866Z node 5 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:12.209948Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:12.209964Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:12.209966Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:12.210035Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64242 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:12.288827Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:12.288865Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:12.289459Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:12.290081Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:17:12.302976Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:17:12.474807Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:17:12.478784Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:916: Ticket **** (00000000): Ticket is empty >> TSubDomainTest::CreateTablet [GOOD] >> TSubDomainTest::CreateTabletForUnknownDomain >> TSubDomainTest::LsLs >> TSubDomainTest::DatashardRunAtOtherNodeWhenOneNodeIsStopped [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::DatashardRunAtOtherNodeWhenOneNodeIsStopped [GOOD] Test command err: 2025-06-30T09:17:11.240300Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668986543276216:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:11.240318Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00416f/r3tmp/tmpqlUVYw/pdisk_1.dat 2025-06-30T09:17:11.311257Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:11.324215Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:17:11.340693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:11.340725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:11.342207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64812 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-30T09:17:11.348175Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521668986543276405:2117] Handle TEvNavigate describe path dc-1 2025-06-30T09:17:11.350424Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521668986543276864:2434] HANDLE EvNavigateScheme dc-1 2025-06-30T09:17:11.350476Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521668986543276540:2197], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:11.350488Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521668986543276540:2197], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:17:11.350537Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521668986543276865:2435][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:17:11.351108Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521668986543276124:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521668986543276870:2435] 2025-06-30T09:17:11.351132Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521668986543276121:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521668986543276869:2435] 2025-06-30T09:17:11.351138Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521668986543276124:2053] Subscribe: subscriber# [1:7521668986543276870:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:11.351148Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521668986543276121:2050] Subscribe: subscriber# [1:7521668986543276869:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:11.351162Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521668986543276127:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521668986543276871:2435] 2025-06-30T09:17:11.351172Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521668986543276870:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668986543276124:2053] 2025-06-30T09:17:11.351172Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521668986543276127:2056] Subscribe: subscriber# [1:7521668986543276871:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:11.351179Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521668986543276869:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668986543276121:2050] 2025-06-30T09:17:11.351182Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521668986543276124:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521668986543276870:2435] 2025-06-30T09:17:11.351185Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521668986543276871:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: 
[OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668986543276127:2056] 2025-06-30T09:17:11.351186Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521668986543276121:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521668986543276869:2435] 2025-06-30T09:17:11.351191Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521668986543276127:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521668986543276871:2435] 2025-06-30T09:17:11.351191Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521668986543276865:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668986543276867:2435] 2025-06-30T09:17:11.351201Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521668986543276865:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668986543276866:2435] 2025-06-30T09:17:11.351214Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521668986543276865:2435][/dc-1] Set up state: owner# [1:7521668986543276540:2197], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:11.351260Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521668986543276865:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668986543276868:2435] 2025-06-30T09:17:11.351275Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521668986543276865:2435][/dc-1] Path was already updated: owner# [1:7521668986543276540:2197], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:11.351289Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521668986543276869:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668986543276866:2435], cookie# 1 2025-06-30T09:17:11.351297Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521668986543276870:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668986543276867:2435], cookie# 1 2025-06-30T09:17:11.351301Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521668986543276871:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668986543276868:2435], cookie# 1 2025-06-30T09:17:11.351310Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668986543276121:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668986543276869:2435], cookie# 1 2025-06-30T09:17:11.351320Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668986543276124:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668986543276870:2435], cookie# 1 
2025-06-30T09:17:11.351330Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668986543276127:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668986543276871:2435], cookie# 1 2025-06-30T09:17:11.351342Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521668986543276869:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668986543276121:2050], cookie# 1 2025-06-30T09:17:11.351352Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521668986543276870:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668986543276124:2053], cookie# 1 2025-06-30T09:17:11.351355Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521668986543276871:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668986543276127:2056], cookie# 1 2025-06-30T09:17:11.351360Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521668986543276865:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668986543276866:2435], cookie# 1 2025-06-30T09:17:11.351367Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521668986543276865:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:17:11.351370Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521668986543276865:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668986543276867:2435], cookie# 1 2025-06-30T09:17:11.351372Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521668986543276865:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:17:11.351375Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521668986543276865:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668986543276868:2435], cookie# 1 2025-06-30T09:17:11.351379Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521668986543276865:2435][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:17:11.365822Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521668986543276540:2197], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: ... 930652:2104], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/workload_manager/running_requests PathId: Strong: 0 } 2025-06-30T09:17:14.373861Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [4:7521668989776930652:2104], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/workload_manager/running_requests PathId: Strong: 0 }, by path# { Subscriber: { Subscriber: [4:7521668998366865767:2334] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:14.373873Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7521668989776930652:2104], cacheItem# { Subscriber: { Subscriber: [4:7521668998366865767:2334] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:17:14.373878Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [4:7521668989776930652:2104], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 0 } 2025-06-30T09:17:14.373883Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [4:7521668989776930652:2104], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 0 }, by path# { Subscriber: { Subscriber: [4:7521668998366865768:2335] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:14.373888Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7521668989776930652:2104], cacheItem# { Subscriber: { Subscriber: [4:7521668998366865768:2335] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:17:14.373897Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7521668998366865789:2336], recipient# [4:7521668998366865765:2293], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath 
RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:14.373906Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:7521668998366865766:2333][/dc-1/USER_0/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [4:7521668998366865771:2333] 2025-06-30T09:17:14.373910Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:7521668998366865766:2333][/dc-1/USER_0/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [4:7521668998366865772:2333] 2025-06-30T09:17:14.373913Z node 4 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][4:7521668998366865766:2333][/dc-1/USER_0/.metadata/workload_manager/delayed_requests] Set up state: owner# [4:7521668989776930652:2104], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:14.373916Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:7521668998366865766:2333][/dc-1/USER_0/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [4:7521668998366865773:2333] 2025-06-30T09:17:14.373920Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][4:7521668998366865766:2333][/dc-1/USER_0/.metadata/workload_manager/delayed_requests] Ignore empty state: owner# [4:7521668989776930652:2104], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:14.373924Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [4:7521668989776930652:2104], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/workload_manager/delayed_requests PathId: Strong: 0 } 2025-06-30T09:17:14.373928Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [4:7521668989776930652:2104], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/workload_manager/delayed_requests PathId: Strong: 0 }, by path# { Subscriber: { Subscriber: [4:7521668998366865766:2333] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:14.373934Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7521668989776930652:2104], cacheItem# { Subscriber: { Subscriber: [4:7521668998366865766:2333] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:17:14.373941Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# 
[4:7521668998366865790:2337], recipient# [4:7521668998366865764:2292], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:14.376035Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7521668998366865764:2292], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:17:14.376066Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:17:14.428801Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7521668989776930652:2104], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:14.428839Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7521668989776930652:2104], cacheItem# { Subscriber: { Subscriber: [4:7521668998366865766:2333] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:17:14.428850Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7521668989776930652:2104], cacheItem# { Subscriber: { Subscriber: [4:7521668998366865767:2334] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:17:14.428872Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7521668998366865791:2338], recipient# [4:7521668998366865764:2292], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:14.429019Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: 
[4:7521668998366865764:2292], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } |79.0%| [TA] $(B)/ydb/core/security/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartNo_Query [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query >> TxUsage::WriteToTopic_Demo_16_Table [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Table >> TSchemeShardTestExtSubdomainReboots::AlterForceDrop-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSubDomainTest::LsLs [GOOD] >> TSubDomainTest::LsAltered >> TxUsage::WriteToTopic_Demo_16_Query >> TModifyUserTest::ModifyUser [GOOD] >> TModifyUserTest::ModifyLdapUser >> TSubDomainTest::CreateTabletForUnknownDomain [GOOD] >> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped >> TSubDomainTest::CreateTableInsideAndForceDeleteSubDomain [GOOD] >> TSubDomainTest::CreateTableInsidetThenStopTenantAndForceDeleteSubDomain >> TSubDomainTest::Boot >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW+VolatileTxs >> TxUsage::WriteToTopic_Demo_11_Table [GOOD] >> TSubDomainTest::StartAndStopTenanNode [GOOD] >> TSubDomainTest::StartTenanNodeAndStopAtDestructor >> TxUsage::WriteToTopic_Demo_27_Query [GOOD] >> TSubDomainTest::LsAltered [GOOD] >> TxUsage::WriteToTopic_Demo_45_Table |78.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::AlterForceDrop-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:16:38.939555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:38.939574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.939578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:38.939581Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:38.939585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:38.939587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:38.939594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.939609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:38.939679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:38.939741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:38.950512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:16:38.950531Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:38.950613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.952843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:38.953581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:38.953610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:38.954985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:38.955021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:38.955106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.955153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:38.955856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.955898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:38.956093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:38.956103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.956112Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:38.956119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:38.956125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:38.956146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:16:38.957200Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.971084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:38.971145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.971201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:38.971207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:38.971249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:38.971266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:38.971831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.971863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:38.971903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.971909Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:38.971913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:38.971917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:38.972252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.972260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:38.972263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:38.972566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.972572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.972576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:38.972581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:38.973000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:38.973307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:38.973332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:38.973487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.973512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
ount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:17:17.466186Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-30T09:17:17.466227Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:17:17.466261Z node 134 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 2025-06-30T09:17:17.466316Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:17:17.466342Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:17:17.466397Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-30T09:17:17.466420Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:17:17.466488Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:17:17.466496Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:17:17.466532Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:17:17.466581Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:17:17.466588Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:17:17.466600Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:17:17.467271Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-30T09:17:17.467297Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-30T09:17:17.467309Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-30T09:17:17.467322Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-30T09:17:17.467351Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-30T09:17:17.467747Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-30T09:17:17.467785Z node 134 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:17:17.467798Z node 134 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 1003 2025-06-30T09:17:17.467873Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:17:17.467881Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 TestWaitNotification wait txId: 1004 2025-06-30T09:17:17.467896Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-30T09:17:17.467901Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-30T09:17:17.468000Z node 134 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:17:17.468024Z node 134 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-30T09:17:17.468034Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:17:17.468039Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [134:387:2375] 2025-06-30T09:17:17.468058Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:17:17.468063Z node 134 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [134:387:2375] TestWaitNotification: OK eventTxId 1003 TestWaitNotification: OK eventTxId 1004 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted wait until 72075186233409549 is deleted wait until 72075186233409550 is deleted wait until 72075186233409551 is deleted 2025-06-30T09:17:17.468141Z node 134 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-30T09:17:17.468151Z node 134 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-30T09:17:17.468160Z node 134 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 2025-06-30T09:17:17.468168Z node 134 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409549 2025-06-30T09:17:17.468177Z node 134 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409550 2025-06-30T09:17:17.468186Z node 134 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 
72075186233409551 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 Deleted tabletId 72075186233409549 Deleted tabletId 72075186233409550 Deleted tabletId 72075186233409551 2025-06-30T09:17:17.468281Z node 134 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:17:17.468540Z node 134 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 269us result status StatusPathDoesNotExist 2025-06-30T09:17:17.468597Z node 134 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:17:17.468665Z node 134 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:17:17.468688Z node 134 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 27us result status StatusSuccess 2025-06-30T09:17:17.468784Z node 134 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 
ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Waiting until shard idx 72057594046678944:1 is deleted Waiting until shard idx 72057594046678944:2 is deleted Waiting until shard idx 72057594046678944:3 is deleted Waiting until shard idx 72057594046678944:4 is deleted Waiting until shard idx 72057594046678944:5 is deleted Waiting until shard idx 72057594046678944:6 is deleted Deleted shard idx 72057594046678944:1 Deleted shard idx 72057594046678944:2 Deleted shard idx 72057594046678944:3 Deleted shard idx 72057594046678944:4 Deleted shard idx 72057594046678944:5 Deleted shard idx 72057594046678944:6 >> TModifyUserTest::ModifyLdapUser [GOOD] >> TModifyUserTest::ModifyUserIsEnabled >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table [GOOD] |79.0%| [TA] {RESULT} $(B)/ydb/core/security/ut/test-results/unittest/{meta.json ... results_accumulator.log} |79.0%| [LD] {RESULT} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut >> TxUsage::WriteToTopic_Demo_11_Query >> TxUsage::Sinks_Oltp_WriteToTopic_3_Query >> TModifyUserTest::ModifyUserIsEnabled [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table [GOOD] |79.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::LsAltered [GOOD] Test command err: 2025-06-30T09:17:14.772378Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669001424910316:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:14.772410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004145/r3tmp/tmpJ8KChm/pdisk_1.dat 2025-06-30T09:17:15.210357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:15.210386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:15.211633Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:15.233596Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:15.235358Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669001424910295:2080] 1751275034772205 != 1751275034772208 TClient is connected to server localhost:28026 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-30T09:17:15.639035Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521669001424910525:2103] Handle TEvNavigate describe path dc-1 2025-06-30T09:17:15.684616Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521669005719878103:2260] HANDLE EvNavigateScheme dc-1 2025-06-30T09:17:15.684656Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521669001424910550:2117], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:15.684667Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521669001424910550:2117], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:17:15.684717Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521669005719878104:2261][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:17:15.686415Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521669001424910264:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521669005719878108:2261] 2025-06-30T09:17:15.686433Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521669001424910264:2049] Subscribe: subscriber# [1:7521669005719878108:2261], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:15.686446Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521669001424910267:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521669005719878109:2261] 2025-06-30T09:17:15.686449Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521669001424910267:2052] Subscribe: subscriber# [1:7521669005719878109:2261], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:15.686679Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521669001424910270:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521669005719878110:2261] 2025-06-30T09:17:15.686682Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521669001424910270:2055] Subscribe: subscriber# [1:7521669005719878110:2261], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:15.686691Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521669005719878108:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669001424910264:2049] 2025-06-30T09:17:15.686695Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521669005719878109:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669001424910267:2052] 2025-06-30T09:17:15.686698Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521669005719878110:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669001424910270:2055] 2025-06-30T09:17:15.686703Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: 
[main][1:7521669005719878104:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669005719878105:2261] 2025-06-30T09:17:15.686709Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521669005719878104:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669005719878106:2261] 2025-06-30T09:17:15.686983Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521669001424910264:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521669005719878108:2261] 2025-06-30T09:17:15.686990Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521669001424910267:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521669005719878109:2261] 2025-06-30T09:17:15.686992Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521669001424910270:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521669005719878110:2261] 2025-06-30T09:17:15.699087Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521669005719878104:2261][/dc-1] Set up state: owner# [1:7521669001424910550:2117], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:15.699160Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521669005719878104:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669005719878107:2261] 2025-06-30T09:17:15.699170Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521669005719878104:2261][/dc-1] Path was already updated: owner# [1:7521669001424910550:2117], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:15.699180Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669005719878108:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669005719878105:2261], cookie# 1 2025-06-30T09:17:15.699183Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669005719878109:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669005719878106:2261], cookie# 1 2025-06-30T09:17:15.699187Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669005719878110:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669005719878107:2261], cookie# 1 2025-06-30T09:17:15.702813Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521669001424910264:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669005719878108:2261], cookie# 1 2025-06-30T09:17:15.702829Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521669001424910267:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669005719878109:2261], cookie# 1 
2025-06-30T09:17:15.702834Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521669001424910270:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669005719878110:2261], cookie# 1 2025-06-30T09:17:15.702841Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669005719878108:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669001424910264:2049], cookie# 1 2025-06-30T09:17:15.702845Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669005719878109:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669001424910267:2052], cookie# 1 2025-06-30T09:17:15.702847Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669005719878110:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669001424910270:2055], cookie# 1 2025-06-30T09:17:15.702852Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669005719878104:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669005719878105:2261], cookie# 1 2025-06-30T09:17:15.702858Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521669005719878104:2261][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:17:15.702862Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669005719878104:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669005719878106:2261], cookie# 1 2025-06-30T09:17:15.702864Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521669005719878104:2261][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:17:15.702868Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669005719878104:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669005719878107:2261], cookie# 1 2025-06-30T09:17:15.702871Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521669005719878104:2261][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:17:15.816395Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521669001424910550:2117], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 Shard ... 
mits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046644480 Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275037967 ParentPathId: 1 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 ... (TRUNCATED) TClient::Ls request: /dc-1 2025-06-30T09:17:18.438365Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [2:7521669012282068491:2103] Handle TEvNavigate describe path /dc-1 2025-06-30T09:17:18.439770Z node 2 :TX_PROXY DEBUG: describe.cpp:272: Actor# [2:7521669016577036301:2332] HANDLE EvNavigateScheme /dc-1 2025-06-30T09:17:18.439801Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7521669012282068561:2106], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:18.439822Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][2:7521669012282068851:2255][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [2:7521669012282068561:2106], cookie# 4 2025-06-30T09:17:18.439837Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7521669012282068855:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7521669012282068852:2255], cookie# 4 2025-06-30T09:17:18.439841Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7521669012282068856:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7521669012282068853:2255], cookie# 4 2025-06-30T09:17:18.439845Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7521669012282068857:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7521669012282068854:2255], cookie# 4 2025-06-30T09:17:18.439853Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7521669012282068316:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7521669012282068855:2255], cookie# 4 2025-06-30T09:17:18.439859Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7521669012282068319:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7521669012282068856:2255], cookie# 4 
2025-06-30T09:17:18.439864Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7521669012282068322:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7521669012282068857:2255], cookie# 4 2025-06-30T09:17:18.439869Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7521669012282068855:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7521669012282068316:2049], cookie# 4 2025-06-30T09:17:18.439872Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7521669012282068856:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7521669012282068319:2052], cookie# 4 2025-06-30T09:17:18.439874Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7521669012282068857:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7521669012282068322:2055], cookie# 4 2025-06-30T09:17:18.439878Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:7521669012282068851:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7521669012282068852:2255], cookie# 4 2025-06-30T09:17:18.439883Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][2:7521669012282068851:2255][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:17:18.439888Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:7521669012282068851:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7521669012282068853:2255], cookie# 4 2025-06-30T09:17:18.439890Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][2:7521669012282068851:2255][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:17:18.439892Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:7521669012282068851:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7521669012282068854:2255], cookie# 4 2025-06-30T09:17:18.439896Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][2:7521669012282068851:2255][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:17:18.439903Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [2:7521669012282068561:2106], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-30T09:17:18.439915Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [2:7521669012282068561:2106], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [2:7521669012282068851:2255] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1751275037953 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:18.439926Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7521669012282068561:2106], cacheItem# { Subscriber: { Subscriber: [2:7521669012282068851:2255] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 
} Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1751275037953 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-30T09:17:18.439974Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7521669016577036302:2333], recipient# [2:7521669016577036301:2332], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:17:18.439980Z node 2 :TX_PROXY DEBUG: describe.cpp:356: Actor# [2:7521669016577036301:2332] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-30T09:17:18.439996Z node 2 :TX_PROXY DEBUG: describe.cpp:435: Actor# [2:7521669016577036301:2332] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2025-06-30T09:17:18.440170Z node 2 :TX_PROXY DEBUG: describe.cpp:448: Actor# [2:7521669016577036301:2332] Handle TEvDescribeSchemeResult Forward to# [2:7521669016577036300:2331] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275037953 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 
MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275037953 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275037967 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } Children { Name: ".sys" PathId: 18446744073709551615 ... (TRUNCATED) >> TSubDomainTest::CreateTableInsidetThenStopTenantAndForceDeleteSubDomain [GOOD] >> TSubDomainTest::CreateTableInsideSubDomain >> TSubDomainTest::StartTenanNodeAndStopAtDestructor [GOOD] |79.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> TSubDomainTest::Boot [GOOD] >> TSubDomainTest::CheckAccessCopyTable >> TxUsage::WriteToTopic_Demo_23_RestartNo_Table [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TModifyUserTest::ModifyUserIsEnabled [GOOD] Test command err: 2025-06-30T09:17:14.446002Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669000395882636:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:14.446256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004152/r3tmp/tmpMKKy6N/pdisk_1.dat 2025-06-30T09:17:14.906057Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:14.908623Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669000395882616:2080] 1751275034444563 != 1751275034444566 2025-06-30T09:17:14.922059Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:14.922091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:14.927834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13468 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-30T09:17:15.370882Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521669000395882823:2097] Handle TEvNavigate describe path dc-1 2025-06-30T09:17:15.397867Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521669004690850422:2260] HANDLE EvNavigateScheme dc-1 2025-06-30T09:17:15.397906Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521669000395882939:2146], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:15.397917Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521669000395882939:2146], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:17:15.398209Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521669004690850423:2261][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:17:15.399854Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521669000395882585:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521669004690850427:2261] 2025-06-30T09:17:15.399871Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521669000395882585:2049] Subscribe: subscriber# [1:7521669004690850427:2261], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:15.399885Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521669000395882588:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521669004690850428:2261] 2025-06-30T09:17:15.399888Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521669000395882588:2052] Subscribe: subscriber# [1:7521669004690850428:2261], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:15.399893Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521669000395882591:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521669004690850429:2261] 2025-06-30T09:17:15.399895Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521669000395882591:2055] Subscribe: subscriber# [1:7521669004690850429:2261], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:15.399906Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521669004690850427:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669000395882585:2049] 2025-06-30T09:17:15.399910Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521669004690850428:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669000395882588:2052] 2025-06-30T09:17:15.400121Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521669004690850429:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669000395882591:2055] 2025-06-30T09:17:15.400127Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: 
[main][1:7521669004690850423:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669004690850424:2261] 2025-06-30T09:17:15.400134Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521669004690850423:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669004690850425:2261] 2025-06-30T09:17:15.400378Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521669004690850423:2261][/dc-1] Set up state: owner# [1:7521669000395882939:2146], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:15.401085Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521669004690850423:2261][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669004690850426:2261] 2025-06-30T09:17:15.401093Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521669004690850423:2261][/dc-1] Path was already updated: owner# [1:7521669000395882939:2146], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:15.401100Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669004690850427:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004690850424:2261], cookie# 1 2025-06-30T09:17:15.401103Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669004690850428:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004690850425:2261], cookie# 1 2025-06-30T09:17:15.401106Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669004690850429:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004690850426:2261], cookie# 1 2025-06-30T09:17:15.401111Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521669000395882585:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521669004690850427:2261] 2025-06-30T09:17:15.401114Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521669000395882585:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004690850427:2261], cookie# 1 2025-06-30T09:17:15.401118Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521669000395882588:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521669004690850428:2261] 2025-06-30T09:17:15.401120Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521669000395882588:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004690850428:2261], cookie# 1 2025-06-30T09:17:15.401122Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521669000395882591:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521669004690850429:2261] 
2025-06-30T09:17:15.401124Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521669000395882591:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004690850429:2261], cookie# 1 2025-06-30T09:17:15.402966Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669004690850427:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669000395882585:2049], cookie# 1 2025-06-30T09:17:15.402972Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669004690850428:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669000395882588:2052], cookie# 1 2025-06-30T09:17:15.402975Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669004690850429:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669000395882591:2055], cookie# 1 2025-06-30T09:17:15.402982Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669004690850423:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669004690850424:2261], cookie# 1 2025-06-30T09:17:15.402989Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521669004690850423:2261][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:17:15.402993Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669004690850423:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669004690850425:2261], cookie# 1 2025-06-30T09:17:15.402996Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521669004690850423:2261][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:17:15.402999Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669004690850423:2261][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669004690850426:2261], cookie# 1 2025-06-30T09:17:15.403005Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521669004690850423:2261][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:17:15.457106Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:15.555914Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521669000395882939:2146], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057 ... 
018316512568:2104], cacheItem# { Subscriber: { Subscriber: [3:7521669018316512843:2255] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 9 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1751275039101 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 9 IsSync: true Partial: 0 } 2025-06-30T09:17:19.205448Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521669022611480232:2330], recipient# [3:7521669022611480231:2329], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } }] } 2025-06-30T09:17:19.205454Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [3:7521669022611480231:2329] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:17:19.205460Z node 3 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [3:7521669022611480231:2329] txid# 281474976715662, Access denied for user2 on path /dc-1, with access AlterSchema 2025-06-30T09:17:19.205472Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521669022611480231:2329] txid# 281474976715662, issues: { message: "Access denied for user2 on path /dc-1" issue_code: 200000 severity: 1 } 2025-06-30T09:17:19.205476Z node 3 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [3:7521669022611480231:2329] txid# 281474976715662 SEND to# [3:7521669022611480230:2328] Source {TEvProposeTransactionStatus Status# 5} 2025-06-30T09:17:19.206514Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [3:7521669018316512395:2097] Handle TEvProposeTransaction 2025-06-30T09:17:19.206519Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [3:7521669018316512395:2097] TxId# 281474976715663 ProcessProposeTransaction 2025-06-30T09:17:19.206524Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [3:7521669018316512395:2097] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [3:7521669022611480234:2332] 2025-06-30T09:17:19.207018Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [3:7521669022611480234:2332] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { ModifyUser { User: "user2" Password: "password" CanLogin: false } } } } UserToken: 
"\n\005user2\022\030\022\026\n\024all-users@well-known\032\322\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MTMxODIzOSwiaWF0IjoxNzUxMjc1MDM5LCJzdWIiOiJ1c2VyMiJ9.Iwor7MrVsxwS7DmTvYthJ95LmXvehVUaT_R6mqLgQJ-RpMX2YX-hFzJK1VEFM09PLiQGMEsyvwOGGVUZ2u_x4dOp7nikfElSlZRh2X72qG_O2BRS2ugJ8fq6Jc5cRxIs5U_7kJhiHQ42syaxeVkwwos6VFZhDrs30EXppX-0BiLqguP7nNLp68TDm83Pqmzi4h2jvGwtF4Km9JQWrX7me0K1TLAvvLIr5hYlMIFWsmao1eKhsc-U7gfgVnHSsTXwv5MKmjDRUuOrtsHxN2SLILIWj2txM0Su_V4eUPO6BJBROcp9sSeNU4aPOzs9AxJOWG3YAMYHebmUmdv1FsAjzQ\"\005Login*~eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MTMxODIzOSwiaWF0IjoxNzUxMjc1MDM5LCJzdWIiOiJ1c2VyMiJ9.**" PeerName: "" 2025-06-30T09:17:19.207026Z node 3 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [3:7521669022611480234:2332] txid# 281474976715663 Bootstrap, UserSID: user2 CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:17:19.207028Z node 3 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [3:7521669022611480234:2332] txid# 281474976715663 Bootstrap, UserSID: user2 IsClusterAdministrator: 1 2025-06-30T09:17:19.207037Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [3:7521669022611480234:2332] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:17:19.207048Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521669018316512568:2104], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:19.207060Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][3:7521669018316512843:2255][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:7521669018316512568:2104], cookie# 10 2025-06-30T09:17:19.207069Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7521669018316512847:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7521669018316512844:2255], cookie# 10 2025-06-30T09:17:19.207073Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7521669018316512848:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7521669018316512845:2255], cookie# 10 2025-06-30T09:17:19.207076Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7521669018316512849:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7521669018316512846:2255], cookie# 10 2025-06-30T09:17:19.207080Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7521669018316512315:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7521669018316512849:2255], cookie# 10 2025-06-30T09:17:19.207085Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7521669018316512849:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7521669018316512315:2055], cookie# 10 2025-06-30T09:17:19.207089Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][3:7521669018316512843:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7521669018316512846:2255], cookie# 10 2025-06-30T09:17:19.207092Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: 
[main][3:7521669018316512843:2255][/dc-1] Sync is in progress: cookie# 10, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:17:19.207095Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7521669018316512309:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7521669018316512847:2255], cookie# 10 2025-06-30T09:17:19.207099Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7521669018316512312:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7521669018316512848:2255], cookie# 10 2025-06-30T09:17:19.207102Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7521669018316512847:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7521669018316512309:2049], cookie# 10 2025-06-30T09:17:19.207105Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7521669018316512848:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7521669018316512312:2052], cookie# 10 2025-06-30T09:17:19.207108Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][3:7521669018316512843:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7521669018316512844:2255], cookie# 10 2025-06-30T09:17:19.207110Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][3:7521669018316512843:2255][/dc-1] Sync is in progress: cookie# 10, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:17:19.207112Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][3:7521669018316512843:2255][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7521669018316512845:2255], cookie# 10 2025-06-30T09:17:19.207115Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][3:7521669018316512843:2255][/dc-1] Sync is done in the ring group: cookie# 10, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:17:19.207120Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7521669018316512568:2104], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-30T09:17:19.207130Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7521669018316512568:2104], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [3:7521669018316512843:2255] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 10 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1751275039101 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:19.207139Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7521669018316512568:2104], cacheItem# { Subscriber: { Subscriber: [3:7521669018316512843:2255] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 10 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1751275039101 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: 
OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 10 IsSync: true Partial: 0 } 2025-06-30T09:17:19.207165Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521669022611480235:2333], recipient# [3:7521669022611480234:2332], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } }] } 2025-06-30T09:17:19.207171Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [3:7521669022611480234:2332] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:17:19.207177Z node 3 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [3:7521669022611480234:2332] txid# 281474976715663, Access denied for user2 on path /dc-1, with access AlterSchema 2025-06-30T09:17:19.207190Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521669022611480234:2332] txid# 281474976715663, issues: { message: "Access denied for user2 on path /dc-1" issue_code: 200000 severity: 1 } 2025-06-30T09:17:19.207194Z node 3 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [3:7521669022611480234:2332] txid# 281474976715663 SEND to# [3:7521669022611480233:2331] Source {TEvProposeTransactionStatus Status# 5} >> TxUsage::WriteToTopic_Demo_23_RestartNo_Query >> TSubDomainTest::CreateTableInsideSubDomain [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::StartTenanNodeAndStopAtDestructor [GOOD] Test command err: 2025-06-30T09:17:14.745550Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668999884419528:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:14.745655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00414d/r3tmp/tmpbP8IdM/pdisk_1.dat 2025-06-30T09:17:15.268198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:15.268230Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:15.279952Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:15.283475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:15.747547Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31283 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-30T09:17:16.071520Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521668999884419632:2139] Handle TEvNavigate describe path dc-1 2025-06-30T09:17:16.106350Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521669008474354683:2444] HANDLE EvNavigateScheme dc-1 2025-06-30T09:17:16.106396Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521668999884419657:2153], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:16.106690Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][1:7521669004179387366:2435][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7521668999884419657:2153], cookie# 1 2025-06-30T09:17:16.118552Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669004179387370:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004179387367:2435], cookie# 1 2025-06-30T09:17:16.125852Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669004179387371:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004179387368:2435], cookie# 1 2025-06-30T09:17:16.126115Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669004179387372:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004179387369:2435], cookie# 1 2025-06-30T09:17:16.126122Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668999884419312:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004179387370:2435], cookie# 1 2025-06-30T09:17:16.126356Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668999884419315:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004179387371:2435], cookie# 1 2025-06-30T09:17:16.127407Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668999884419318:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669004179387372:2435], cookie# 1 2025-06-30T09:17:16.127654Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669004179387370:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668999884419312:2050], cookie# 1 2025-06-30T09:17:16.127663Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669004179387371:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668999884419315:2053], cookie# 1 2025-06-30T09:17:16.127671Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669004179387366:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669004179387367:2435], cookie# 1 2025-06-30T09:17:16.127679Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521669004179387366:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:17:16.127682Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669004179387366:2435][/dc-1] Handle 
NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669004179387368:2435], cookie# 1 2025-06-30T09:17:16.127685Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521669004179387366:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:17:16.127912Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669004179387372:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668999884419318:2056], cookie# 1 2025-06-30T09:17:16.127919Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669004179387366:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669004179387369:2435], cookie# 1 2025-06-30T09:17:16.127924Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521669004179387366:2435][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:17:16.127946Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521668999884419657:2153], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-30T09:17:16.140002Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7521668999884419657:2153], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7521669004179387366:2435] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:16.140702Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7521668999884419657:2153], cacheItem# { Subscriber: { Subscriber: [1:7521669004179387366:2435] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-30T09:17:16.144155Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7521669008474354685:2446], recipient# [1:7521669008474354683:2444], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:17:16.144190Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7521669008474354683:2444] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-30T09:17:16.234706Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7521669008474354683:2444] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-30T09:17:16.238235Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7521669008474354683:2444] Handle TEvDescribeSchemeResult Forward to# [1:7521669008474354682:2443] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-30T09:17:16.275027Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521668999884419632:2139] Handle TEvProposeTransaction 2025-06-30T09:17:16.275046Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521668999 ... 
1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:19.522295Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:7521669021778483147:2533][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7521669021778483157:2533] 2025-06-30T09:17:19.522299Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][3:7521669021778483147:2533][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [3:7521669017483515225:2141], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:19.522304Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7521669021778483164:2534][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7521669017483514974:2050] 2025-06-30T09:17:19.522308Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7521669021778483165:2534][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7521669017483514977:2053] 2025-06-30T09:17:19.522311Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7521669021778483166:2534][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7521669017483514980:2056] 2025-06-30T09:17:19.522316Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:7521669021778483148:2534][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7521669021778483161:2534] 2025-06-30T09:17:19.522321Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:7521669021778483148:2534][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7521669021778483162:2534] 2025-06-30T09:17:19.522326Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][3:7521669021778483148:2534][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Set up state: owner# [3:7521669017483515225:2141], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:19.522330Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:7521669021778483148:2534][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7521669021778483163:2534] 2025-06-30T09:17:19.522334Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][3:7521669021778483148:2534][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Ignore empty state: owner# 
[3:7521669017483515225:2141], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:19.522342Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7521669017483514974:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7521669021778483152:2532] 2025-06-30T09:17:19.522346Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7521669017483514974:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7521669021778483158:2533] 2025-06-30T09:17:19.522349Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7521669017483514974:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7521669021778483164:2534] 2025-06-30T09:17:19.522352Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7521669017483514977:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7521669021778483153:2532] 2025-06-30T09:17:19.522356Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7521669017483514977:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7521669021778483159:2533] 2025-06-30T09:17:19.522358Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7521669017483514977:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7521669021778483165:2534] 2025-06-30T09:17:19.522362Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7521669017483514980:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7521669021778483154:2532] 2025-06-30T09:17:19.522364Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7521669017483514980:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7521669021778483160:2533] 2025-06-30T09:17:19.522367Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7521669017483514980:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7521669021778483166:2534] 2025-06-30T09:17:19.522375Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7521669017483515225:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-30T09:17:19.522390Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7521669017483515225:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7521669021778483146:2532] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:19.522423Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7521669017483515225:2141], cacheItem# { Subscriber: { Subscriber: [3:7521669021778483146:2532] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown 
DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:17:19.522437Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7521669017483515225:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-30T09:17:19.522444Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7521669017483515225:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7521669021778483147:2533] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:19.522451Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7521669017483515225:2141], cacheItem# { Subscriber: { Subscriber: [3:7521669021778483147:2533] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:17:19.522467Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7521669017483515225:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-30T09:17:19.522474Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7521669017483515225:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7521669021778483148:2534] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:19.522483Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7521669017483515225:2141], cacheItem# { Subscriber: { Subscriber: [3:7521669021778483148:2534] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:17:19.522497Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521669021778483167:2535], recipient# [3:7521669021778483140:2270], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:19.522514Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521669021778483168:2536], recipient# [3:7521669021778483145:2273], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TSubDomainTest::CheckAccessCopyTable [GOOD] >> TSubDomainTest::ConsistentCopyTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::CreateTableInsideSubDomain [GOOD] Test command err: 2025-06-30T09:17:14.091816Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668998337228854:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:14.092115Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004156/r3tmp/tmpdQZ9UT/pdisk_1.dat 2025-06-30T09:17:14.747535Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:14.752638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:14.752659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:14.755536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10887 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-30T09:17:14.902244Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521668998337229037:2142] Handle TEvNavigate describe path dc-1 2025-06-30T09:17:14.923821Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521668998337229462:2430] HANDLE EvNavigateScheme dc-1 2025-06-30T09:17:14.923864Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521668998337229060:2155], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:14.924018Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521668998337229060:2155], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:17:14.928512Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521668998337229463:2431][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:17:14.943423Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521668994042261411:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521668998337229467:2431] 2025-06-30T09:17:14.943463Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521668994042261411:2050] Subscribe: subscriber# [1:7521668998337229467:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:14.943761Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521668998337229467:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668994042261411:2050] 2025-06-30T09:17:14.943776Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521668998337229463:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668998337229464:2431] 2025-06-30T09:17:14.943785Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521668994042261411:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521668998337229467:2431] 2025-06-30T09:17:14.943794Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521668994042261417:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521668998337229469:2431] 2025-06-30T09:17:14.943797Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521668994042261417:2056] Subscribe: subscriber# [1:7521668998337229469:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:14.944039Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521668998337229469:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668994042261417:2056] 2025-06-30T09:17:14.944051Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521668998337229463:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668998337229466:2431] 2025-06-30T09:17:14.944522Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: 
[main][1:7521668998337229463:2431][/dc-1] Set up state: owner# [1:7521668998337229060:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:14.944570Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521668998337229467:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668998337229464:2431], cookie# 1 2025-06-30T09:17:14.944580Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521668998337229468:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668998337229465:2431], cookie# 1 2025-06-30T09:17:14.944585Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521668998337229469:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668998337229466:2431], cookie# 1 2025-06-30T09:17:14.944589Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668994042261411:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668998337229467:2431], cookie# 1 2025-06-30T09:17:14.944594Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521668998337229467:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668994042261411:2050], cookie# 1 2025-06-30T09:17:14.944599Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521668998337229463:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668998337229464:2431], cookie# 1 2025-06-30T09:17:14.944605Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521668998337229463:2431][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:17:14.944608Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521668994042261417:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521668998337229469:2431] 2025-06-30T09:17:14.944610Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668994042261417:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668998337229469:2431], cookie# 1 2025-06-30T09:17:14.947041Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521668994042261414:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521668998337229468:2431] 2025-06-30T09:17:14.947063Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521668994042261414:2053] Subscribe: subscriber# [1:7521668998337229468:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:14.947082Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521668998337229469:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668994042261417:2056], cookie# 1 2025-06-30T09:17:14.947088Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521668998337229468:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668994042261414:2053] 2025-06-30T09:17:14.947095Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: 
[main][1:7521668998337229463:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668998337229466:2431], cookie# 1 2025-06-30T09:17:14.947099Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521668998337229463:2431][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:17:14.947103Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521668998337229463:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668998337229465:2431] 2025-06-30T09:17:14.947114Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521668998337229463:2431][/dc-1] Path was already updated: owner# [1:7521668998337229060:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:14.947127Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668994042261414:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668998337229468:2431], cookie# 1 2025-06-30T09:17:14.947131Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521668994042261414:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521668998337229468:2431] 2025-06-30T09:17:14.947136Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521668998337229468:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668994042261414:2053], cookie# 1 2025-06-30T09:17:14.947139Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521668998337229463:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668998337229465:2431], cookie# 1 2025-06-30T09:17:14.947143Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521668998337229463:2431][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:17:15.083648Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521668998337229060:2155], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 
ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Dat ... 20.167969Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [5:7521669021590482608:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/delayed_requests DomainOwnerId: 72057594046644480 }: sender# [5:7521669025885451013:2720] 2025-06-30T09:17:20.167971Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][5:7521669025885451001:2721][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [5:7521669025885451002:2721] 2025-06-30T09:17:20.167976Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [5:7521669021590482608:2056] Upsert description: path# /dc-1/.metadata/workload_manager/delayed_requests 2025-06-30T09:17:20.167997Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][5:7521669025885451001:2721][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [5:7521669025885451003:2721] 2025-06-30T09:17:20.168006Z node 5 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][5:7521669025885451001:2721][/dc-1/.metadata/workload_manager/running_requests] Set up state: owner# [5:7521669021590482935:2148], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:20.168012Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][5:7521669025885451001:2721][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [5:7521669025885451004:2721] 2025-06-30T09:17:20.168016Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [5:7521669021590482608:2056] Subscribe: subscriber# [5:7521669025885451013:2720], path# /dc-1/.metadata/workload_manager/delayed_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:20.168020Z node 5 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][5:7521669025885451001:2721][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [5:7521669021590482935:2148], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:20.168026Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [5:7521669021590482602:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/delayed_requests DomainOwnerId: 72057594046644480 }: sender# [5:7521669025885451011:2720] 2025-06-30T09:17:20.168028Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [5:7521669021590482602:2050] Upsert description: path# /dc-1/.metadata/workload_manager/delayed_requests 2025-06-30T09:17:20.168031Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7521669021590482608:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7521669025885451007:2721] 2025-06-30T09:17:20.168034Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [5:7521669021590482602:2050] Subscribe: subscriber# [5:7521669025885451011:2720], path# /dc-1/.metadata/workload_manager/delayed_requests, domainOwnerId# 
72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:20.168040Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7521669021590482602:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7521669025885451005:2721] 2025-06-30T09:17:20.168045Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [5:7521669021590482605:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/delayed_requests DomainOwnerId: 72057594046644480 }: sender# [5:7521669025885451012:2720] 2025-06-30T09:17:20.168047Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [5:7521669021590482605:2053] Upsert description: path# /dc-1/.metadata/workload_manager/delayed_requests 2025-06-30T09:17:20.168050Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [5:7521669021590482935:2148], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-30T09:17:20.168051Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [5:7521669021590482605:2053] Subscribe: subscriber# [5:7521669025885451012:2720], path# /dc-1/.metadata/workload_manager/delayed_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:20.168055Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7521669021590482605:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7521669025885451006:2721] 2025-06-30T09:17:20.168058Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7521669025885451013:2720][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7521669021590482608:2056] 2025-06-30T09:17:20.168062Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7521669025885451011:2720][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7521669021590482602:2050] 2025-06-30T09:17:20.168066Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7521669025885451012:2720][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7521669021590482605:2053] 2025-06-30T09:17:20.168067Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [5:7521669021590482935:2148], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [5:7521669025885451001:2721] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:20.168071Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][5:7521669025885451000:2720][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7521669025885451010:2720] 2025-06-30T09:17:20.168074Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][5:7521669025885451000:2720][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: 
/dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7521669025885451008:2720] 2025-06-30T09:17:20.168077Z node 5 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][5:7521669025885451000:2720][/dc-1/.metadata/workload_manager/delayed_requests] Set up state: owner# [5:7521669021590482935:2148], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:20.168080Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][5:7521669025885451000:2720][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7521669025885451009:2720] 2025-06-30T09:17:20.168088Z node 5 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][5:7521669025885451000:2720][/dc-1/.metadata/workload_manager/delayed_requests] Ignore empty state: owner# [5:7521669021590482935:2148], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:20.168090Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7521669021590482608:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7521669025885451013:2720] 2025-06-30T09:17:20.168094Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7521669021590482602:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7521669025885451011:2720] 2025-06-30T09:17:20.168096Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7521669021590482935:2148], cacheItem# { Subscriber: { Subscriber: [5:7521669025885451001:2721] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:17:20.168098Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7521669021590482605:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7521669025885451012:2720] 2025-06-30T09:17:20.168101Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [5:7521669021590482935:2148], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-30T09:17:20.168108Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [5:7521669021590482935:2148], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [5:7521669025885451000:2720] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:20.168120Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7521669021590482935:2148], cacheItem# { Subscriber: { Subscriber: [5:7521669025885451000:2720] DomainOwnerId: 
72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:17:20.168148Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7521669025885451014:2722], recipient# [5:7521669025885450998:2275], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } |79.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW+VolatileTxs [GOOD] >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs >> YdbYqlClient::SecurityTokenAuth >> TGRpcNewCoordinationClient::CreateDropDescribe |79.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> YdbScripting::MultiResults >> ClientStatsCollector::PrepareQuery >> TSubDomainTest::ConsistentCopyTable [GOOD] >> YdbQueryService::TestCreateAndAttachSession |79.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap >> Describe::LocationWithKillTablets [GOOD] >> Describe::DescribePartitionPermissions |79.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap >> TGRpcYdbTest::ExecuteQueryBadRequest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::ConsistentCopyTable [GOOD] Test command err: 2025-06-30T09:17:18.259714Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669018443511856:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:18.260347Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004141/r3tmp/tmpx2PBsY/pdisk_1.dat 2025-06-30T09:17:18.313841Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:18.314089Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669018443511742:2080] 1751275038255881 != 1751275038255884 TClient is connected to server localhost:63023 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-30T09:17:18.344268Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521669018443511951:2103] Handle TEvNavigate describe path dc-1 2025-06-30T09:17:18.346505Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521669018443512244:2253] HANDLE EvNavigateScheme dc-1 2025-06-30T09:17:18.346551Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521669018443511996:2117], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:18.346561Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521669018443511996:2117], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:17:18.346622Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521669018443512245:2254][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:17:18.347194Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521669018443511711:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521669018443512249:2254] 2025-06-30T09:17:18.347203Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521669018443511714:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521669018443512250:2254] 2025-06-30T09:17:18.347220Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521669018443511714:2052] Subscribe: subscriber# [1:7521669018443512250:2254], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:18.347228Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521669018443511711:2049] Subscribe: subscriber# [1:7521669018443512249:2254], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:18.347250Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521669018443511717:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521669018443512251:2254] 2025-06-30T09:17:18.347254Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521669018443511717:2055] Subscribe: subscriber# [1:7521669018443512251:2254], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:18.347265Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521669018443512249:2254][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669018443511711:2049] 2025-06-30T09:17:18.347271Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521669018443512251:2254][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669018443511717:2055] 2025-06-30T09:17:18.347275Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521669018443511711:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521669018443512249:2254] 2025-06-30T09:17:18.347276Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521669018443512250:2254][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: 
[OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669018443511714:2052] 2025-06-30T09:17:18.347280Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521669018443511714:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521669018443512250:2254] 2025-06-30T09:17:18.347284Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521669018443511717:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521669018443512251:2254] 2025-06-30T09:17:18.347285Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521669018443512245:2254][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669018443512246:2254] 2025-06-30T09:17:18.347292Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521669018443512245:2254][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669018443512248:2254] 2025-06-30T09:17:18.347302Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521669018443512245:2254][/dc-1] Set up state: owner# [1:7521669018443511996:2117], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:18.347334Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521669018443512245:2254][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521669018443512247:2254] 2025-06-30T09:17:18.347342Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521669018443512245:2254][/dc-1] Path was already updated: owner# [1:7521669018443511996:2117], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:18.347350Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669018443512249:2254][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669018443512246:2254], cookie# 1 2025-06-30T09:17:18.347354Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669018443512250:2254][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669018443512247:2254], cookie# 1 2025-06-30T09:17:18.347358Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521669018443512251:2254][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669018443512248:2254], cookie# 1 2025-06-30T09:17:18.347363Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521669018443511711:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669018443512249:2254], cookie# 1 2025-06-30T09:17:18.347369Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521669018443511714:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669018443512250:2254], cookie# 1 
2025-06-30T09:17:18.347374Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521669018443511717:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521669018443512251:2254], cookie# 1 2025-06-30T09:17:18.347379Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669018443512249:2254][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669018443511711:2049], cookie# 1 2025-06-30T09:17:18.347405Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669018443512250:2254][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669018443511714:2052], cookie# 1 2025-06-30T09:17:18.347408Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521669018443512251:2254][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669018443511717:2055], cookie# 1 2025-06-30T09:17:18.347413Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669018443512245:2254][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669018443512246:2254], cookie# 1 2025-06-30T09:17:18.347419Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521669018443512245:2254][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:17:18.347423Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669018443512245:2254][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669018443512247:2254], cookie# 1 2025-06-30T09:17:18.347425Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521669018443512245:2254][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:17:18.347428Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521669018443512245:2254][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521669018443512248:2254], cookie# 1 2025-06-30T09:17:18.347432Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521669018443512245:2254][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:17:18.361702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:18.361734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:18.365228Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521669018443511996:2117], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 
72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 ... E_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][5:7521669029088403919:2677][/dc-1/USER_1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [5:7521669029088403922:2677], cookie# 6 2025-06-30T09:17:22.394644Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][5:7521669029088403919:2677][/dc-1/USER_1] Sync is done in the ring group: cookie# 6, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:17:22.394647Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [5:7521669029088403233:2155], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_0/table PathId: Partial: 0 } 2025-06-30T09:17:22.394653Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [5:7521669029088403233:2155], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_0/table PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [5:7521669033383371784:3158] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1751275042050 PathId: [OwnerId: 72057594046644480, LocalPathId: 6] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, by pathId# nullptr 2025-06-30T09:17:22.394668Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7521669029088403233:2155], cacheItem# { Subscriber: { Subscriber: [5:7521669033383371784:3158] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1751275042050 PathId: [OwnerId: 72057594046644480, LocalPathId: 6] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: dc-1/USER_0/table TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-30T09:17:22.394681Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [5:7521669029088403233:2155], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_0/a/table PathId: Partial: 0 } 2025-06-30T09:17:22.394692Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [5:7521669029088403233:2155], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_0/a/table PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [5:7521669033383371848:3207] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 3 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1751275042150 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, by pathId# nullptr 2025-06-30T09:17:22.394704Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for 
TNavigate: self# [5:7521669029088403233:2155], cacheItem# { Subscriber: { Subscriber: [5:7521669033383371848:3207] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 3 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1751275042150 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: dc-1/USER_0/a/table TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 3 IsSync: true Partial: 0 } 2025-06-30T09:17:22.394711Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [5:7521669029088403233:2155], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_1 PathId: Partial: 0 } 2025-06-30T09:17:22.394716Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [5:7521669029088403233:2155], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [5:7521669029088403919:2677] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 6 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1751275041908 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] DomainId: [OwnerId: 72057594046644480, LocalPathId: 3] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:17:22.394722Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7521669029088403233:2155], cacheItem# { Subscriber: { Subscriber: [5:7521669029088403919:2677] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 6 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1751275041908 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] DomainId: [OwnerId: 72057594046644480, LocalPathId: 3] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 6 IsSync: true Partial: 0 } 2025-06-30T09:17:22.394816Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7521669033383372222:3530], recipient# [5:7521669033383372221:3529], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/table TableId: [72057594046644480:6:1] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } },{ Path: dc-1/USER_1 TableId: [72057594046644480:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 3] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 3] Params { Version: 2 PlanResolution: 50 
Coordinators: 72075186224037892 Coordinators: 72075186224037893 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037894 Mediators: 72075186224037895 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } },{ Path: dc-1/USER_0/a/table TableId: [72057594046644480:7:1] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } },{ Path: dc-1/USER_1 TableId: [72057594046644480:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 3] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 3] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037892 Coordinators: 72075186224037893 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037894 Mediators: 72075186224037895 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:17:22.394837Z node 5 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [5:7521669033383372221:3529] txid# 281474976715672 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:17:22.394848Z node 5 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [5:7521669033383372221:3529] txid# 281474976715672, Access denied for user1@builtin on path /dc-1/USER_0/table, with access SelectRow 2025-06-30T09:17:22.394872Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521669033383372221:3529] txid# 281474976715672, issues: { message: "Access denied for user1@builtin on path /dc-1/USER_0/table" issue_code: 200000 severity: 1 } 2025-06-30T09:17:22.394877Z node 5 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [5:7521669033383372221:3529] txid# 281474976715672 SEND to# [5:7521669033383372220:3528] Source {TEvProposeTransactionStatus Status# 5} 2025-06-30T09:17:22.403115Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [5:7521669029088402891:2051] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [7:7521669029936818333:2104] 2025-06-30T09:17:22.403140Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [5:7521669029088402891:2051] Unsubscribe: subscriber# [7:7521669029936818333:2104], path# /dc-1/USER_0 2025-06-30T09:17:22.403141Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [5:7521669029088402894:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [7:7521669029936818334:2104] 2025-06-30T09:17:22.403151Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [5:7521669029088402897:2057] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [7:7521669029936818335:2104] 2025-06-30T09:17:22.403153Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [5:7521669029088402894:2054] Unsubscribe: subscriber# [7:7521669029936818334:2104], path# /dc-1/USER_0 2025-06-30T09:17:22.403156Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [5:7521669029088402897:2057] Unsubscribe: subscriber# [7:7521669029936818335:2104], path# /dc-1/USER_0 2025-06-30T09:17:22.403214Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: 
[5:7521669029088402891:2051] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_1 }: sender# [6:7521669030864589060:2106] 2025-06-30T09:17:22.403216Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [5:7521669029088402894:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_1 }: sender# [6:7521669030864589061:2106] 2025-06-30T09:17:22.403218Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [5:7521669029088402891:2051] Unsubscribe: subscriber# [6:7521669030864589060:2106], path# /dc-1/USER_1 2025-06-30T09:17:22.403220Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [5:7521669029088402894:2054] Unsubscribe: subscriber# [6:7521669030864589061:2106], path# /dc-1/USER_1 2025-06-30T09:17:22.403227Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [5:7521669029088402897:2057] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_1 }: sender# [6:7521669030864589062:2106] 2025-06-30T09:17:22.403231Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [5:7521669029088402897:2057] Unsubscribe: subscriber# [6:7521669030864589062:2106], path# /dc-1/USER_1 2025-06-30T09:17:22.403278Z node 5 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 6 2025-06-30T09:17:22.403468Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:17:22.403503Z node 5 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 7 2025-06-30T09:17:22.403970Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connected -> Disconnected >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table [GOOD] >> TGRpcNewCoordinationClient::CreateDropDescribe [GOOD] >> TGRpcNewCoordinationClient::CreateAlter >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query >> ResultFormatter::EmptyDict [GOOD] >> ResultFormatter::Dict [GOOD] >> ResultFormatter::Decimal [GOOD] >> YdbYqlClient::SecurityTokenAuth [GOOD] >> YdbYqlClient::RetryOperationTemplate >> ClientStatsCollector::PrepareQuery [GOOD] >> ClientStatsCollector::CounterCacheMiss |79.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Decimal [GOOD] >> YdbQueryService::TestCreateAndAttachSession [GOOD] >> YdbQueryService::TestAttachTwice >> YdbScripting::MultiResults [GOOD] >> YdbScripting::Params >> TGRpcNewCoordinationClient::CreateAlter [GOOD] >> TGRpcNewCoordinationClient::NodeNotFound |79.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |79.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan >> TxUsage::WriteToTopic_Demo_45_Table [GOOD] >> TGRpcYdbTest::ExecuteQueryBadRequest [GOOD] >> TGRpcYdbTest::ExecuteQueryExplicitSession >> ResultFormatter::Void [GOOD] >> ResultFormatter::VariantTuple [GOOD] |79.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::VariantTuple [GOOD] >> TxUsage::WriteToTopic_Demo_45_Query >> TGRpcNewCoordinationClient::NodeNotFound [GOOD] >> TGRpcNewCoordinationClient::MultipleSessionsSemaphores >> ClientStatsCollector::CounterCacheMiss [GOOD] >> 
ClientStatsCollector::CounterRetryOperation >> YdbYqlClient::RetryOperationTemplate [GOOD] >> YdbYqlClient::RetryOperationSync >> ResultFormatter::Optional [GOOD] >> ResultFormatter::Pg [GOOD] >> TSchemeShardTestExtSubdomainReboots::CreateExternalSubdomain-AlterDatabaseCreateHiveFirst-true [GOOD] >> YdbQueryService::TestAttachTwice [GOOD] >> YdbQueryService::TestForbidExecuteWithoutAttach >> YdbScripting::Params [GOOD] >> YdbTableBulkUpsert::DataValidation |79.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Pg [GOOD] |79.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots >> TGRpcYdbTest::ExecuteQueryExplicitSession [GOOD] >> TGRpcYdbTest::ExecuteDmlQuery |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |79.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots >> TBSVWithReboots::Create [GOOD] >> ResultFormatter::List [GOOD] >> ResultFormatter::Null [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query [GOOD] |79.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |79.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::CreateExternalSubdomain-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:16:38.957228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:38.957243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.957246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:38.957250Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:38.957253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:38.957256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:38.957262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.957271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:38.957342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:38.957393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:38.966920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:16:38.966941Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:38.967010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.968973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:38.969596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:38.969618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:38.970955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:38.970988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:38.971063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.971106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:38.971748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.971786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:38.971950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:38.971956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.971962Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:38.971966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:38.971970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:38.971986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:16:38.972938Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.986719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:38.986786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.986828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:38.986834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:38.986874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:38.986885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:38.987419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.987450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:38.987499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.987508Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:38.987511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:38.987518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:38.987924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.987934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:38.987939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:38.988243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.988249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.988255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:38.988260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:38.988851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:38.989202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:38.989228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:38.989383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.989408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
AT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5 2025-06-30T09:17:25.897019Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 11 2025-06-30T09:17:25.897029Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/2, is published: true 2025-06-30T09:17:25.897451Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:1, at schemeshard: 72057594046678944 2025-06-30T09:17:25.897459Z node 146 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:1 ProgressState 2025-06-30T09:17:25.897468Z node 146 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:1 progress is 2/2 2025-06-30T09:17:25.897471Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 2/2 2025-06-30T09:17:25.897477Z node 146 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:1 progress is 2/2 2025-06-30T09:17:25.897479Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 2/2 2025-06-30T09:17:25.897483Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 2/2, is published: true 2025-06-30T09:17:25.897487Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 2/2 2025-06-30T09:17:25.897491Z node 146 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:17:25.897494Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:17:25.897513Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 10 2025-06-30T09:17:25.897516Z node 146 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:1 2025-06-30T09:17:25.897518Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:1 2025-06-30T09:17:25.897528Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 9 2025-06-30T09:17:25.897584Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:17:25.897829Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:17:25.897835Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:17:25.897883Z node 146 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: 
NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:17:25.897894Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:17:25.897897Z node 146 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [146:699:2597] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:17:25.897950Z node 146 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:17:25.897975Z node 146 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 33us result status StatusSuccess 2025-06-30T09:17:25.898059Z node 146 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186234409547 Coordinators: 72075186234409548 Coordinators: 72075186234409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409550 Mediators: 72075186234409551 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "tenant-1:hdd" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:25.898114Z node 146 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186234409546 2025-06-30T09:17:25.898129Z node 146 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186234409546 describe path "/MyRoot/USER_0" took 14us result status StatusSuccess 2025-06-30T09:17:25.898161Z node 
146 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "MyRoot/USER_0" PathId: 1 SchemeshardId: 72075186234409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186234409547 Coordinators: 72075186234409548 Coordinators: 72075186234409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409550 Mediators: 72075186234409551 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "tenant-1:hdd" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/USER_0" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186234409546, at schemeshard: 72075186234409546 2025-06-30T09:17:25.898200Z node 146 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:17:25.898211Z node 146 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 12us result status StatusSuccess 2025-06-30T09:17:25.898251Z node 146 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 1002 
CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TGRpcNewCoordinationClient::MultipleSessionsSemaphores [GOOD] >> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table >> TBSVWithReboots::CreateAssignWithVersion [GOOD] >> YdbQueryService::TestForbidExecuteWithoutAttach [GOOD] >> YdbQueryService::TestCreateDropAttachSession >> ClientStatsCollector::CounterRetryOperation [GOOD] >> ClientStatsCollector::ExternalMetricRegistryByRawPtr >> YdbTableBulkUpsert::DataValidation [GOOD] >> YdbTableBulkUpsert::AsyncIndexShouldFail >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Table [GOOD] >> TxUsage::WriteToTopic_Demo_16_Query [GOOD] |79.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Null [GOOD] >> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::Create [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:17:08.969741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, 
InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:08.969762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:08.969767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:08.969771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:08.969782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:08.969786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:08.969793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:08.969807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:08.970012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:08.970179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:08.986603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:17:08.986627Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:08.986793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:17:08.992494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:08.992610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:08.992639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:08.994780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:08.994830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:08.994968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:08.995047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:08.995714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 
2025-06-30T09:17:08.995757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:08.996011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:08.996021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:08.996039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:08.996046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:08.996050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:08.996075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:17:08.997547Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:17:09.018262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:09.018320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.018368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:09.018376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:09.018419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:09.018434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:09.019072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 
SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:09.019107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:09.019163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.019180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:09.019187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:09.019193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:09.019657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.019670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:09.019676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:09.020108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.020120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:09.020128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:09.020135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:09.020707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:09.021131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:09.021164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:09.021326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:09.021347Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... : schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1001, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1001 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1001 at step: 5000003 2025-06-30T09:17:26.734891Z node 54 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:26.734910Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1001 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 231928236143 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:26.734916Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:141: NBSVState::TPropose operationId# 1001:0 HandleReply TEvOperationPlan, at schemeshard: 72057594046678944 2025-06-30T09:17:26.734975Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1001:0 128 -> 240 2025-06-30T09:17:26.735009Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:17:26.735022Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 FAKE_COORDINATOR: Erasing txId 1001 2025-06-30T09:17:26.735380Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:26.735387Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1001, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:17:26.735430Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1001, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:17:26.735453Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:26.735458Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [54:212:2212], at schemeshard: 72057594046678944, txId: 1001, path id: 2 2025-06-30T09:17:26.735464Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [54:212:2212], at schemeshard: 72057594046678944, txId: 1001, path id: 3 2025-06-30T09:17:26.735473Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1001:0, at schemeshard: 72057594046678944 2025-06-30T09:17:26.735479Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1001:0 ProgressState 2025-06-30T09:17:26.735490Z node 54 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:908: Part operation is done id#1001:0 progress is 1/1 2025-06-30T09:17:26.735494Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1001 ready parts: 1/1 2025-06-30T09:17:26.735501Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1001:0 progress is 1/1 2025-06-30T09:17:26.735504Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1001 ready parts: 1/1 2025-06-30T09:17:26.735508Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1001, ready parts: 1/1, is published: false 2025-06-30T09:17:26.735513Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1001 ready parts: 1/1 2025-06-30T09:17:26.735518Z node 54 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1001:0 2025-06-30T09:17:26.735522Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1001:0 2025-06-30T09:17:26.735551Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:17:26.735558Z node 54 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1001, publications: 2, subscribers: 0 2025-06-30T09:17:26.735562Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1001, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-30T09:17:26.735566Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1001, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-30T09:17:26.735764Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-30T09:17:26.735777Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-30T09:17:26.735781Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1001 2025-06-30T09:17:26.735785Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1001, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-30T09:17:26.735789Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:17:26.736052Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-30T09:17:26.736064Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: 
Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-30T09:17:26.736068Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1001 2025-06-30T09:17:26.736072Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1001, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:17:26.736077Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:17:26.736087Z node 54 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1001, subscribers: 0 2025-06-30T09:17:26.736575Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1001 2025-06-30T09:17:26.736636Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1001 TestModificationResult got TxId: 1001, wait until txId: 1001 TestWaitNotification wait txId: 1001 2025-06-30T09:17:26.736684Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1001: send EvNotifyTxCompletion 2025-06-30T09:17:26.736693Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1001 2025-06-30T09:17:26.736762Z node 54 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1001, at schemeshard: 72057594046678944 2025-06-30T09:17:26.736781Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1001: got EvNotifyTxCompletionResult 2025-06-30T09:17:26.736786Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1001: satisfy waiter [54:384:2362] TestWaitNotification: OK eventTxId 1001 2025-06-30T09:17:26.736850Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/BSVolume_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:17:26.736882Z node 54 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_1" took 42us result status StatusSuccess 2025-06-30T09:17:26.736985Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/BSVolume_1" PathDescription { Self { Name: "BSVolume_1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeBlockStoreVolume CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BSVVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BlockStoreVolumeDescription { Name: "BSVolume_1" PathId: 3 VolumeConfig { BlockSize: 4096 Partitions { BlockCount: 16 } Version: 1 DiskId: "foo" ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Partitions { PartitionId: 0 TabletId: 72075186233409546 } VolumeTabletId: 72075186233409547 AlterVersion: 1 MountToken: "" TokenVersion: 0 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Query >> TGRpcYdbTest::ExecuteDmlQuery [GOOD] >> TGRpcYdbTest::ExecutePreparedQuery >> TxUsage::WriteToTopic_Demo_18_RestartNo_Query >> ResultFormatter::Primitive [GOOD] >> ResultFormatter::Struct [GOOD] >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateAssignWithVersion [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:17:08.642656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:08.642684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:08.642691Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:08.642697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:08.642712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:08.642718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:08.642731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:08.642776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:08.642918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:08.643016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:08.658916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:17:08.658952Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:08.659114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:17:08.664735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:08.664873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:08.664900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:08.667070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:08.667346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:08.667475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:08.667577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:08.668441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:08.668497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:08.668745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-30T09:17:08.668754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:08.668772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:08.668779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:08.668784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:08.668808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:17:08.670414Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:17:08.688861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:08.688946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:08.689009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:08.689019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:08.689063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:08.689080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:08.689967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:08.690039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:08.690126Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:08.690146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:08.690153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:08.690160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:08.690885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:08.690908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:08.690915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:08.691508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:08.691525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:08.691533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:08.691543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:08.692431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:08.693067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:08.693118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:08.693366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:08.693398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
pp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_4" took 36us result status StatusSuccess 2025-06-30T09:17:27.245624Z node 60 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/BSVolume_4" PathDescription { Self { Name: "BSVolume_4" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeBlockStoreVolume CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BSVVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BlockStoreVolumeDescription { Name: "BSVolume_4" PathId: 3 VolumeConfig { BlockSize: 4096 Partitions { BlockCount: 16 } Version: 1 DiskId: "foo" ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Partitions { PartitionId: 0 TabletId: 72075186233409546 } VolumeTabletId: 72075186233409547 AlterVersion: 1 MountToken: "Owner123" TokenVersion: 1 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 1003 2025-06-30T09:17:27.246428Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpAssignBlockStoreVolume AssignBlockStoreVolume { Name: "BSVolume_4" NewMountToken: "Owner124" TokenVersion: 1 } } TxId: 1003 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:27.246457Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_assign_bsv.cpp:25: TAssignBlockStoreVolume Propose, path: /MyRoot/DirA/BSVolume_4, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:17:27.246479Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1003:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:27.246490Z node 60 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAssignBlockStoreVolume, opId: 1003:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_assign_bsv.cpp:75) 2025-06-30T09:17:27.246502Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:17:27.246506Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:17:27.246512Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:17:27.246516Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:17:27.246523Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:17:27.246531Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: false 2025-06-30T09:17:27.246535Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:17:27.246539Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:17:27.246544Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 1, subscribers: 0 2025-06-30T09:17:27.246548Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-30T09:17:27.250586Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1003, response: Status: StatusSuccess TxId: 1003 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:27.250646Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1003, database: /MyRoot, subject: , status: StatusSuccess, operation: ALTER BLOCK STORE VOLUME ASSIGN, path: /MyRoot/DirA/BSVolume_4 2025-06-30T09:17:27.250688Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:27.250694Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:17:27.250730Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:27.250751Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [60:213:2213], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-30T09:17:27.250891Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:17:27.250922Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 
72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:17:27.250926Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:17:27.250931Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:17:27.250937Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:17:27.250962Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:17:27.251695Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:17:27.251762Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:17:27.251769Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:17:27.251846Z node 60 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:17:27.251862Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:17:27.251869Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [60:407:2385] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:17:27.251951Z node 60 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/BSVolume_4" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:17:27.251983Z node 60 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_4" took 44us result status StatusSuccess 2025-06-30T09:17:27.252085Z node 60 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/BSVolume_4" PathDescription { Self { Name: "BSVolume_4" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeBlockStoreVolume CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BSVVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 
PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BlockStoreVolumeDescription { Name: "BSVolume_4" PathId: 3 VolumeConfig { BlockSize: 4096 Partitions { BlockCount: 16 } Version: 1 DiskId: "foo" ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Partitions { PartitionId: 0 TabletId: 72075186233409546 } VolumeTabletId: 72075186233409547 AlterVersion: 1 MountToken: "Owner124" TokenVersion: 2 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TxUsage::WriteToTopic_Demo_23_RestartNo_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_3_Query [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback [GOOD] Test command err: 2025-06-30T09:17:22.509085Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669035603567922:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:22.509118Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045c2/r3tmp/tmp3qJmwu/pdisk_1.dat 2025-06-30T09:17:22.580684Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:22.594289Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 28548, node 1 2025-06-30T09:17:22.609602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:22.609632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:22.611299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:22.847111Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:22.847129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:22.847131Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:22.847190Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3715 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:23.015646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:23.055283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-30T09:17:23.245333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-30T09:17:23.273612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropKesus, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp:186) 2025-06-30T09:17:23.279942Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-30T09:17:23.281247Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-30T09:17:24.125115Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669044033102139:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:24.125134Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045c2/r3tmp/tmpWFyamC/pdisk_1.dat 2025-06-30T09:17:24.170661Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: 
Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30544, node 4 2025-06-30T09:17:24.225451Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:24.225479Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:24.226929Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:24.231822Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:24.231831Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:24.231833Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:24.231881Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65128 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:24.254575Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:17:24.268855Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-30T09:17:24.285442Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp:19) 2025-06-30T09:17:24.298712Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp:19) 2025-06-30T09:17:24.305092Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp:19) 2025-06-30T09:17:25.014731Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521669047525867015:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:25.014826Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045c2/r3tmp/tmpnI7DSA/pdisk_1.dat 2025-06-30T09:17:25.059306Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:25.077386Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 10443, node 7 2025-06-30T09:17:25.091960Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:25.091973Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:25.091976Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:25.092028Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29118 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:25.118097Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:25.118137Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:25.123513Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:25.127739Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:26.020325Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7521669050092257333:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:26.020514Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045c2/r3tmp/tmpjAqQMw/pdisk_1.dat 2025-06-30T09:17:26.052235Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:26.062943Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 18416, node 10 2025-06-30T09:17:26.074988Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:26.075007Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:26.075009Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:26.075066Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7307 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:26.122101Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:26.122138Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:26.124991Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:26.134503Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:17:26.148107Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-30T09:17:27.049412Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7521669054173959379:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:27.049441Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045c2/r3tmp/tmpAGzkPj/pdisk_1.dat 2025-06-30T09:17:27.070978Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:27.080007Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 19433, node 13 2025-06-30T09:17:27.087919Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:27.087936Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:27.087938Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:27.087999Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5000 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:17:27.149967Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:27.150024Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:27.151716Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:27.163902Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:27.267596Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) |79.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Struct [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minstep/unittest >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs [GOOD] Test command err: 2025-06-30T09:17:19.988831Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:17:19.988915Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:17:19.988930Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00446e/r3tmp/tmp0tYE7r/pdisk_1.dat 2025-06-30T09:17:20.142519Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:17:20.142977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:17:20.143870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:17:20.144083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:20.144342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:17:20.144724Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 1 HANDLE EvProposeTransaction marker# C0 2025-06-30T09:17:20.144736Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 1 step# 500 Status# 16 SEND to# [1:375:2369] Proxy marker# C1 2025-06-30T09:17:20.157866Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:20.157907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:20.158801Z node 1 :HIVE DEBUG: hive_impl.cpp:2279: HIVE#72057594037968897 Merged config: { } 2025-06-30T09:17:20.158847Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275038326126 != 1751275038326130 2025-06-30T09:17:20.203513Z node 1 :HIVE DEBUG: hive_impl.cpp:145: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [1:303:2341] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } 
TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-06-30T09:17:20.203571Z node 1 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Execute 2025-06-30T09:17:20.203861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:20.203869Z node 1 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-30T09:17:20.203873Z node 1 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-30T09:17:20.203878Z node 1 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-30T09:17:20.203881Z node 1 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-30T09:17:20.204004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:20.204083Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-30T09:17:20.204088Z node 1 :HIVE DEBUG: hive_impl.cpp:226: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-30T09:17:20.204094Z node 1 :HIVE DEBUG: hive_impl.cpp:229: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-30T09:17:20.204098Z node 1 :HIVE DEBUG: hive_impl.cpp:306: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-30T09:17:20.204124Z node 1 :HIVE DEBUG: hive_impl.cpp:812: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 1 Location DataCenter: "1" Module: "1" Rack: "1" Unit: "1" 2025-06-30T09:17:20.214527Z node 1 :HIVE DEBUG: tx__register_node.cpp:96: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Complete 2025-06-30T09:17:20.214577Z node 1 :HIVE DEBUG: node_info.cpp:373: HIVE#72057594037968897 Node(1) Ping([1:303:2341]) 2025-06-30T09:17:20.214602Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-30T09:17:20.214831Z node 1 :HIVE DEBUG: hive_impl.cpp:741: HIVE#72057594037968897 THive::Handle::TEvSyncTablets 2025-06-30T09:17:20.214855Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:41: HIVE#72057594037968897 THive::TTxSyncTablets([1:303:2341])::Execute 2025-06-30T09:17:20.214864Z node 1 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-30T09:17:20.214883Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:130: HIVE#72057594037968897 THive::TTxSyncTablets([1:303:2341])::Complete 2025-06-30T09:17:20.214903Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-30T09:17:20.214907Z node 1 :HIVE DEBUG: hive_impl.cpp:226: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-30T09:17:20.214913Z node 1 :HIVE DEBUG: hive_impl.cpp:306: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-30T09:17:20.214916Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-30T09:17:20.214976Z node 1 :HIVE DEBUG: hive_impl.cpp:735: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 1: Status: 0 StartTime: 0 ResourceMaximum { Memory: 202797637632 } 2025-06-30T09:17:20.214986Z node 1 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(1)::Execute 2025-06-30T09:17:20.214995Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:20.215051Z node 1 :HIVE DEBUG: hive_impl.cpp:2795: HIVE#72057594037968897 AddRegisteredDataCentersNode(1, 1) 2025-06-30T09:17:20.215060Z node 1 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-30T09:17:20.215062Z node 1 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-30T09:17:20.215088Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-30T09:17:20.215091Z node 1 :HIVE DEBUG: hive_impl.cpp:226: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-30T09:17:20.215094Z node 1 :HIVE DEBUG: hive_impl.cpp:229: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-30T09:17:20.215096Z node 1 :HIVE DEBUG: hive_impl.cpp:306: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-30T09:17:20.225635Z node 1 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(1)::Complete 2025-06-30T09:17:20.225674Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-30T09:17:20.289771Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 1 has been planned 2025-06-30T09:17:20.289826Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 1 for mediator 72057594046382081 tablet 72057594046644480 2025-06-30T09:17:20.289909Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 1000 in 0.500000s at 0.950000s 2025-06-30T09:17:20.290023Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 500, txid# 1 marker# C2 2025-06-30T09:17:20.290034Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 1 stepId# 500 Status# 17 SEND EvProposeTransactionStatus to# [1:375:2369] Proxy 2025-06-30T09:17:20.290764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-30T09:17:20.291654Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-30T09:17:20.291677Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-30T09:17:20.291682Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 acknowledged 2025-06-30T09:17:20.291687Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:6] persistent tx 1 acknowledged 2025-06-30T09:17:20.291723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-30T09:17:20.291933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-06-30T09:17:20.292075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 
72057594046644480, txId: 1, subscribers: 1 2025-06-30T09:17:20.292969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /Root/table-1, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:17:20.294760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:17:20.294775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:20.294935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/table-1 2025-06-30T09:17:20.295933Z node 1 :HIVE DEBUG: hive_impl.cpp:34: HIVE# ... r# C1 2025-06-30T09:17:28.323078Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 281474976715665 has been planned 2025-06-30T09:17:28.323114Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 2025-06-30T09:17:28.323119Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 2025-06-30T09:17:28.323184Z node 2 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 33500 in 0.500000s at 33.450000s 2025-06-30T09:17:28.323284Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 33000, txid# 281474976715665 marker# C2 2025-06-30T09:17:28.323294Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 281474976715665 stepId# 33000 Status# 17 SEND EvProposeTransactionStatus to# [2:374:2368] Proxy 2025-06-30T09:17:28.323447Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715665 at step 33000 at tablet 72075186224037889 { Transactions { TxId: 281474976715665 AckTo { RawX1: 0 RawX2: 0 } } Step: 33000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-30T09:17:28.323455Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:17:28.323483Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 33000, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-30T09:17:28.323568Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:17:28.323576Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:17:28.323584Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [33000:281474976715665] in PlanQueue unit at 72075186224037889 2025-06-30T09:17:28.323630Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 
33000:281474976715665 keys extracted: 0 2025-06-30T09:17:28.323657Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:17:28.323692Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:17:28.323706Z node 2 :TX_DATASHARD INFO: drop_table_unit.cpp:72: Trying to DROP TABLE at 72075186224037889 2025-06-30T09:17:28.323789Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:17:28.324094Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 33000} 2025-06-30T09:17:28.324102Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:17:28.324182Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-30T09:17:28.324197Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-30T09:17:28.324202Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 removed=1 2025-06-30T09:17:28.324206Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 acknowledged 2025-06-30T09:17:28.324212Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 acknowledged 2025-06-30T09:17:28.324243Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:17:28.324256Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [33000 : 281474976715665] from 72075186224037889 at tablet 72075186224037889 send result to client [2:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:17:28.324267Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715665 state PreOffline TxInFly 0 2025-06-30T09:17:28.324277Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:17:28.324390Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 281474976715665, done: 0, blocked: 1 2025-06-30T09:17:28.324837Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715665 datashard 72075186224037889 state PreOffline 2025-06-30T09:17:28.324847Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-30T09:17:28.324949Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715665:0 2025-06-30T09:17:28.324967Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, 
tx: 281474976715665, publications: 2, subscribers: 1 2025-06-30T09:17:28.325156Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715665, subscribers: 1 2025-06-30T09:17:28.325214Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:17:28.325669Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 INFO ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [core exec] yql_execution.cpp:133: Completed async execution for node #42 2025-06-30T09:17:28.325691Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 INFO ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [core exec] yql_execution.cpp:153: State is ExecutionComplete after apply async changes for node #42 2025-06-30T09:17:28.325702Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 INFO ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [core exec] yql_execution.cpp:59: Begin, root #43 2025-06-30T09:17:28.325708Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 INFO ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [core exec] yql_execution.cpp:72: Collect unused nodes for root #43, status: Ok 2025-06-30T09:17:28.325715Z node 2 :KQP_YQL TRACE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 TRACE ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [core exec] yql_execution.cpp:387: {0}, callable #43 2025-06-30T09:17:28.325727Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 INFO ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [core exec] yql_execution.cpp:577: Node #43 finished execution 2025-06-30T09:17:28.325746Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 INFO ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [core exec] yql_execution.cpp:594: Node #43 created 0 trackable nodes: 2025-06-30T09:17:28.325752Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 INFO ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [core exec] yql_execution.cpp:87: Finish, output #43, status: Ok 2025-06-30T09:17:28.325757Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 INFO ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [core exec] yql_execution.cpp:93: Creating finalizing transformer, output #43 2025-06-30T09:17:28.325777Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 NOTE 
ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [common provider] yql_provider_gateway.cpp:21:
: Info: Execution, code: 1060 2025-06-30T09:17:28.325783Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 NOTE ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [common provider] yql_provider_gateway.cpp:21:
:1:12: Info: Executing DROP TABLE 2025-06-30T09:17:28.325788Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=ZmFmNTZiMTgtMTcwMTRlZGQtOGQzNTRlODAtMTkxYWM2ZDQ= 2025-06-30 09:17:28.325 NOTE ydb-core-tx-datashard-ut_minstep(pid=66622, tid=0x00007FC39E118E00) [common provider] yql_provider_gateway.cpp:21:
: Info: Success, code: 4 2025-06-30T09:17:28.339106Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-30T09:17:28.339173Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-30T09:17:28.339519Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-30T09:17:28.339695Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-30T09:17:28.339778Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186224037889 2025-06-30T09:17:28.339789Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute Tablet 72075186224037889 2025-06-30T09:17:28.339810Z node 2 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037889.Leader.1) VolatileState: Running -> Stopped (Node 2) 2025-06-30T09:17:28.339829Z node 2 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(DataShard.72075186224037889.Leader.1 gen 1) to node 2 2025-06-30T09:17:28.339845Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 >> YdbQueryService::TestCreateDropAttachSession [GOOD] >> YdbQueryService::TestCreateAttachAndDropAttachedSession >> TBSVWithReboots::CreateAssignAlterIsAllowed [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query [GOOD] >> ResultFormatter::Tuple [GOOD] >> ResultFormatter::Tagged [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_4_Table >> YdbTableBulkUpsert::AsyncIndexShouldFail [GOOD] >> YdbTableBulkUpsert::AsyncIndexShouldSucceed >> Describe::DescribePartitionPermissions [GOOD] >> LocalPartition::Restarts >> ClientStatsCollector::ExternalMetricRegistryByRawPtr [GOOD] >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table >> TGRpcYdbTest::ExecutePreparedQuery [GOOD] >> TGRpcYdbTest::ExecuteQueryCache >> ResultFormatter::FormatEmptySchema [GOOD] >> ResultFormatter::FormatNonEmptySchema [GOOD] |79.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |79.0%| [LD] {RESULT} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |79.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Tagged [GOOD] >> YdbQueryService::TestCreateAttachAndDropAttachedSession [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateAssignAlterIsAllowed [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: 
[1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:17:05.475580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:05.475608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:05.475617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:05.475622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:05.475635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:05.475640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:05.475650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:05.475668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:05.475780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:05.475874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:05.492446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:17:05.492476Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:05.492590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:17:05.495921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:05.497066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:05.497110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 
72057594046678944 2025-06-30T09:17:05.499309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:05.499402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:05.499579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:05.499679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:05.500994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:05.501061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:05.501366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:05.501377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:05.501388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:05.501398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:05.501404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:05.501436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:17:05.505442Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:17:05.528739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:05.528840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:05.528912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:05.528922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: 
[OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:05.528974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:05.528990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:05.529915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:05.529964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:05.530052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:05.530074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:05.530081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:05.530088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:05.530666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:05.530679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:05.530687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:05.531066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:05.531076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:05.531083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:05.531094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:05.531893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:05.532401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from 
tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:05.532445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:05.532677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:05.532709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... eration.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:17:29.709730Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:68: NBSVState::TConfigureParts operationId: 1003:0 ProgressState, at schemeshard72057594046678944 2025-06-30T09:17:29.710107Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1003:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 272761856 2025-06-30T09:17:29.710138Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1003, partId: 0, tablet: 72075186233409547 2025-06-30T09:17:29.710181Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409547, partId: 0 2025-06-30T09:17:29.710205Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: TxId: 1003 Origin: 72075186233409547 Status: OK 2025-06-30T09:17:29.710210Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:23: NBSVState::TConfigureParts operationId: 1003:0 HandleReply TEvSetConfigResult, at schemeshard: 72057594046678944 2025-06-30T09:17:29.710217Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 3 -> 128 2025-06-30T09:17:29.710626Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:17:29.710664Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:17:29.710672Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:192: NBSVState::TPropose operationId# 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:29.710683Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1003 ready parts: 1/1 2025-06-30T09:17:29.710727Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1003 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:29.711135Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1003:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1003 msg type: 269090816 2025-06-30T09:17:29.711166Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1003, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1003 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1003 at step: 5000004 2025-06-30T09:17:29.711248Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:29.711270Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1003 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 339302418542 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:29.711280Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:141: NBSVState::TPropose operationId# 1003:0 HandleReply TEvOperationPlan, at schemeshard: 72057594046678944 2025-06-30T09:17:29.711323Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 128 -> 240 2025-06-30T09:17:29.711362Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 FAKE_COORDINATOR: Erasing txId 1003 2025-06-30T09:17:29.712105Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:29.712117Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:17:29.712170Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:29.712177Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [79:210:2210], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-30T09:17:29.712256Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:17:29.712266Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:17:29.712281Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:17:29.712288Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:17:29.712295Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:17:29.712300Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:17:29.712306Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: false 2025-06-30T09:17:29.712316Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:17:29.712323Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:17:29.712329Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:17:29.712368Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-30T09:17:29.712376Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 1, subscribers: 0 2025-06-30T09:17:29.712382Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-30T09:17:29.712529Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:17:29.712545Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:17:29.712550Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:17:29.712556Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-30T09:17:29.712561Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:17:29.712579Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:17:29.713200Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 2025-06-30T09:17:29.713319Z node 79 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/BSVolume_4" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:17:29.713367Z node 79 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_4" took 59us result status StatusSuccess 2025-06-30T09:17:29.713485Z node 79 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/BSVolume_4" PathDescription { Self { Name: "BSVolume_4" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeBlockStoreVolume CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BSVVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BlockStoreVolumeDescription { Name: "BSVolume_4" PathId: 3 VolumeConfig { BlockSize: 4096 Partitions { BlockCount: 32 } Partitions { BlockCount: 32 } Version: 2 DiskId: "foo" ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Partitions { PartitionId: 0 TabletId: 72075186233409546 } Partitions { PartitionId: 1 TabletId: 72075186233409548 } VolumeTabletId: 72075186233409547 AlterVersion: 2 MountToken: "Owner123" TokenVersion: 1 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> ResultFormatter::Utf8WithQuotes [GOOD] >> ResultFormatter::VariantStruct [GOOD] >> ResultFormatter::EmptyResultSet [GOOD] >> ResultFormatter::EmptyList [GOOD] >> ResultFormatter::EmptyTuple [GOOD] |79.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::FormatNonEmptySchema [GOOD] |79.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::VariantStruct [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> YdbQueryService::TestCreateAttachAndDropAttachedSession [GOOD] Test command err: 2025-06-30T09:17:23.611132Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669037115816183:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:23.611157Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045ba/r3tmp/tmpeiqy2y/pdisk_1.dat 2025-06-30T09:17:23.769743Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:23.782982Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 12843, node 1 2025-06-30T09:17:23.794758Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:23.794775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:23.794778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:23.794831Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13322 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:23.831887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:17:23.955204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:23.955239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:23.959387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:24.103833Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1578: Failed to parse session id: unknownSesson 2025-06-30T09:17:25.264220Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669046535905445:2155];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045ba/r3tmp/tmptVaJLm/pdisk_1.dat 2025-06-30T09:17:25.265848Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:17:25.285616Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:25.296439Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 17332, node 4 2025-06-30T09:17:25.306955Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:25.306969Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:25.306972Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:25.307025Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30888 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:17:25.362880Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:25.362921Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:25.364514Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:25.367166Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:26.485581Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521669052687543093:2189];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:26.485674Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045ba/r3tmp/tmp9MCm73/pdisk_1.dat 2025-06-30T09:17:26.555480Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 5665, node 7 2025-06-30T09:17:26.568392Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:26.572870Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:26.572883Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:26.572885Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:26.572937Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:17:26.584523Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:26.584554Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:26.586296Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24617 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:26.597286Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:28.231986Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7521669059576842896:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:28.232553Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045ba/r3tmp/tmpYpvj30/pdisk_1.dat 2025-06-30T09:17:28.260537Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10821, node 10 2025-06-30T09:17:28.303329Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 2025-06-30T09:17:28.339821Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:28.339851Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:28.342246Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:28.751188Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:28.751206Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:28.751208Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:28.751260Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9555 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:28.875479Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045ba/r3tmp/tmpvfGDqB/pdisk_1.dat 2025-06-30T09:17:29.558796Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7521669062557652260:2165];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:29.561016Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:17:29.582956Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12853, node 13 2025-06-30T09:17:29.622990Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:29.623004Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:29.623007Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:29.623064Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4442 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:29.660776Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:29.660810Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:29.665852Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:29.666414Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:29.675104Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:17:30.033836Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-30T09:17:30.034464Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-30T09:17:30.037308Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: TraceId: "01jz0214q17rgvga2g81vefats", Request has 18444992798659.514323s seconds to be completed 2025-06-30T09:17:30.037777Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=13&id=ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY= 2025-06-30T09:17:30.037811Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jz0214q17rgvga2g81vefats", Created new session, sessionId: ydb://session/3?node_id=13&id=ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY=, workerId: [13:7521669066852620362:2291], database: , longSession: 1, local sessions count: 1 2025-06-30T09:17:30.037820Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-30T09:17:30.037843Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=13&id=ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY=, ActorId: [13:7521669066852620362:2291], ActorState: unknown state, session actor bootstrapped 2025-06-30T09:17:30.037867Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 01jz0214q17rgvga2g81vefats 2025-06-30T09:17:30.037883Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-30T09:17:30.037889Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-30T09:17:30.037895Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-30T09:17:30.037906Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-30T09:17:30.037929Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:17:30.037944Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:17:30.037954Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:17:30.037963Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:17:30.037967Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:17:30.041323Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:856: Received ping session request, has local session: ydb://session/3?node_id=13&id=ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY=, rpc ctrl: [13:7521669066852620364:2292], sameNode: 1, trace_id: 2025-06-30T09:17:30.041338Z node 13 :KQP_PROXY TRACE: kqp_proxy_service.cpp:878: Attach local session: [13:7521669066852620362:2291] to rpc: [13:7521669066852620364:2292] on same node 2025-06-30T09:17:30.043455Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2413: SessionId: ydb://session/3?node_id=13&id=ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY=, ActorId: [13:7521669066852620362:2291], ActorState: ReadyState, Session closed due to explicit close event 2025-06-30T09:17:30.043478Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=13&id=ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY=, ActorId: [13:7521669066852620362:2291], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:17:30.043482Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=13&id=ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY=, ActorId: [13:7521669066852620362:2291], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-30T09:17:30.043488Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2644: SessionId: ydb://session/3?node_id=13&id=ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY=, ActorId: [13:7521669066852620362:2291], ActorState: unknown state, Cleanup temp tables: 0 2025-06-30T09:17:30.043523Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2735: SessionId: ydb://session/3?node_id=13&id=ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY=, ActorId: [13:7521669066852620362:2291], ActorState: unknown state, Session actor destroyed 2025-06-30T09:17:30.043683Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=13&id=ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY=, workerId: [13:7521669066852620362:2291], local sessions count: 0 2025-06-30T09:17:30.045620Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:902: Received ping session request, request_id: 3, sender: [13:7521669066852620367:2294], trace_id: 2025-06-30T09:17:30.045676Z node 13 :KQP_PROXY NOTICE: 
kqp_proxy_service.cpp:1585: Session not found: ydb://session/3?node_id=13&id=ZDhlOWJlY2ItMjQ2NDc1ZDYtYmY2NDBiNDItMzNkZWFkOGY= 2025-06-30T09:17:30.045708Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 3, sender: [13:7521669066852620367:2294], selfId: [13:7521669062557652211:2119], source: [13:7521669062557652211:2119] >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,0,1000000,0] >> KqpOlapJson::QuotedFilterVariants[10,false,0,10,1000000,0] >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,10,1000000,0] |79.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::EmptyTuple [GOOD] >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr [GOOD] >> TGRpcYdbTest::ExecuteQueryCache [GOOD] >> TxUsage::WriteToTopic_Demo_45_Query [GOOD] |79.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |79.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow >> KqpOlapJson::RestoreJsonArrayVariants[2,true,0,0,0,0] >> TxUsage::WriteToTopic_Demo_46_Table >> YdbYqlClient::RetryOperationSync [GOOD] >> YdbYqlClient::RetryOperationLimitedDuration ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::ExecuteQueryCache [GOOD] Test command err: 2025-06-30T09:17:23.766279Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669037129691011:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:23.766326Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045a5/r3tmp/tmpGRcNWy/pdisk_1.dat 2025-06-30T09:17:23.978644Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:23.986719Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 12503, node 1 2025-06-30T09:17:24.041097Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:24.041107Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:24.041109Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:24.041152Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11830 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:24.103424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:24.152288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:24.152332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:24.154368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:25.285905Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669048100250987:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:25.285950Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045a5/r3tmp/tmpJaklLH/pdisk_1.dat 2025-06-30T09:17:25.317779Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:25.325487Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 6844, node 4 2025-06-30T09:17:25.336908Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:25.336923Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:25.336926Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:25.336974Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21327 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:25.386237Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:25.386269Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:25.387864Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:25.395874Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:25.667031Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669048100251898:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:25.667033Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669048100251890:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:25.667051Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:25.667841Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:25.675081Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521669048100251904:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:17:25.776344Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669048100251979:2655] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:26.711373Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521669049020779153:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:26.711399Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045a5/r3tmp/tmpU5FeF9/pdisk_1.dat 2025-06-30T09:17:26.775631Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 19212, node 7 2025-06-30T09:17:26.787293Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:26.811685Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:26.811720Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:26.813346Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:26.818019Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:26.818035Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:26.818038Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:26.818092Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20185 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:26.865924 ... 
ard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:27.714412Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:27.770638Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz0212qx25185azs18yjxyk5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MzJlYWZkOTAtMmE0N2Y0YzMtODY3ZWMyNmMtMjVjOTQzYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:17:27.907345Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz0212yw480qpgc2qbb0amb0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MzJlYWZkOTAtMmE0N2Y0YzMtODY3ZWMyNmMtMjVjOTQzYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:17:29.030185Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7521669065212065636:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:29.030288Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045a5/r3tmp/tmpAwSnUE/pdisk_1.dat 2025-06-30T09:17:29.228372Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:29.303098Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 27165, node 10 2025-06-30T09:17:29.325984Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:29.326025Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:29.331496Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:29.339586Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:29.339598Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:29.339600Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:29.339658Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31861 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:29.677513Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:29.747528Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521669065212066384:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:29.747560Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:29.747684Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521669065212066396:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:29.748760Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:29.779006Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7521669065212066398:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:17:29.867351Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7521669065212066467:2659] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:30.545955Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7521669069305681072:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:30.546517Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045a5/r3tmp/tmpTBXXwc/pdisk_1.dat 2025-06-30T09:17:30.601097Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28829, node 13 2025-06-30T09:17:30.622573Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:30.622587Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:30.622590Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:30.622650Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:17:30.640274Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:30.640301Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:26319 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:17:30.642286Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:17:30.656745Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:30.955146Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7521669069305681890:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:30.955146Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7521669069305681901:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:30.955169Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:30.955908Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:30.959725Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7521669069305681904:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:17:31.037548Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7521669073600649273:2652] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> IncrementalRestoreScan::Empty ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr [GOOD] Test command err: 2025-06-30T09:17:22.724374Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669032930235415:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:22.724435Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045be/r3tmp/tmpYT3Zk3/pdisk_1.dat 2025-06-30T09:17:22.829893Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:22.839831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:22.839856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:22.841762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:22.848815Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 18218, node 1 2025-06-30T09:17:22.859910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:22.859924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:22.859926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:22.859971Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22352 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:17:22.907964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:23.496819Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669037225203552:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:23.496845Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:23.496941Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669037225203564:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:23.497680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:23.509661Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669037225203566:2300], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:17:23.574052Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669037225203637:2656] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:23.711896Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=1&id=M2E0YmEwNTctNWFmMGI0NTQtMTkwMjQ2ODYtZWIwYTgxNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Database not set, use /Root 2025-06-30T09:17:23.726021Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:24.865599Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669041522366557:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:24.865688Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045be/r3tmp/tmpbl68xI/pdisk_1.dat 2025-06-30T09:17:25.000267Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 61856, node 4 TClient is connected to server localhost:12949 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:25.039278Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:17:25.043684Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:17:25.063262Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:25.063802Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:25.063812Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:25.063814Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:25.063883Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:17:25.186105Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:25.186144Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:25.195867Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:25.373931Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669045817334589:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:25.373947Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669045817334597:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:25.373956Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:25.374640Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:25.378599Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521669045817334603:2300], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-30T09:17:25.440387Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669045817334678:2654] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:26.036484Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521669049069841590:2075];send_to=[0: ... ] TxId: 281474976715678. Ctx: { TraceId: 01jz0211y5bdpczseng6dzhrmd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTUxMGM2ZTctNzA2Y2MzZmMtNmUyMTM2YWYtYjNkMWVmZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `Root/names`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-30T09:17:26.854623Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=7&id=NTUxMGM2ZTctNzA2Y2MzZmMtNmUyMTM2YWYtYjNkMWVmZWM=, ActorId: [7:7521669049069842780:2320], ActorState: ExecuteState, TraceId: 01jz0211y5bdpczseng6dzhrmd, Create QueryResponse for error on request, msg: 2025-06-30T09:17:28.871094Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7521669059607650922:2155];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:28.874079Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045be/r3tmp/tmpuXC2qj/pdisk_1.dat 2025-06-30T09:17:29.114082Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1622, node 10 2025-06-30T09:17:29.179997Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:29.180030Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:29.190217Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:29.334975Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:29.334988Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:29.334991Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:29.335056Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18341 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:29.488707Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:17:29.520722Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521669063902619035:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:29.520744Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:29.520957Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521669063902619046:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:29.521925Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:29.529128Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7521669063902619049:2300], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:17:29.627377Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7521669063902619126:2654] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:30.412844Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7521669067507636306:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:30.413027Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045be/r3tmp/tmpPJTYnh/pdisk_1.dat 2025-06-30T09:17:30.445468Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:30.452408Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 17645, node 13 2025-06-30T09:17:30.482450Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:30.482461Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:30.482463Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:30.482503Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:17:30.513176Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:30.513205Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:12219 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:17:30.515231Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:17:30.523632Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:30.539136Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:17:30.795595Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7521669067507637208:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:30.795615Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7521669067507637220:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:30.795623Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:30.796405Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:30.800607Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7521669067507637222:2300], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:17:30.892935Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7521669067507637293:2650] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> YdbTableBulkUpsert::AsyncIndexShouldSucceed [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,0,1000000,0] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,0,1000000,0.5] >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,10,1000000,0] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,10,1000000,0.5] >> IncrementalRestoreScan::ChangeSenderEmpty >> KqpOlapJson::QuotedFilterVariants[10,false,0,10,1000000,0] [GOOD] >> KqpOlapJson::QuotedFilterVariants[10,false,0,10,1000000,0.5] >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary >> TSchemeShardTopicSplitMergeTest::MargePartitions >> TSchemeShardTopicSplitMergeTest::SplitTwoPartitions >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,10,1000000,0.5] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,1000,0,0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::AsyncIndexShouldSucceed [GOOD] Test command err: 2025-06-30T09:17:23.395873Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669038910744225:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:23.395902Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045de/r3tmp/tmpFOwhBA/pdisk_1.dat 2025-06-30T09:17:23.482682Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:23.497002Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 13499, node 1 2025-06-30T09:17:23.497999Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:23.498049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:23.499507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:23.511280Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:23.511295Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:23.511297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:23.511353Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20318 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:23.547374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:23.862767Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669038910745132:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:23.862795Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:23.957884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:24.022721Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669043205712598:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:24.022766Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:24.022775Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669043205712603:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:24.023396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:24.030165Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669043205712605:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:17:24.090211Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669043205712676:2778] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:24.099129Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz020yqpc70be8p56estp7rn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDlkYjNjNjAtYmQyOTU3OGUtYmFiM2FjNTctYzZiMzE4Y2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:17:24.116612Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz020yqpc70be8p56estp7rn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDY0OTNlMzgtYjdmMWM1ZDYtMTczZDZiMGItYjU3NTdiZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:17:24.124845Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz020yqpc70be8p56estp7rn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGI1YzgxODQtZjQ0OWMyZjktYTUwYmExZjgtMTZlMzFmZjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:17:24.142622Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jz020yqpc70be8p56estp7rn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDIzMjc3NTAtZDRjMjM3NDEtNjM2YTI1ZjgtODk1ZmQ1ZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:17:25.142303Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669048407862496:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:25.142327Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045de/r3tmp/tmpCHNZr5/pdisk_1.dat 2025-06-30T09:17:25.174311Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29635, node 4 2025-06-30T09:17:25.211311Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:25.211329Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:25.211340Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:25.211387Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3128 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:25.241884Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:25.241928Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:25.243584Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:25.250306Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:25.590041Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669048407863241:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:25.590101Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:25.600982Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__ope ... 30T09:17:26.571415Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:26.572409Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6484, node 7 2025-06-30T09:17:26.623467Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:26.623481Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:26.623483Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:26.623530Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7456 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:26.735315Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:26.924196Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Decimal(22,9) value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Date value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Datetime value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Timestamp value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Interval value CLIENT_INTERNAL_ERROR
: Error: GRpc error: (13): Unable to parse request
: Error: Grpc error response on endpoint localhost:6484 BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Yson value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Json value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid JSON for JsonDocument provided: TAPE_ERROR: The JSON document has an improper structure: missing or superfluous commas, braces, missing keys, etc. This is a fatal and unrecoverable error. BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid DyNumber string representation 2025-06-30T09:17:28.810604Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045de/r3tmp/tmptDhPpS/pdisk_1.dat 2025-06-30T09:17:29.007493Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11803, node 10 2025-06-30T09:17:29.075341Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 2025-06-30T09:17:29.114960Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:29.114992Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:29.127135Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:29.342242Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:29.342261Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:29.342263Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:29.342325Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1360 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:29.435096Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:29.509826Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/ui8' Only async-indexed tables are supported by BulkUpsert
: Error: Bulk upsert to table '/Root/ui8/Value_index/indexImplTable' unknown table 2025-06-30T09:17:30.228719Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7521669067838243431:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:30.229610Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045de/r3tmp/tmpJyBpvN/pdisk_1.dat 2025-06-30T09:17:30.249631Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:30.261382Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 26840, node 13 2025-06-30T09:17:30.293447Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:30.293467Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:30.293470Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:30.293524Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23841 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:30.327624Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:30.327669Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:30.329510Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:30.333675Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:17:30.720994Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:31.229048Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
: Error: Bulk upsert to table '/Root/ui8/Value_index/indexImplTable' unknown table >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,0,1000000,0.5] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,10,0,0] >> KqpOlapJson::QuotedFilterVariants[10,false,0,10,1000000,0.5] [GOOD] >> KqpOlapJson::QuotedFilterVariants[10,false,0,1000,0,0] >> TSchemeShardServerLess::StorageBilling [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary [GOOD] >> TSchemeShardTopicSplitMergeTest::MargePartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions >> IncrementalRestoreScan::Empty [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitTwoPartitions [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,1000,0,0] [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,1000,0,0.5] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::StorageBilling [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:16:50.438726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:50.438778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:50.438785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:50.438792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:50.438799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:50.438804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:50.438816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:50.438835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:50.438953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:50.439024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:50.454575Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:16:50.454602Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:50.457771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:50.457816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:50.457849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:50.459540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:50.459601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:50.459700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:50.459753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:50.460371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:50.460412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:50.460647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:50.460656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:50.460678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:50.460688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:50.460692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:50.460706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.461767Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:16:50.476572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:50.476638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.476699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: 
IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:50.476707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:50.476753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:50.476766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:50.477386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:50.477420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:50.477465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.477474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:50.477478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:50.477483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:50.477853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.477862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:50.477869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:50.478176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.478183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:50.478187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:50.478193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:50.478688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 
MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:50.479037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:50.479072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:50.479257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:50.479278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:50.479285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:50.479340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:16:50.479346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:50.479370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:50.479380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:16:50.479726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:50.479734Z node 1 :FLAT_TX_SCHEMESHARD ... 
e 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:668:2578], at schemeshard: 72075186233409549, txId: 107, path id: 2 2025-06-30T09:17:34.399250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72075186233409549 2025-06-30T09:17:34.399258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 107:0 ProgressState at tablet: 72075186233409549 2025-06-30T09:17:34.399284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 107:0, at schemeshard: 72075186233409549 2025-06-30T09:17:34.399289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 107:0, datashard: 72075186233409552, at schemeshard: 72075186233409549 2025-06-30T09:17:34.399295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 107:0 129 -> 240 2025-06-30T09:17:34.399455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409549, cookie: 107 2025-06-30T09:17:34.399467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409549, cookie: 107 2025-06-30T09:17:34.399471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409549, txId: 107 2025-06-30T09:17:34.399476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409549, txId: 107, pathId: [OwnerId: 72075186233409549, LocalPathId: 1], version: 9 2025-06-30T09:17:34.399481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 5 2025-06-30T09:17:34.399646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409549, cookie: 107 2025-06-30T09:17:34.399656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409549, cookie: 107 2025-06-30T09:17:34.399662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409549, txId: 107 2025-06-30T09:17:34.399666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409549, txId: 107, pathId: [OwnerId: 72075186233409549, LocalPathId: 2], version: 18446744073709551615 2025-06-30T09:17:34.399670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 4 2025-06-30T09:17:34.399679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 107, ready parts: 0/1, is published: true 2025-06-30T09:17:34.400793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72075186233409549 2025-06-30T09:17:34.400803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 107:0 ProgressState, at schemeshard: 72075186233409549 2025-06-30T09:17:34.400872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-30T09:17:34.400902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#107:0 progress is 1/1 2025-06-30T09:17:34.400907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-30T09:17:34.400912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#107:0 progress is 1/1 2025-06-30T09:17:34.400916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-30T09:17:34.400920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 107, ready parts: 1/1, is published: true 2025-06-30T09:17:34.400932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:810:2689] message: TxId: 107 2025-06-30T09:17:34.400937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-30T09:17:34.400942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 107:0 2025-06-30T09:17:34.400947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 107:0 2025-06-30T09:17:34.400965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 2 2025-06-30T09:17:34.401149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409549, cookie: 107 2025-06-30T09:17:34.401369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409549, cookie: 107 2025-06-30T09:17:34.401569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-30T09:17:34.401578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:2203:4045] TestWaitNotification: OK eventTxId 107 2025-06-30T09:17:34.416347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5640: Handle TEvStateChanged, at schemeshard: 72075186233409549, message: Source { RawX1: 782 RawX2: 4294969965 } TabletId: 72075186233409552 State: 4 2025-06-30T09:17:34.416382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409552, state: Offline, at schemeshard: 72075186233409549 
2025-06-30T09:17:34.416761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72075186233409549:4 hive 72057594037968897 at ss 72075186233409549 2025-06-30T09:17:34.417096Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72075186233409549 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409552 2025-06-30T09:17:34.417194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72075186233409549 ShardLocalIdx: 4, at schemeshard: 72075186233409549 2025-06-30T09:17:34.417251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 1 2025-06-30T09:17:34.418180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72075186233409549 2025-06-30T09:17:34.418192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72075186233409549, LocalPathId: 2], at schemeshard: 72075186233409549 2025-06-30T09:17:34.418207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 4 2025-06-30T09:17:34.418722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72075186233409549:4 2025-06-30T09:17:34.418749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72075186233409549:4 tabletId 72075186233409552 2025-06-30T09:17:34.418819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72075186233409549 2025-06-30T09:17:34.571125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6723: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-06-30T09:17:34.571173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-06-30T09:17:34.571194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-06-30T09:17:34.571217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6723: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-30T09:17:34.571226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-30T09:17:34.571234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-30T09:17:34.571246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6723: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-30T09:17:34.571254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-30T09:17:34.571263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-30T09:17:34.653480Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:34.653598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:191: TTxServerlessStorageBilling: make a bill, record: '{"usage":{"start":1600452180,"quantity":59,"finish":1600452239,"type":"delta","unit":"byte*second"},"tags":{"ydb_size":0},"id":"72057594046678944-3-1600452180-1600452239-0","cloud_id":"CLOUD_ID_VAL","source_wt":1600452240,"source_id":"sless-docapi-ydb-storage","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.v1","folder_id":"FOLDER_ID_VAL","version":"1.0.0"} ', schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 2020-09-18T18:04:00.028000Z, LastBillTime: 2020-09-18T18:02:00.000000Z, lastBilled: 2020-09-18T18:02:00.000000Z--2020-09-18T18:02:59.000000Z, toBill: 2020-09-18T18:03:00.000000Z--2020-09-18T18:03:59.000000Z, next retry at: 2020-09-18T18:05:00.000000Z 2025-06-30T09:17:34.654907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete grabMeteringMessage has happened 2025-06-30T09:17:34.654964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:338: tests -- TFakeMetering got TEvMetering::TEvWriteMeteringJson quantity: 59, 59 unit: "byte*second", "byte*second" type: "delta", "delta" >> TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::MargeUnorderedPartitions >> KqpOlapJson::RestoreJsonArrayVariants[2,true,0,0,0,0] [GOOD] >> KqpOlapJson::RestoreJsonArrayVariants[2,true,0,0,0,0.5] >> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition >> KqpOlapJson::QuotedFilterVariants[10,false,0,1000,0,0] [GOOD] >> KqpOlapJson::QuotedFilterVariants[10,false,0,1000,0,0.5] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::Empty [GOOD] Test command err: 2025-06-30T09:17:34.852807Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:17:34.852889Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:17:34.852913Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042af/r3tmp/tmpV4lTrT/pdisk_1.dat 2025-06-30T09:17:34.945289Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:17:34.945544Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:178: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:561:2486] Exhausted 2025-06-30T09:17:34.945561Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:127: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:561:2486] Handle TEvIncrementalRestoreScan::TEvFinished NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvFinished 2025-06-30T09:17:34.945567Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:191: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:561:2486] Finish Done >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,10,0,0] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,10,0,0.5] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:17:34.852594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:34.852619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:34.852623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:34.852628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:34.852636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:34.852639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:34.852647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:34.852661Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:34.852760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:34.852817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:34.868471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:34.868503Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:34.872467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:34.872555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:34.872609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:34.874203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:34.874282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:34.874411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:34.874487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:34.875401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:34.875457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:34.875844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:34.875869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:34.875904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:34.875914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:34.875921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:34.875942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.877384Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:34.898607Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:34.898691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.898770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:34.898778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:34.898822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:34.898836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:34.903173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:34.903234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:34.903289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.903302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:34.903308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:34.903314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:34.906433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.906457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:34.906468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:34.907070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.907084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, 
operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.907090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:34.907099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:34.907835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:34.908358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:34.908412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:34.908635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:34.908666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:34.908675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:34.908745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:34.908755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:34.908795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:34.908807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:34.909373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:34.909387Z node 1 :FLAT_TX_SCHEMESHARD ... 
satisfy waiter [1:640:2564] TestWaitNotification: OK eventTxId 105 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\001" } TestModificationResults wait txId: 106 2025-06-30T09:17:35.011037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\001" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:35.011090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.011164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '01' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', at schemeshard: 72057594046678944 2025-06-30T09:17:35.011727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Split boundary less or equals FromBound of partition: \'01\' <= \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\'" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:35.011777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '01' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-30T09:17:35.011850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-30T09:17:35.011860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-30T09:17:35.011950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-30T09:17:35.011974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-30T09:17:35.011981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:647:2571] TestWaitNotification: OK eventTxId 106 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "UUUUUUUUUUUUUUUT" } TestModificationResults wait txId: 107 2025-06-30T09:17:35.012732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "UUUUUUUUUUUUUUUT" } } } TxId: 107 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:35.012783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: 
/MyRoot/USER_1/Topic1, pathId: , opId: 107:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.012847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 107:1, propose status:StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', at schemeshard: 72057594046678944 2025-06-30T09:17:35.013524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 107, response: Status: StatusInvalidParameter Reason: "Split boundary less or equals FromBound of partition: \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\' <= \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\'" TxId: 107 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:35.013589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 107, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 2025-06-30T09:17:35.013667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-06-30T09:17:35.013675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-06-30T09:17:35.013761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-30T09:17:35.013784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-30T09:17:35.013790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:654:2578] TestWaitNotification: OK eventTxId 107 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\255" } TestModificationResults wait txId: 108 2025-06-30T09:17:35.014615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\255" } } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:35.014666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 108:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.014726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 108:1, propose status:StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AD' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), at schemeshard: 72057594046678944 2025-06-30T09:17:35.015678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 108, response: Status: StatusInvalidParameter Reason: 
"Split boundary greate or equals ToBound of partition: \'AD\' >= \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' (FromBound is \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\')" TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:35.015729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AD' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-30T09:17:35.015805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-30T09:17:35.015813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-30T09:17:35.015885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-30T09:17:35.015901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-30T09:17:35.015907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:661:2585] TestWaitNotification: OK eventTxId 108 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } TestModificationResults wait txId: 109 2025-06-30T09:17:35.016681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } } TxId: 109 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:35.016727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 109:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.016780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 109:1, propose status:StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), at schemeshard: 72057594046678944 2025-06-30T09:17:35.017319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 109, response: Status: StatusInvalidParameter Reason: "Split boundary greate or equals ToBound of partition: \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' >= \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' (FromBound is \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\')" TxId: 109 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:35.017362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 109, 
database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 109, wait until txId: 109 TestWaitNotification wait txId: 109 2025-06-30T09:17:35.017434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 109: send EvNotifyTxCompletion 2025-06-30T09:17:35.017441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 109 2025-06-30T09:17:35.017515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 109, at schemeshard: 72057594046678944 2025-06-30T09:17:35.017535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult 2025-06-30T09:17:35.017541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [1:668:2592] TestWaitNotification: OK eventTxId 109 >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:17:34.822528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:34.822562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:34.822568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:34.822574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:34.822588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:34.822592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:34.822602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:34.822620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-30T09:17:34.822772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:34.822862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:34.839654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:34.839683Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:34.843907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:34.843998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:34.844066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:34.846025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:34.846111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:34.846247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:34.846338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:34.847049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:34.847108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:34.847423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:34.847437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:34.847469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:34.847482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:34.847488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:34.847511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.849311Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:34.873232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:34.873315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.873382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:34.873391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:34.873435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:34.873450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:34.875417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:34.875482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:34.875546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.875561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:34.875568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:34.875576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:34.876173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.876188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:34.876194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:34.876577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.876588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.876596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:34.876605Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:34.877425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:34.877858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:34.877902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:34.878153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:34.878183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:34.878192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:34.878254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:34.878262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:34.878300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:34.878314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:34.878728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:34.878757Z node 1 :FLAT_TX_SCHEMESHARD ... 
-30T09:17:35.425744Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:753: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-30T09:17:35.432523Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409548, partId: 0 2025-06-30T09:17:35.432594Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-06-30T09:17:35.432606Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:623: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-06-30T09:17:35.432620Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:264: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.432624Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:628: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-30T09:17:35.432678Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 104:0 128 -> 240 2025-06-30T09:17:35.432721Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:17:35.432732Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:17:35.433541Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.433663Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:35.433672Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:17:35.433723Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:17:35.433766Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:35.433773Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:210:2210], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-30T09:17:35.433781Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:210:2210], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-30T09:17:35.433792Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.433800Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-30T09:17:35.433818Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:17:35.433821Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:17:35.433827Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:17:35.433829Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:17:35.433833Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-30T09:17:35.433838Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:17:35.433843Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-30T09:17:35.433847Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:0 2025-06-30T09:17:35.433874Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:17:35.433880Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 2, subscribers: 1 2025-06-30T09:17:35.433883Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-30T09:17:35.433886Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-30T09:17:35.434170Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:17:35.434188Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:17:35.434194Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:17:35.434201Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-30T09:17:35.434206Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-30T09:17:35.434384Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:17:35.434394Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:17:35.434398Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:17:35.434401Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:17:35.434406Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:17:35.434415Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 1 2025-06-30T09:17:35.434422Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:411:2377] 2025-06-30T09:17:35.435633Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:17:35.435678Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:17:35.435691Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-30T09:17:35.435697Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [2:548:2483] TestWaitNotification: OK eventTxId 104 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Merge { Partition: 0 AdjacentPartition: 2 } TestModificationResults wait txId: 105 2025-06-30T09:17:35.438496Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Merge { Partition: 0 AdjacentPartition: 2 } } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:35.438574Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 105:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.438637Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 105:1, propose status:StatusInvalidParameter, reason: You cannot merge non-contiguous partitions, at schemeshard: 72057594046678944 2025-06-30T09:17:35.439257Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 105, response: Status: StatusInvalidParameter Reason: "You cannot merge non-contiguous partitions" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 
72057594046678944 2025-06-30T09:17:35.439314Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: You cannot merge non-contiguous partitions, operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-30T09:17:35.439390Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-30T09:17:35.439399Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-30T09:17:35.439500Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-30T09:17:35.439521Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-30T09:17:35.439527Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:642:2566] TestWaitNotification: OK eventTxId 105 >> TSchemeShardTopicSplitMergeTest::MargeUnorderedPartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::MargePartitions2 >> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition [GOOD] >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD] >> TSchemeShardTopicSplitMergeTest::CreateTopicWithOnePartition >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,1000,0,0.5] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,1000,100,0] >> KqpOlapJson::RestoreJsonArrayVariants[2,true,0,0,0,0.5] [GOOD] >> KqpOlapJson::RestoreJsonArrayVariants[2,true,0,0,100,0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:17:34.958409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:34.958438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:34.958445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:34.958451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:34.958464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:34.958469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:34.958480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:34.958496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:34.958620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:34.958706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:34.974442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:34.974465Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:34.979433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:34.979539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:34.979606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:34.982290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:34.982379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:34.982543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:34.982618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:34.983506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:34.983562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:34.983902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:34.983919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:34.983949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:34.983959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:34.983966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:34.983989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:34.985876Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:35.006901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:35.006976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.007037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:35.007045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:35.007094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:35.007110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:35.007738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:35.007782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:35.007824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.007834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:35.007840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:35.007846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:35.008254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.008265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:35.008271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:35.008651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-30T09:17:35.008661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.008667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:35.008675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:35.009366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:35.009814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:35.009860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:35.010092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:35.010119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:35.010128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:35.010195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:35.010202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:35.010233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:35.010249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:35.010792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:35.010803Z node 1 :FLAT_TX_SCHEMESHARD ... 
AT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:647: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionAttachResult CollectPQConfigChanged: false 2025-06-30T09:17:35.799265Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:753: NPQState::TPropose operationId# 105:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-30T09:17:35.799534Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-30T09:17:35.799601Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-30T09:17:35.799609Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-30T09:17:35.799688Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 105, at schemeshard: 72057594046678944 2025-06-30T09:17:35.799693Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 105, ready parts: 0/1, is published: true 2025-06-30T09:17:35.799697Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 105, at schemeshard: 72057594046678944 2025-06-30T09:17:35.847896Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 200, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:35.847941Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 105 AckTo { RawX1: 0 RawX2: 0 } } Step: 200 MediatorID: 72075186233409547 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:35.847958Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:661: NPQState::TPropose operationId# 105:0 HandleReply TEvOperationPlan, step: 200, at tablet: 72057594046678944 2025-06-30T09:17:35.847969Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:753: NPQState::TPropose operationId# 105:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-30T09:17:35.860490Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409548, partId: 0 2025-06-30T09:17:35.860536Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-06-30T09:17:35.860550Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:623: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-06-30T09:17:35.860564Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:264: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 105:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.860569Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_common_pq.cpp:628: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-30T09:17:35.860625Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 105:0 128 -> 240 2025-06-30T09:17:35.860671Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:17:35.861409Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.861477Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:35.861488Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:17:35.861572Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:35.861580Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:210:2210], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-30T09:17:35.861595Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.861604Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 105:0 ProgressState 2025-06-30T09:17:35.861622Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 1/1 2025-06-30T09:17:35.861631Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-30T09:17:35.861638Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 1/1 2025-06-30T09:17:35.861641Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-30T09:17:35.861647Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: false 2025-06-30T09:17:35.861654Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-30T09:17:35.861659Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:0 2025-06-30T09:17:35.861665Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 105:0 2025-06-30T09:17:35.861703Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:17:35.861710Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 105, publications: 1, subscribers: 1 2025-06-30T09:17:35.861715Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 105, [OwnerId: 
72057594046678944, LocalPathId: 3], 3 2025-06-30T09:17:35.862021Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:17:35.862044Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:17:35.862052Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-30T09:17:35.862059Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-30T09:17:35.862066Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:17:35.862085Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 1 2025-06-30T09:17:35.862092Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:411:2377] 2025-06-30T09:17:35.867996Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:17:35.868059Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-30T09:17:35.868067Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:671:2594] TestWaitNotification: OK eventTxId 105 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "W" } TestModificationResults wait txId: 106 2025-06-30T09:17:35.873025Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "W" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:35.873091Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.873137Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Invalid partition status: 2, at schemeshard: 72057594046678944 2025-06-30T09:17:35.873668Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Invalid partition status: 2" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:35.873724Z node 2 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Invalid partition status: 2, operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-30T09:17:35.873799Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-30T09:17:35.873809Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-30T09:17:35.873898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-30T09:17:35.873919Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-30T09:17:35.873924Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:763:2673] TestWaitNotification: OK eventTxId 106 |79.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |79.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |79.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |79.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |79.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |79.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream >> TSchemeShardTopicSplitMergeTest::MargePartitions2 [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,10,0,0.5] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,10,100,0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:17:35.781449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:35.781475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:35.781481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:35.781487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:35.781500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type 
TxMergeTablePartition, limit 10000 2025-06-30T09:17:35.781505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:35.781516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:35.781532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:35.781659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:35.781731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:35.802432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:35.802460Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:35.807058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:35.807127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:35.807177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:35.812037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:35.812133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:35.812286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:35.812356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:35.813229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:35.813286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:35.813628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:35.813641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:35.813668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:35.813680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:35.813687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-06-30T09:17:35.813708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.815179Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:35.837635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:35.837736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.837817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:35.837826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:35.837874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:35.837889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:35.838916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:35.838969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:35.839034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.839046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:35.839052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:35.839058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:35.839572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.839586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:35.839594Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:35.839938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.839947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.839952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:35.839959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:35.840694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:35.841168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:35.841217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:35.841434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:35.841466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:35.841476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:35.841560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:35.841570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:35.841605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:35.841615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:35.842338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-30T09:17:35.842350Z node 1 :FLAT_TX_SCHEMESHARD ... DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 5 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:36.009201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:759:2058] recipient: [1:106:2139] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:762:2058] recipient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:763:2058] recipient: [1:761:2669] Leader for TabletID 72057594046678944 is [1:764:2670] sender: [1:765:2058] recipient: [1:761:2669] 2025-06-30T09:17:36.017815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:36.017849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:36.017856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:36.017862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:36.017868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:36.017873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:36.017882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:36.017895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:36.018018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:36.018093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:36.019504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:36.020019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:36.020076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:36.020118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:36.020125Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:36.020194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:36.020302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:17:36.020330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: USER_1, child name: Topic1, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:17:36.020342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:17:36.020458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-30T09:17:36.020503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 4, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:17:36.020541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, 
LocalPathId: 2] was 3 2025-06-30T09:17:36.020545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-30T09:17:36.020549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:17:36.020565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 14, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:17:36.020713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4706: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4765: KMeansTreeCluster records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.020999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4854: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.021005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4881: SnapshotSteps: snapshots: 0, at 
schemeshard: 72057594046678944 2025-06-30T09:17:36.021013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4908: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.022522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:36.023378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:36.023398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:36.023418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:36.023427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:36.023433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:36.023450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition >> KqpOlapJson::QuotedFilterVariants[10,false,0,1000,0,0.5] [GOOD] >> KqpOlapJson::QuotedFilterVariants[10,false,0,1000,100,0] >> TSchemeShardTopicSplitMergeTest::CreateTopicWithOnePartition [GOOD] >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD] Test command err: 2025-06-30T09:17:35.182468Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:17:35.182620Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:17:35.182661Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004297/r3tmp/tmp6QXJJ6/pdisk_1.dat 2025-06-30T09:17:35.302848Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:17:35.318413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-30T09:17:35.318489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:17:35.318560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:17:35.318568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:17:35.318627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:17:35.318651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:35.318938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:17:35.318994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:17:35.319049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:17:35.319056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:17:35.319061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:35.319065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:35.319144Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:17:35.319149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:17:35.319153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:35.319195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:17:35.319199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:17:35.319203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-30T09:17:35.319208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:35.319740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:35.319842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:35.319891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:17:35.320142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-30T09:17:35.320152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:17:35.320158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-30T09:17:35.336395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-06-30T09:17:35.336429Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:35.336572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage 2025-06-30T09:17:35.336590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:35.337730Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275053759748 != 1751275053759752 2025-06-30T09:17:35.379692Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:62:2109] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:17:35.380077Z node 1 :TX_PROXY DEBUG: 
client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:17:35.380247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:35.380280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:35.391087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:35.463362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-30T09:17:35.463436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-30T09:17:35.463457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-30T09:17:35.463512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:35.463522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-30T09:17:35.463563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-30T09:17:35.463592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-30T09:17:35.463759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-30T09:17:35.463767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 1, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-30T09:17:35.463818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-30T09:17:35.463823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:537:2465], at schemeshard: 72057594046644480, txId: 1, path id: 1 2025-06-30T09:17:35.463934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-30T09:17:35.463943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 1:0 ProgressState 2025-06-30T09:17:35.463954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:17:35.463959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:17:35.463963Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:17:35.463965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:17:35.463971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-30T09:17:35.463975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:17:35.463979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-30T09:17:35.463982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1:0 2025-06-30T09:17:35.463990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-30T09:17:35.463995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publicatio ... r: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { 
SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046644480 2025-06-30T09:17:35.983684Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:62:2109] Handle TEvNavigate describe path /Root/IncrBackupTable 2025-06-30T09:17:35.984781Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:785:2637] HANDLE EvNavigateScheme /Root/IncrBackupTable 2025-06-30T09:17:35.984896Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:785:2637] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-30T09:17:35.984909Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:785:2637] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/IncrBackupTable" 2025-06-30T09:17:35.985118Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:785:2637] Handle TEvDescribeSchemeResult Forward to# [1:557:2483] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/IncrBackupTable" PathDescription { Self { Name: "IncrBackupTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1500 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 
ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-30T09:17:35.985341Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:65: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:787:2639] HandleUserTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: 
result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/IncrBackupTable TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:17:35.985372Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:131: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:787:2639] HandleTargetTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:17:35.985408Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:787:2639] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-30T09:17:35.985427Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:176: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:787:2639] Handle NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvNoMoreData ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::MargePartitions2 [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:17:35.707000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:35.707028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-30T09:17:35.707035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:35.707041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:35.707053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:35.707058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:35.707070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:35.707087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:35.707222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:35.707297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:35.724343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:35.724366Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:35.727759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:35.727822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:35.727866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:35.731794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:35.731932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:35.732084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:35.732156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:35.732836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:35.732884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:35.733183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:35.733195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:35.733222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-06-30T09:17:35.733234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:35.733241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:35.733261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.734647Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:35.757985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:35.758082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.758147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:35.758153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:35.758193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:35.758203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:35.759195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:35.759240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:35.759302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.759314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:35.759321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:35.759327Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:35.759794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.759825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:35.759834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:35.760199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.760209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:35.760216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:35.760224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:35.760831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:35.761880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:35.761924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:35.762104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:35.762127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:35.762134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:35.762184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:35.762190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:35.762220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:35.762230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:35.762654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:35.762664Z node 1 :FLAT_TX_SCHEMESHARD ... 6-30T09:17:36.418849Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:623: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-06-30T09:17:36.418866Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:264: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 105:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.418872Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:628: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-30T09:17:36.418935Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 105:0 128 -> 240 2025-06-30T09:17:36.418984Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:17:36.419577Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.419700Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:36.419709Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:17:36.419792Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:36.419799Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:212:2212], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-30T09:17:36.419895Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.419905Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 105:0 ProgressState 2025-06-30T09:17:36.419923Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 1/1 2025-06-30T09:17:36.419928Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-30T09:17:36.419935Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 1/1 
2025-06-30T09:17:36.419939Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-30T09:17:36.419944Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: false 2025-06-30T09:17:36.419952Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-30T09:17:36.419958Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:0 2025-06-30T09:17:36.419964Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 105:0 2025-06-30T09:17:36.420000Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:17:36.420007Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 105, publications: 1, subscribers: 1 2025-06-30T09:17:36.420013Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-30T09:17:36.420152Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:17:36.420166Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:17:36.420174Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-30T09:17:36.420179Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-30T09:17:36.420185Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:17:36.420200Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 1 2025-06-30T09:17:36.420205Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:412:2377] 2025-06-30T09:17:36.421236Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:17:36.421262Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-30T09:17:36.421269Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:680:2600] TestWaitNotification: OK eventTxId 105 2025-06-30T09:17:36.422566Z node 2 :SCHEMESHARD_DESCRIBE 
DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:17:36.422633Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 84us result status StatusSuccess 2025-06-30T09:17:36.422834Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 5 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" } Status: Inactive ChildPartitionIds: 4 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Inactive ChildPartitionIds: 4 } Partitions { PartitionId: 3 TabletId: 72075186233409548 KeyRange { FromBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Active } Partitions { PartitionId: 4 TabletId: 72075186233409548 KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Active ParentPartitionIds: 1 ParentPartitionIds: 2 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 5 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 5 NextPartitionId: 5 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } Partitions { PartitionId: 3 GroupId: 4 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active 
KeyRange { FromBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } } Partitions { PartitionId: 4 GroupId: 5 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 1 ParentPartitionIds: 2 KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 5 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |79.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |79.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |79.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartNo_Query [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,1000,100,0] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,1000,100,0.5] >> TS3WrapperTests::GetObject >> KqpOlapJson::RestoreJsonArrayVariants[2,true,0,0,100,0] [GOOD] >> KqpOlapJson::RestoreJsonArrayVariants[2,true,0,0,100,0.5] >> TS3WrapperTests::GetObject [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table [GOOD] >> KqpOlapJson::QuotedFilterVariants[10,false,0,1000,100,0] [GOOD] >> KqpOlapJson::QuotedFilterVariants[10,false,0,1000,100,0.5] >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,10,100,0] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,10,100,0.5] >> TS3WrapperTests::AbortUnknownUpload ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 
72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:17:36.854788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:36.854822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:36.854829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:36.854834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:36.854847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:36.854851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:36.854861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:36.854878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:36.855004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:36.855082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:36.870559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:36.870590Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:36.874693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:36.874804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:36.874862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:36.876507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:36.876583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:36.876711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:36.876834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:36.877481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 
2025-06-30T09:17:36.877526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:36.877809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:36.877822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:36.877848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:36.877856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:36.877862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:36.877881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.879420Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:36.901432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:36.901534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.901612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:36.901621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:36.901674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:36.901692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:36.902684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:36.902732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:36.902818Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.902830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:36.902836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:36.902843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:36.903374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.903389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:36.903395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:36.903798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.903811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.903818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:36.903826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:36.904563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:36.905420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:36.905469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:36.905647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:36.905682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:36.905689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: 
NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:36.905741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:36.905747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:36.905777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:36.905787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:36.906212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:36.906220Z node 1 :FLAT_TX_SCHEMESHARD ... Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:37.084912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:37.084918Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:37.084951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:37.085055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:17:37.085080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: USER_1, child name: Topic1, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:17:37.085090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:17:37.085204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-30T09:17:37.085254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 
72057594046678944 2025-06-30T09:17:37.085267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 4, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:17:37.085291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:17:37.085295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-30T09:17:37.085299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:17:37.085312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 14, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:17:37.085454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4706: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4765: KMeansTreeCluster records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4854: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4881: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.085759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4908: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-30T09:17:37.087077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:37.087486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:37.087501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:37.087647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:37.087658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:37.087667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:37.088659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:746:2658] sender: [1:806:2058] recipient: [1:15:2062] 2025-06-30T09:17:37.150193Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:17:37.150291Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 123us result status StatusSuccess 2025-06-30T09:17:37.150492Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 
3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "\177" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\177" } Status: Active ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\177" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\177" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::GetObject [GOOD] Test command err: 2025-06-30T09:17:37.352055Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# F06133D1-9712-4867-BE95-1B1027DF234F, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:27873 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: F3BA639C-6F82-4EA5-905B-E58C88242B02 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2025-06-30T09:17:37.353722Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# F06133D1-9712-4867-BE95-1B1027DF234F, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 
2025-06-30T09:17:37.353836Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# A4CEA4E8-B4F3-4C8A-AB86-75856E2361CD, request# GetObject { Bucket: TEST Key: key Range: bytes=0-3 } REQUEST: GET /TEST/key HTTP/1.1 HEADERS: Host: localhost:27873 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: E04F50E3-F7AF-49FF-B64B-55632E45B53F amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-3 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key / 4 2025-06-30T09:17:37.354457Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# A4CEA4E8-B4F3-4C8A-AB86-75856E2361CD, response# GetObjectResult { } >> TS3WrapperTests::AbortUnknownUpload [GOOD] >> TS3WrapperTests::HeadObject >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Table >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,1000,100,0.5] [GOOD] >> TBSVWithReboots::SimultaneousCreateDropNbs [GOOD] |79.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/wrappers/ut/unittest >> KqpOlapJson::RestoreJsonArrayVariants[2,true,0,0,100,0.5] [GOOD] >> KqpOlapJson::RestoreJsonArrayVariants[2,false,1024,1000,1000000,0] >> KqpScanArrowFormat::SingleKey >> TS3WrapperTests::HeadObject [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortUnknownUpload [GOOD] Test command err: 2025-06-30T09:17:37.782674Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 1DD41BA1-7A1E-42C6-9C9E-DC242E81F97B, request# AbortMultipartUpload { Bucket: TEST Key: key UploadId: uploadId } REQUEST: DELETE /TEST/key?uploadId=uploadId HTTP/1.1 HEADERS: Host: localhost:4698 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 2B2F91D4-4E24-4529-908C-5C9DA48EC3B7 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 6 / /TEST/key / uploadId=uploadId 2025-06-30T09:17:37.784792Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 1DD41BA1-7A1E-42C6-9C9E-DC242E81F97B, response# >> KqpOlapJson::QuotedFilterVariants[10,false,0,1000,100,0.5] [GOOD] >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge [GOOD] >> TS3WrapperTests::CopyPartUpload >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,10,100,0.5] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,10,1000000,0.5] |79.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/wrappers/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::RestoreFirstLevelVariants[10,false,1024,1000,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 3682, MsgBus: 2282 2025-06-30T09:17:31.386498Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669071930904462:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:31.386676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003f36/r3tmp/tmp7GfMEa/pdisk_1.dat 2025-06-30T09:17:31.527883Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table 
profiles were not loaded 2025-06-30T09:17:31.528212Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:31.528236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:31.530846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3682, node 1 2025-06-30T09:17:31.546992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:31.547008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:31.547012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:31.547074Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2282 TClient is connected to server localhost:2282 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:31.594265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 10); 2025-06-30T09:17:31.797087Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669071930905052:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:31.797110Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:31.832160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:17:31.858545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:17:31.858594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:17:31.858610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:17:31.858632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:17:31.858688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:17:31.858691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:17:31.858723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:17:31.858724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:17:31.858767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:17:31.858769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:17:31.858797Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:17:31.858797Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:17:31.858830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:17:31.858834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:17:31.858860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:17:31.858863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:17:31.858901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:17:31.858908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:17:31.858937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:17:31.858938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:17:31.858970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:17:31.858983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:17:31.858998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521669071930905193:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:17:31.859012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7521669071930905184:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:17:31.864750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669071930905207:2302];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:17:31.864788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669071930905207:2302];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:17:31.864857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:752166907193090520 ... eration type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:17:37.797631Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:37.797686Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:37.797724Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:37.797819Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:37.797855Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:37.797893Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:37.797967Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:37.797991Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:37.798079Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:37.798174Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) 
SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `SCAN_FIRST_LEVEL_ONLY`=`true`, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`false`, `COLUMNS_LIMIT`=`1024`, `SPARSED_DETECTOR_KFF`=`1000`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:17:37.807393Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521669096365630396:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:37.807436Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:37.811077Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:17:37.814066Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:37.814078Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:37.814220Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:37.814247Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:37.814331Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:37.814390Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:37.814440Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:37.814550Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:37.814680Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:37.814819Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1", "d" : null, "e.v" : {"c" : 1, "e" : {"c.a" : 2}}}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3", "d" : "d3", "e" : ["a", {"v" : ["c", 5]}]}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}')) 2025-06-30T09:17:37.819998Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521669096365630463:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:37.820052Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:37.820094Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521669096365630468:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:37.821188Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:37.826682Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521669096365630470:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:17:37.908161Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521669096365630521:2675] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:37.930785Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:17:37.930794Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:17:37.930880Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521669096365630106:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=17;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894,72075186224037897;receive=72075186224037889; 2025-06-30T09:17:37.930885Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:17:37.930896Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521669096365630106:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=18;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894,72075186224037897;receive=72075186224037889; 2025-06-30T09:17:37.930915Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521669096365630106:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=20;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894;receive=72075186224037897; 2025-06-30T09:17:37.930928Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521669096365630106:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=21;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894;receive=72075186224037897; 2025-06-30T09:17:37.930988Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":\"{\\\"c\\\":1,\\\"e\\\":{\\\"c.a\\\":2}}\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":\"[\\\"a\\\",{\\\"v\\\":[\\\"c\\\",5]}]\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] OUTPUT: 
[[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":\"{\\\"c\\\":1,\\\"e\\\":{\\\"c.a\\\":2}}\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":\"[\\\"a\\\",{\\\"v\\\":[\\\"c\\\",5]}]\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] INDEX:0/0/0 HEADER:0/0/0 >> TS3WrapperTests::CopyPartUpload [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::HeadObject [GOOD] Test command err: 2025-06-30T09:17:38.229388Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 6D7D4EEE-1A35-440B-A2A0-12C2F7C43D6C, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:10524 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B6F80662-5834-4A72-977D-908D5488A912 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2025-06-30T09:17:38.231473Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 6D7D4EEE-1A35-440B-A2A0-12C2F7C43D6C, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-30T09:17:38.231647Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# B205F620-F40B-457D-A6D4-DBDE63374096, request# HeadObject { Bucket: TEST Key: key } REQUEST: HEAD /TEST/key HTTP/1.1 HEADERS: Host: localhost:10524 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: CF9BF2AB-DD25-4A51-869C-C02653D50E29 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key / 4 2025-06-30T09:17:38.232445Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# B205F620-F40B-457D-A6D4-DBDE63374096, response# HeadObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc ContentLength: 4 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:17:36.425987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:36.426037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:36.426043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:36.426050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:36.426062Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:36.426068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:36.426079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:36.426095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:36.426233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:36.426315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:36.442187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:36.442219Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:36.445920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:36.446016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:36.446070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:36.447828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:36.447911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:36.448046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:36.448118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:36.448780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:36.448828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:36.449115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:36.449128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:36.449159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:36.449168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:36.449174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:36.449196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.450663Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:36.474827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:36.474921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.474998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:36.475007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:36.475057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:36.475072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:36.476118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:36.476165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:36.476229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.476239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:36.476246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:36.476252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:36.476712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.476725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:36.476731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:36.477058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.477067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:36.477074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:36.477082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:36.477836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:36.478261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:36.478305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:36.478513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:36.478540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:36.478548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:36.478608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:36.478615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:36.478650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:36.478662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:36.479061Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:36.479070Z node 1 :FLAT_TX_SCHEMESHARD ... rd__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-30T09:17:37.722795Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-30T09:17:37.722801Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:17:37.722819Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-30T09:17:37.723578Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-30T09:17:37.724796Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-30T09:17:37.724806Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-30T09:17:37.724874Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-30T09:17:37.724890Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-30T09:17:37.724894Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:757:2669] TestWaitNotification: OK eventTxId 105 2025-06-30T09:17:38.297975Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:38.298117Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 3 took 161us result status StatusSuccess 2025-06-30T09:17:38.298365Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { 
ToBound: "\010" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\010" } Status: Active ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\010" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\010" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:38.371789Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:17:38.371928Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 163us result status StatusSuccess 2025-06-30T09:17:38.372151Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { 
LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "\010" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\010" } Status: Active ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\010" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\010" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } PartitionStrategy { PartitionStrategyType: DISABLED } } TestModificationResults wait txId: 106 2025-06-30T09:17:38.373035Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } PartitionStrategy { PartitionStrategyType: DISABLED } } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:38.373102Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-06-30T09:17:38.373142Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Can`t disable auto partitioning., at schemeshard: 72057594046678944 
2025-06-30T09:17:38.375270Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Can`t disable auto partitioning." TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:38.375369Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Can`t disable auto partitioning., operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-30T09:17:38.375508Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-30T09:17:38.375518Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-30T09:17:38.375630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-30T09:17:38.375660Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-30T09:17:38.375667Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:772:2683] TestWaitNotification: OK eventTxId 106 >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Query [GOOD] |79.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |79.1%| [LD] {RESULT} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |79.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::QuotedFilterVariants[10,false,0,1000,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 16339, MsgBus: 61417 2025-06-30T09:17:31.240206Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669070380792389:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:31.240246Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003dac/r3tmp/tmpd8xkLA/pdisk_1.dat 2025-06-30T09:17:31.361831Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:31.362066Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669070380792370:2080] 1751275051240079 != 1751275051240082 TServer::EnableGrpc on GrpcPort 16339, node 1 2025-06-30T09:17:31.374298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:31.374313Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:31.374315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:31.374360Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61417 
2025-06-30T09:17:31.419248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:31.419272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:31.420380Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61417 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:31.451954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:31.459175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 10); 2025-06-30T09:17:31.794274Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669070380792990:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:31.794312Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:31.844200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:17:31.865273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:17:31.865332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:17:31.865385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:17:31.865413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:17:31.865434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:17:31.865468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:17:31.865499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:17:31.865517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:17:31.865549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:17:31.865576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:17:31.865597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:17:31.865619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669070380793126:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:17:31.870097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669070380793153:2301];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:17:31.870128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669070380793153:2301];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:17:31.870179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669070380793153:2301];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:17:31.870215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669070380793153:2301];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:17:31.870239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669070380793153:2301];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:17:31.870262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669070380793153:2301];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:17:31.870289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669070380793153:2301];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:17:31.870303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521669070380793132:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:17:31.870315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669070380793153:2301];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:17:31.870329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669070380793153:2301];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:17:31.870345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521669070380793132:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 
2025-06-30T09:17:31.870347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669070380793153:2301];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:17:31.870360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7521669070380793153:2301];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T0 ... : /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:38.211376Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:38.214675Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:17:38.217769Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:38.217901Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:38.218041Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:38.218169Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:38.218417Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:38.218487Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:38.218579Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:38.218631Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:38.218711Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:38.218714Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`false`, `COLUMNS_LIMIT`=`0`, `SPARSED_DETECTOR_KFF`=`1000`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:17:38.226298Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521669101132362322:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:38.226323Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:38.229629Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:17:38.232510Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:38.232627Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:38.232733Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:38.232867Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:38.232927Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:38.233140Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:38.233150Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:38.233328Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:38.233354Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:38.233474Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a.b.c" : "a1", "b.c.d" : "b1", "c.d.e" : "c1"}')), (2u, JsonDocument('{"a.b.c" : "a2"}')), (3u, JsonDocument('{"b.c.d" : "b3", "d.e.f" : "d3"}')), (4u, JsonDocument('{"b.c.d" : "b4asdsasdaa", "a.b.c" : "a4"}')) 2025-06-30T09:17:38.238546Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521669101132362389:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:38.238572Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:38.238577Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521669101132362394:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:38.239533Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:38.247200Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521669101132362396:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:17:38.311874Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521669101132362447:2676] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:38.336827Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:17:38.336827Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:17:38.336931Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[6:7521669101132361915:2295];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037889;local_tx_no=18;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037893; 2025-06-30T09:17:38.336941Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:17:38.336946Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[6:7521669101132361915:2295];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037889;local_tx_no=19;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037893; 2025-06-30T09:17:38.336958Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[6:7521669101132361915:2295];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037889;local_tx_no=20;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037894; 2025-06-30T09:17:38.336973Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[6:7521669101132361915:2295];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037889;local_tx_no=21;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037894; 2025-06-30T09:17:38.337049Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"a.b.c\"") = "a2" ORDER BY Col1; COMPARE: [[2u;["{\"a.b.c\":\"a2\"}"]]] OUTPUT: [[2u;["{\"a.b.c\":\"a2\"}"]]] INDEX:4/0/0 HEADER:0/0/0 >> KqpScanArrowFormat::AggregateCountStar >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::SimultaneousCreateDropNbs [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 
72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:17:07.670715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:07.670776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:07.670785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:07.670792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:07.670806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:07.670812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:07.670825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:07.670845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:07.670976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:07.671071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:07.686085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:17:07.686115Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:07.686236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:17:07.690304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:07.691430Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:07.691472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:07.693075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:07.693124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:07.693236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:07.693291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:07.694105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:07.694155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:07.694379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:07.694388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:07.694395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:07.694401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:07.694406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:07.694430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:17:07.696082Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:17:07.717977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:07.718145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:07.718226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason 
transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:07.718236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:07.718286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:07.718322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:07.719279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:07.719330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:07.719401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:07.719421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:07.719429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:07.719435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:07.719993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:07.720007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:07.720014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:07.720422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:07.720433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:07.720440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:07.720449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:07.721397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 
18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:07.721841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:07.721880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:07.722125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:07.722154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... t reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:17:38.092718Z node 93 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-30T09:17:38.092778Z node 93 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-30T09:17:38.092830Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:17:38.092854Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 Forgetting tablet 72075186233409548 2025-06-30T09:17:38.093146Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-30T09:17:38.093186Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409549 2025-06-30T09:17:38.093380Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:17:38.093389Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:17:38.093403Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:17:38.093514Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate 
queue, at schemeshard: 72057594046678944 2025-06-30T09:17:38.093524Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:17:38.093556Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:17:38.093599Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:17:38.093673Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:17:38.094105Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-30T09:17:38.094120Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-30T09:17:38.094146Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-30T09:17:38.094151Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-30T09:17:38.094792Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-30T09:17:38.094811Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-30T09:17:38.094834Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-30T09:17:38.094846Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-30T09:17:38.094894Z node 93 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-30T09:17:38.094914Z node 93 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:17:38.094927Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:17:38.094934Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:17:38.094954Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:17:38.096240Z node 93 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1004, wait until txId: 1004 
TestWaitNotification wait txId: 1003 2025-06-30T09:17:38.096343Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:17:38.096354Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 TestWaitNotification wait txId: 1004 2025-06-30T09:17:38.096374Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-30T09:17:38.096378Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-30T09:17:38.096481Z node 93 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:17:38.096507Z node 93 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-30T09:17:38.096517Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:17:38.096524Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [93:533:2489] 2025-06-30T09:17:38.096542Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:17:38.096546Z node 93 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [93:533:2489] TestWaitNotification: OK eventTxId 1003 TestWaitNotification: OK eventTxId 1004 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted 2025-06-30T09:17:38.096617Z node 93 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-30T09:17:38.096634Z node 93 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-30T09:17:38.096640Z node 93 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 2025-06-30T09:17:38.096739Z node 93 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:17:38.096789Z node 93 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/USER_0" took 73us result status StatusPathDoesNotExist 2025-06-30T09:17:38.096840Z node 93 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/DirA/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/DirA\' (id: [OwnerId: 72057594046678944, LocalPathId: 2]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/DirA/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/DirA" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self 
{ Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:17:38.096906Z node 93 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:17:38.096935Z node 93 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 32us result status StatusSuccess 2025-06-30T09:17:38.097025Z node 93 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpOlapJson::RestoreJsonArrayVariants[2,false,1024,1000,1000000,0] [GOOD] >> KqpOlapJson::RestoreJsonArrayVariants[2,false,1024,1000,1000000,0.5] >> KqpScanArrowFormat::AllTypesColumns ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::CopyPartUpload [GOOD] Test command err: 2025-06-30T09:17:38.575068Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# B99856B0-5B56-471A-A41C-857A5DCCFCAB, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:1088 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7EF800B2-21DB-466E-9E59-4A1BFC4BEFD9 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD 
S3_MOCK::HttpServeWrite: /TEST/key / / 4 2025-06-30T09:17:38.576945Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# B99856B0-5B56-471A-A41C-857A5DCCFCAB, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-30T09:17:38.577115Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 6465C853-17D6-46D6-8CE7-D5FD79EF8F6C, request# CreateMultipartUpload { Bucket: TEST Key: key1 } REQUEST: POST /TEST/key1?uploads HTTP/1.1 HEADERS: Host: localhost:1088 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C28B3C43-2041-4FB7-88DC-F5791BD5BEF2 amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key1 / uploads= 2025-06-30T09:17:38.577829Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 6465C853-17D6-46D6-8CE7-D5FD79EF8F6C, response# CreateMultipartUploadResult { Bucket: Key: TEST/key1 UploadId: 1 } 2025-06-30T09:17:38.577931Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 5E170DC2-9AC9-4BD5-99F0-1DC25DAE846F, request# UploadPartCopy { Bucket: TEST Key: key1 UploadId: 1 PartNumber: 1 } REQUEST: PUT /TEST/key1?partNumber=1&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:1088 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C0536248-92E8-446B-BEA8-5F907E1BB2EB amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-copy-source: /TEST/key x-amz-copy-source-range: bytes=1-2 S3_MOCK::HttpServeWrite: /TEST/key1 / partNumber=1&uploadId=1 / 0 2025-06-30T09:17:38.578591Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 5E170DC2-9AC9-4BD5-99F0-1DC25DAE846F, response# UploadPartCopyResult { } 2025-06-30T09:17:38.578673Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# B922865F-C188-4CC1-B323-165154674286, request# CompleteMultipartUpload { Bucket: TEST Key: key1 UploadId: 1 MultipartUpload: { Parts: [afc7e8a98f75755e513d9d5ead888e1d] } } REQUEST: POST /TEST/key1?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:1088 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 8B86234E-78F5-42EF-8A38-D123687E995B amz-sdk-request: attempt=1 content-length: 235 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /TEST/key1 / uploadId=1 2025-06-30T09:17:38.579420Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# B922865F-C188-4CC1-B323-165154674286, response# CompleteMultipartUploadResult { Bucket: Key: TEST/key1 ETag: afc7e8a98f75755e513d9d5ead888e1d } 2025-06-30T09:17:38.579507Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 4A0A7AE5-BC26-4C34-B3E0-FC403AECDA4B, request# GetObject { Bucket: TEST Key: key1 Range: bytes=0-1 } REQUEST: GET /TEST/key1 HTTP/1.1 HEADERS: Host: localhost:1088 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 20C0510A-E73B-411C-B3F8-8D3F8E195A67 amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-1 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic 
x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key1 / 2 2025-06-30T09:17:38.580105Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 4A0A7AE5-BC26-4C34-B3E0-FC403AECDA4B, response# GetObjectResult { } >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,10,1000000,0.5] [GOOD] >> KqpScanArrowFormat::SingleKey [GOOD] >> KqpScanArrowFormat::JoinWithParams >> TxUsage::WriteToTopic_Demo_46_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_4_Table [GOOD] >> TxUsage::WriteToTopic_Demo_46_Query >> KqpOlapJson::RestoreJsonArrayVariants[2,false,1024,1000,1000000,0.5] [GOOD] >> KqpScanArrowInChanels::AllTypesColumns >> TxUsage::Sinks_Oltp_WriteToTopic_4_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::DoubleFilterReduceScopeVariants[2,true,0,10,1000000,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 2532, MsgBus: 3584 2025-06-30T09:17:31.348269Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669073471085315:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:31.348294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003baa/r3tmp/tmpBUjBxc/pdisk_1.dat 2025-06-30T09:17:31.404688Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2532, node 1 2025-06-30T09:17:31.421715Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:31.421732Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:31.421734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:31.421789Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:17:31.450455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:31.450485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:31.453710Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3584 TClient is connected to server localhost:3584 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:31.567761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, Col3 UTF8, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2); 2025-06-30T09:17:31.794195Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669073471085897:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:31.794235Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:31.826241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:17:31.837348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:17:31.837429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:17:31.837473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:17:31.837494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:17:31.837510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:17:31.837534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:17:31.837553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:17:31.837586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:17:31.837613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:17:31.837627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:17:31.837647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:17:31.837667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669073471085968:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:17:31.838177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:17:31.838191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:17:31.838202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:17:31.838205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:17:31.838219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:17:31.838222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:17:31.838230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:17:31.838233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:17:31.838238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:17:31.838242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:17:31.838259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:17:31.838262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:17:31.838276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:17:31.838292Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:17:31.838306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:17:31.838311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:17:31.838319Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:17:31.838326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdat ... act.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:17:39.025557Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:17:39.025575Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:17:39.025581Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:17:39.025594Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:17:39.025601Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:17:39.025609Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:17:39.025611Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:17:39.025617Z node 7 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:17:39.025621Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:17:39.025624Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:17:39.025669Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:17:39.025676Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:17:39.025685Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:17:39.025688Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:17:39.064583Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521669105354319273:2295];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=125924126963168;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275059064;max=18446744073709551615;plan=0;src=[7:7521669101059351625:2149];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:17:39.064601Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[7:7521669105354319268:2294];ev=NActors::IEventHandle;tablet_id=72075186224037889;tx_id=281474976715658;this=125924100809024;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275059064;max=18446744073709551615;plan=0;src=[7:7521669101059351625:2149];cookie=22:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:17:39.066599Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:17:39.066609Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:17:39.068852Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:17:39.068933Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:17:39.073393Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521669105354319368:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.073422Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.077184Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:17:39.079197Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:39.079213Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`, `COLUMNS_LIMIT`=`0`, `SPARSED_DETECTOR_KFF`=`10`, `MEM_LIMIT_CHUNK`=`1000000`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:17:39.084214Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521669105354319402:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.084246Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.087453Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:17:39.093455Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:39.093493Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2, Col3) VALUES(1u, JsonDocument('{"a" : "value_a", "b" : "b1", "c" : "c1"}'), "value1"), (2u, JsonDocument('{"a" : "value_a"}'), "value1"), (3u, JsonDocument('{"a" : "value_a", "b" : "value_b"}'), "value2"), (4u, JsonDocument('{"b" : "value_b", "a" : "a4"}'), "value4") 2025-06-30T09:17:39.099107Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521669105354319437:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.099139Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.099256Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521669105354319442:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.100323Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:39.109268Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521669105354319444:2324], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:17:39.169201Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521669105354319495:2454] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:39.202343Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:17:39.202555Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "value_a" AND Col3 = "value2" ORDER BY Col1; COMPARE: [[3u;["{\"a\":\"value_a\",\"b\":\"value_b\"}"];["value2"]]] OUTPUT: [[3u;["{\"a\":\"value_a\",\"b\":\"value_b\"}"];["value2"]]] INDEX:4/0/0 HEADER:0/0/0 >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table [GOOD] >> KqpScanArrowFormat::JoinWithParams [GOOD] >> KqpScanArrowInChanels::AggregateCountStar >> KqpScanArrowInChanels::AggregateNoColumn ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::RestoreJsonArrayVariants[2,false,1024,1000,1000000,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 26139, MsgBus: 27766 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003f20/r3tmp/tmp5f3xea/pdisk_1.dat 2025-06-30T09:17:33.048100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:17:33.183790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:33.183811Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:33.204510Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:33.206132Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669074946917958:2080] 1751275052584214 != 1751275052584217 2025-06-30T09:17:33.206299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26139, node 1 2025-06-30T09:17:33.442912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:33.443155Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:33.443157Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:33.443193Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:17:33.586924Z node 1 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27766 TClient is connected to server localhost:27766 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:34.264681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2); 2025-06-30T09:17:34.930694Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669083536853187:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:34.930725Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:34.976512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:17:34.994479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:17:34.994557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:17:34.994627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:17:34.994647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:17:34.994662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:17:34.994666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:17:34.994683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:17:34.994688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:17:34.994705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:17:34.994730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:17:34.994756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 
2025-06-30T09:17:34.994791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:17:34.994793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:17:34.994815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:17:34.994819Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:17:34.994830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:17:34.994841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:17:34.994849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521669083536853257:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:17:34.994872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:17:34.994899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:17:34.994924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:17:34.994948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:17:34.994970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:17:34.994993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521669083536853271:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:17:34.995604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:17:34.995622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:17:34.995637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:78 ... g.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:17:39.910579Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:17:39.910587Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:17:39.910611Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:17:39.910619Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:17:39.910639Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:17:39.910647Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:17:39.910660Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:17:39.910668Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:17:39.910675Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:17:39.910680Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:17:39.910685Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:17:39.910722Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:17:39.910733Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:17:39.910757Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:17:39.910764Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:17:39.915677Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[6:7521669108094952037:2295];ev=NActors::IEventHandle;tablet_id=72075186224037889;tx_id=281474976715658;this=56017026877632;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275059915;max=18446744073709551615;plan=0;src=[6:7521669108094951669:2137];cookie=22:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:17:39.915756Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521669108094952033:2294];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=56017026879232;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275059915;max=18446744073709551615;plan=0;src=[6:7521669108094951669:2137];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:17:39.923800Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:17:39.924675Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:17:39.924745Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:17:39.925378Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:17:39.934894Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:17:39.935508Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521669108094952136:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, 
issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.935523Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.940658Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:17:39.940857Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `SCAN_FIRST_LEVEL_ONLY`=`false`, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`false`, `COLUMNS_LIMIT`=`1024`, `SPARSED_DETECTOR_KFF`=`1000`, `MEM_LIMIT_CHUNK`=`1000000`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:17:39.949509Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:17:39.949959Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521669108094952168:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.950092Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.956104Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:17:39.956225Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('["a", {"v" : 4}, 1,2,3,4,5,6,7,8,9,10,11,12]')) 2025-06-30T09:17:39.963093Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521669108094952203:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.963111Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.963241Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521669108094952208:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.963924Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:39.972027Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521669108094952210:2324], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:17:40.071891Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521669112389919557:2452] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:40.119264Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1; COMPARE: [[1u;["[\"a\",{\"v\":\"4\"},\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\"]"]]] OUTPUT: [[1u;["[\"a\",{\"v\":\"4\"},\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\"]"]]] INDEX:0/0/0 HEADER:0/0/0 >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query |79.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/wrappers/ut/unittest >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query |79.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/wrappers/ut/unittest >> TCdcStreamTests::VirtualTimestamps >> KqpScanArrowFormat::AggregateCountStar [GOOD] >> KqpScanArrowFormat::AggregateByColumn >> TS3WrapperTests::AbortMultipartUpload >> KqpScanArrowInChanels::AggregateCountStar [GOOD] >> KqpScanArrowInChanels::AggregateByColumn >> DataShardSnapshots::MvccSnapshotAndSplit >> TS3WrapperTests::AbortMultipartUpload [GOOD] >> TCdcStreamTests::VirtualTimestamps [GOOD] >> TCdcStreamTests::ResolvedTimestamps >> TCdcStreamTests::Basic >> KqpScanArrowFormat::AllTypesColumns [GOOD] >> KqpScanArrowFormat::AllTypesColumnsCellvec |79.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sysview_reboots/unittest |79.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |79.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortMultipartUpload [GOOD] Test command err: 2025-06-30T09:17:42.796416Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# E0DAFA69-1FE2-4030-B97B-E3DEA72D5203, request# CreateMultipartUpload { Bucket: TEST Key: key } REQUEST: POST /TEST/key?uploads HTTP/1.1 HEADERS: Host: localhost:23076 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 979C5E2E-1A7B-4B1A-9B6E-D4FAE5D09218 amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key / uploads= 2025-06-30T09:17:42.799907Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# E0DAFA69-1FE2-4030-B97B-E3DEA72D5203, response# CreateMultipartUploadResult { Bucket: Key: TEST/key UploadId: 1 } 2025-06-30T09:17:42.800025Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 90D03B75-1240-41C4-9E72-D2005BB930FE, request# AbortMultipartUpload { Bucket: TEST Key: key UploadId: 1 } REQUEST: DELETE 
/TEST/key?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:23076 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B9B0F61A-A39F-48D1-8F99-D17C72C915E7 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 6 / /TEST/key / uploadId=1 2025-06-30T09:17:42.800637Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 90D03B75-1240-41C4-9E72-D2005BB930FE, response# AbortMultipartUploadResult { } 2025-06-30T09:17:42.800702Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 4726A83F-DFC7-4644-9B9B-E41A15DE1124, request# HeadObject { Bucket: TEST Key: key } REQUEST: HEAD /TEST/key HTTP/1.1 HEADERS: Host: localhost:23076 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 1100AD95-33E7-4C38-9DE0-7383316979B6 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 2025-06-30T09:17:42.802184Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 4726A83F-DFC7-4644-9B9B-E41A15DE1124, response# No response body. |79.1%| [LD] {RESULT} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |79.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sysview_reboots/unittest >> TCdcStreamTests::ResolvedTimestamps [GOOD] >> TCdcStreamTests::SchemaChanges >> KqpScanArrowInChanels::AllTypesColumns [GOOD] >> KqpScanArrowInChanels::SingleKey |79.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sysview_reboots/unittest |79.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sysview_reboots/unittest |79.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |79.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |79.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql >> KqpScanArrowInChanels::AggregateByColumn [GOOD] >> TCdcStreamTests::SchemaChanges [GOOD] >> TCdcStreamTests::RetentionPeriod >> TCdcStreamTests::Basic [GOOD] >> TCdcStreamTests::Attributes >> KqpScanArrowInChanels::AggregateNoColumn [GOOD] >> KqpScanArrowInChanels::AggregateNoColumnNoRemaps >> KqpScanArrowFormat::AggregateByColumn [GOOD] >> KqpScanArrowFormat::AggregateNoColumn |79.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sysview_reboots/unittest >> TCdcStreamTests::Attributes [GOOD] >> TCdcStreamTests::DocApi >> KqpScanArrowFormat::AllTypesColumnsCellvec [GOOD] >> KqpScanArrowFormat::AggregateNoColumnNoRemaps ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::AggregateByColumn [GOOD] Test command err: Trying to start YDB, gRPC: 26379, MsgBus: 11832 2025-06-30T09:17:38.445490Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669102839271121:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:38.445563Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/001a9d/r3tmp/tmpi4Xa5M/pdisk_1.dat 2025-06-30T09:17:38.520308Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:38.520411Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669102839271096:2080] 1751275058445283 != 1751275058445286 TServer::EnableGrpc on GrpcPort 26379, node 1 2025-06-30T09:17:38.536278Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:38.536294Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:38.536297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:38.536356Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:17:38.549258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:38.549296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:38.550295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11832 TClient is connected to server localhost:11832 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:38.596905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:38.609098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:17:38.630936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:38.666417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:38.679169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:38.940937Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669102839272682:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:38.940970Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:38.998549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:39.006467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:39.063549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:39.073285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:39.088368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:39.103721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:39.116612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:39.139375Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669107134240650:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.139420Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.139552Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669107134240655:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:39.140598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:39.143948Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669107134240657:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:17:39.236071Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669107134240708:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:39.448042Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:39.480008Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275059520, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 13825, MsgBus: 26476 2025-06-30T09:17:39.972630Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669105301695904:2177];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:39.985929Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a9d/r3tmp/tmp6oSkLw/pdisk_1.dat 2025-06-30T09:17:40.023738Z node 2 :IMPORT WAR ... /workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:42.514273Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:42.615510Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275062523, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 6389, MsgBus: 18382 2025-06-30T09:17:42.828062Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669121607828979:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:42.828095Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a9d/r3tmp/tmp9Tg47l/pdisk_1.dat 2025-06-30T09:17:42.846971Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6389, node 4 2025-06-30T09:17:42.859286Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:42.859302Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:42.859304Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:42.859354Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:18382 TClient is connected to server localhost:18382 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:17:42.933385Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:42.933418Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:42.933960Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:42.934670Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:42.936029Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:17:42.941323Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:17:42.959946Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.993639Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:17:43.009229Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.334210Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669125902797843:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.334239Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.345359Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.402136Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.414661Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.428454Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.442184Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.455945Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.470102Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.488138Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669125902798498:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.488169Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.488234Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669125902798503:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.489109Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:43.497412Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521669125902798505:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:17:43.563408Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669125902798556:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:43.963830Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:43.990725Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275063839, txId: 281474976715672] shutting down >> TCdcStreamTests::RetentionPeriod [GOOD] >> TCdcStreamTests::TopicPartitions |79.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sysview_reboots/unittest >> KqpScanArrowInChanels::SingleKey [GOOD] >> KqpScanArrowInChanels::JoinWithParams >> TCdcStreamTests::DocApi [GOOD] >> TCdcStreamTests::DocApiNegative |79.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sysview_reboots/unittest >> TCdcStreamTests::DocApiNegative [GOOD] >> TCdcStreamTests::Negative >> DataShardSnapshots::MvccSnapshotAndSplit [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites+UseSink |79.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sysview_reboots/unittest |79.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |79.2%| [LD] {RESULT} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |79.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb >> TCdcStreamTests::Negative [GOOD] >> TCdcStreamTests::DisableProtoSourceIdInfo >> TCdcStreamTests::TopicPartitions [GOOD] >> TCdcStreamTests::ReplicationAttribute >> TCdcStreamTests::DisableProtoSourceIdInfo [GOOD] >> TCdcStreamTests::CreateStream >> KqpScanArrowInChanels::AggregateNoColumnNoRemaps [GOOD] >> KqpScanArrowInChanels::AggregateWithFunction >> KqpScanArrowFormat::AggregateNoColumn [GOOD] >> KqpScanArrowFormat::AggregateEmptySum >> KqpScanArrowInChanels::JoinWithParams [GOOD] >> TCdcStreamTests::CreateStream [GOOD] >> TCdcStreamTests::AlterStream >> KqpScanArrowFormat::AggregateNoColumnNoRemaps [GOOD] >> KqpScanArrowFormat::AggregateWithFunction >> LocalPartition::Restarts [GOOD] >> LocalPartition::WithoutPartitionWithRestart >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table [GOOD] >> TCdcStreamTests::ReplicationAttribute [GOOD] >> TCdcStreamTests::StreamOnIndexTableNegative >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites-UseSink >> TCdcStreamTests::AlterStream [GOOD] >> TCdcStreamTests::DropStream ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::JoinWithParams [GOOD] Test command err: Trying to start YDB, gRPC: 19841, MsgBus: 29455 2025-06-30T09:17:41.109070Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669115236878302:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:41.143170Z node 1 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a6e/r3tmp/tmpt0QEAk/pdisk_1.dat 2025-06-30T09:17:41.500681Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:41.504210Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669115236878114:2080] 1751275061096360 != 1751275061096363 2025-06-30T09:17:41.504881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:41.504908Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:41.505586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19841, node 1 2025-06-30T09:17:41.627238Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:41.627250Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:41.627252Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:41.627289Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29455 2025-06-30T09:17:42.117347Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29455 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:42.461709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:17:42.468507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:17:42.470765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:42.572935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:42.648972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:42.710162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:42.828930Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669119531847029:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:42.828956Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:42.885505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.897426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.915759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.927941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.988349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.051373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.073212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.147083Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669123826814988:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.147107Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.147517Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669123826814993:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.151894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:43.155161Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669123826814995:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:17:43.249235Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669123826815048:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:43.447197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 864000000000 2025-06-30T09:17:43.529559Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275063573, txId: 281474976710674] shutting down Trying to start YDB, gRPC: 29 ... path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:44.711448Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275064756, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 25549, MsgBus: 22664 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a6e/r3tmp/tmpyGDazR/pdisk_1.dat 2025-06-30T09:17:45.039396Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521669134160726429:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:45.040997Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:17:45.101950Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25549, node 3 2025-06-30T09:17:45.123723Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:45.123738Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:45.123741Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:45.123796Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:17:45.151598Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:45.151640Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:45.152073Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22664 TClient is connected to server localhost:22664 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:45.323366Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:45.339095Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:17:45.351557Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:45.383816Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:45.419684Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:45.457893Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:45.623241Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521669134160727977:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:45.623273Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:45.632795Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:45.645273Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:45.656400Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:45.670191Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:45.682345Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:45.701299Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:45.713282Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:45.739657Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521669134160728629:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:45.739681Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:45.739745Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521669134160728634:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:45.741053Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:45.744788Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521669134160728636:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:17:45.835945Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521669134160728687:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:46.043466Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:46.144560Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275066177, txId: 281474976715672] shutting down 2025-06-30T09:17:46.256498Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275066289, txId: 281474976715674] shutting down >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Table [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table >> TCdcStreamTests::StreamOnIndexTableNegative [GOOD] >> TCdcStreamTests::StreamOnIndexTable >> KqpScanArrowInChanels::AggregateWithFunction [GOOD] >> KqpScanArrowInChanels::AggregateEmptySum >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query [GOOD] |79.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/control/ut/ydb-core-control-ut |79.2%| [LD] {RESULT} $(B)/ydb/core/control/ut/ydb-core-control-ut |79.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/control/ut/ydb-core-control-ut >> KqpScanArrowFormat::AggregateEmptySum [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Query >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table >> TCdcStreamTests::StreamOnIndexTable [GOOD] >> TCdcStreamTests::StreamOnBuildingIndexTable >> KqpScanArrowFormat::AggregateWithFunction [GOOD] >> TxUsage::WriteToTopic_Demo_46_Query [GOOD] >> TCdcStreamTests::DropStream [GOOD] >> TCdcStreamTests::AlterStreamImplShouldFail ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowFormat::AggregateEmptySum [GOOD] Test command err: Trying to start YDB, gRPC: 10015, MsgBus: 7282 2025-06-30T09:17:39.424713Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669108043611567:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:39.424773Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a87/r3tmp/tmpj44OYC/pdisk_1.dat 2025-06-30T09:17:39.761631Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:39.764480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:39.764501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:39.765782Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669108043611548:2080] 1751275059424545 != 1751275059424548 
2025-06-30T09:17:39.766367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10015, node 1 2025-06-30T09:17:39.804755Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:39.804771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:39.804773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:39.804820Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7282 2025-06-30T09:17:40.427982Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7282 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:40.644574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:40.683934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:17:40.714062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:40.876563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:17:41.020207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:41.111635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:41.588489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669116633547782:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:41.588519Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:41.820305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:41.872606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:41.886204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:41.912922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:41.933611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:41.968589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.003307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.036098Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669120928515732:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:42.036122Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:42.036283Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669120928515737:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:42.040456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:42.048668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-30T09:17:42.048768Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669120928515739:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:17:42.138018Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669120928515790:3410] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:42.598755Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275062530, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 25526, MsgBus: 11519 2025-06-30T09:17:43.077675Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669122971574089:2070];send_to=[0:7307199536658146131:7762515]; 2025-0 ... oChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:45.414956Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:46.339967Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275065659, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 6395, MsgBus: 6950 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a87/r3tmp/tmpT6xngi/pdisk_1.dat 2025-06-30T09:17:46.867187Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:17:46.899498Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521669138371612051:2080] 1751275066829154 != 1751275066829157 2025-06-30T09:17:46.901452Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6395, node 4 2025-06-30T09:17:46.940419Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:46.940428Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:46.940430Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:46.940475Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:17:46.951328Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:46.951354Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:46.955198Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6950 TClient is connected to server localhost:6950 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:17:47.083570Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:47.085447Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:17:47.095602Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:47.116616Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:47.159861Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:47.195886Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:47.451367Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669142666580942:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:47.451420Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:47.455589Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:47.466626Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:47.491989Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:47.533068Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:47.567213Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:47.581967Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:47.594520Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:47.627517Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669142666581596:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:47.627549Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:47.627761Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669142666581601:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:47.628648Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:47.632195Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:17:47.632312Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521669142666581603:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:17:47.691478Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669142666581654:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:47.847222Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:48.064309Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275068011, txId: 281474976715672] shutting down >> TxUsage::WriteToTopic_Demo_47_Table |79.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |79.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |79.2%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring >> TCdcStreamTests::StreamOnBuildingIndexTable [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanEnabled >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Table [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesRestart+UseSink >> KqpScanArrowInChanels::AggregateEmptySum [GOOD] >> THiveTest::TestDrain >> THiveTest::TestHiveBalancer ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowFormat::AggregateWithFunction [GOOD] Test command err: Trying to start YDB, gRPC: 18879, MsgBus: 26632 2025-06-30T09:17:39.782893Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669106154447222:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:39.782915Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a80/r3tmp/tmpyl4Fkc/pdisk_1.dat 2025-06-30T09:17:40.247311Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669106154447198:2080] 1751275059782643 != 1751275059782646 2025-06-30T09:17:40.284812Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:40.290852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:40.291093Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:40.295673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18879, node 1 2025-06-30T09:17:40.439786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:40.439799Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-30T09:17:40.439800Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:40.439845Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26632 2025-06-30T09:17:40.825389Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26632 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:41.047082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:41.058441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:17:41.071786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:41.302831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:41.508481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:17:41.560301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:42.487214Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669119039350724:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:42.487248Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:42.555769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.565401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.575275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.591738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.646732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.657592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.672245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:42.688219Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669119039351379:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:42.688260Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:42.688273Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669119039351384:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:42.689249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:42.691447Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669119039351386:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:17:42.780965Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669119039351437:3413] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:42.967870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 864000000000 2025-06-30T09:17:43.072083Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275063111, txId: 281474976710674] shutting down Trying to start YDB, gRPC: 72 ... hanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:45.843122Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:47.078364Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275066296, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 15806, MsgBus: 4141 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a80/r3tmp/tmp816IVV/pdisk_1.dat 2025-06-30T09:17:47.487936Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:17:47.506101Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:47.506552Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521669140765947377:2080] 1751275067452821 != 1751275067452824 TServer::EnableGrpc on GrpcPort 15806, node 4 2025-06-30T09:17:47.531407Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:47.531419Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:47.531420Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:47.531470Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4141 2025-06-30T09:17:47.573325Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:47.573354Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:47.576296Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4141 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:47.663323Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:47.671203Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:17:47.677221Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:47.747679Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:47.778996Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:47.799947Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:47.975519Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669140765948979:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:47.975609Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:47.979607Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:47.997796Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.055375Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.072917Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.085694Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.098619Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.117564Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.155321Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669145060916931:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:48.155345Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:48.155440Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669145060916936:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:48.156180Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:48.159481Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:17:48.160457Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521669145060916938:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:17:48.240694Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669145060916989:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:48.459223Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:48.646850Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275068536, txId: 281474976715672] shutting down >> TCdcStreamTests::AlterStreamImplShouldFail [GOOD] >> TCdcStreamTests::DropStreamImplShouldFail >> TCutHistoryRestrictions::BasicTest >> TCutHistoryRestrictions::BasicTest [GOOD] >> TCutHistoryRestrictions::EmptyAllowList [GOOD] >> TCutHistoryRestrictions::EmptyDenyList [GOOD] >> TCutHistoryRestrictions::SameTabletInBothLists [GOOD] >> TCutHistoryRestrictions::BothListsEmpty [GOOD] >> ObjectDistribution::TestImbalanceCalcualtion [GOOD] >> ObjectDistribution::TestAllowedDomainsAndDown >> TCdcStreamWithInitialScanTests::InitialScanEnabled [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanDisabled >> TCdcStreamTests::DropStreamImplShouldFail [GOOD] >> TCdcStreamTests::CopyTableShouldNotCopyStream >> ObjectDistribution::TestAllowedDomainsAndDown [GOOD] >> ObjectDistribution::TestAddSameNode [GOOD] >> ObjectDistribution::TestManyIrrelevantNodes >> TCdcStreamWithInitialScanTests::InitialScanDisabled [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanProgress >> THiveTest::TestLocalDisconnect >> TxUsage::Sinks_Oltp_WriteToTopic_4_Query [GOOD] >> THeavyPerfTest::TTestLoadEverything >> TCdcStreamTests::CopyTableShouldNotCopyStream [GOOD] >> TCdcStreamTests::MoveTableShouldFail >> THiveTest::TestNoMigrationToSelf ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::AggregateEmptySum [GOOD] Test command err: Trying to start YDB, gRPC: 63756, MsgBus: 63903 2025-06-30T09:17:42.473542Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669121823936787:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:42.473595Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a63/r3tmp/tmpNk5vct/pdisk_1.dat 2025-06-30T09:17:42.590588Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669121823936620:2080] 1751275062470212 != 1751275062470215 2025-06-30T09:17:42.594459Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63756, node 1 2025-06-30T09:17:42.606986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:42.607003Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-30T09:17:42.607006Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:42.607056Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63903 TClient is connected to server localhost:63903 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:17:42.662687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:42.662712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:42.663785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:42.668203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:42.677100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:42.695618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:42.721234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:17:42.734435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:43.050999Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669126118905532:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.051092Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.060013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.075237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.133783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.145633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.155372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.170281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.184071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.201490Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669126118906188:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.201535Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.201777Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669126118906193:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:43.202581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:43.209827Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669126118906195:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:17:43.266005Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669126118906246:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:43.473549Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:44.113732Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275063615, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 12009, MsgBus: 25264 2025-06-30T09:17:44.384235Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669127093693569:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:44.386043Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a63/r3tmp/tmpl521Kk/pdisk_1.dat 2025-06-30T09:17:44.410266Z node 2 :IMPORT WAR ... e_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:47.578930Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:47.947254Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275067857, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 19142, MsgBus: 8706 2025-06-30T09:17:48.322992Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669143378053920:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:48.324863Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a63/r3tmp/tmpeHQ7rX/pdisk_1.dat 2025-06-30T09:17:48.355118Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19142, node 4 2025-06-30T09:17:48.381091Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:48.381101Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:48.381102Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:48.381156Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8706 2025-06-30T09:17:48.432470Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:48.432498Z 
node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:48.432955Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8706 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:48.483622Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:48.490615Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:17:48.511168Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:48.541515Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:17:48.597098Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:17:48.620547Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.721288Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669143378055421:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:48.721315Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:48.724864Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.736178Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.754706Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.769791Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.786400Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.798513Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.858344Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:48.889392Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669143378056076:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:48.889421Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:48.889537Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669143378056081:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:48.890507Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:48.894136Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-30T09:17:48.894181Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521669143378056083:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:17:48.995590Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669143378056134:3395] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:49.363012Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:49.440054Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275069348, txId: 281474976710672] shutting down >> TxUsage::Sinks_Oltp_WriteToTopic_5_Table >> TCdcStreamWithInitialScanTests::InitialScanProgress [GOOD] >> TCdcStreamWithInitialScanTests::WithoutPqTransactions >> TCdcStreamTests::MoveTableShouldFail [GOOD] >> TCdcStreamTests::CheckSchemeLimits >> DataShardSnapshots::MvccSnapshotLockedWritesRestart+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesRestart-UseSink >> THiveTest::TestUpdateChannelValues >> TCdcStreamTests::CheckSchemeLimits [GOOD] >> TCdcStreamTests::RebootSchemeShard >> TCdcStreamWithInitialScanTests::WithoutPqTransactions [GOOD] >> TCdcStreamWithInitialScanTests::WithPqTransactions |79.2%| [TA] $(B)/ydb/core/kqp/ut/arrow/test-results/unittest/{meta.json ... results_accumulator.log} >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query [GOOD] >> TCdcStreamTests::RebootSchemeShard [GOOD] >> TCdcStreamTests::MeteringServerless >> THiveTest::TestLocalDisconnect [GOOD] >> THiveTest::TestLocalReplacement >> DataShardSnapshots::MvccSnapshotLockedWritesRestart-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts+UseSink >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table |79.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest >> THiveTest::TestNoMigrationToSelf [GOOD] >> THiveTest::TestReCreateTablet |79.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest >> TCdcStreamWithInitialScanTests::WithPqTransactions [GOOD] >> TCdcStreamWithInitialScanTests::AlterStream >> THiveTest::TestUpdateChannelValues [GOOD] >> THiveTest::TestStorageBalancer >> THiveTest::TestLocalReplacement [GOOD] >> THiveTest::TestHiveRestart >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table [GOOD] >> TCdcStreamWithInitialScanTests::AlterStream [GOOD] >> TCdcStreamWithInitialScanTests::DropStream >> THiveTest::TestReCreateTablet [GOOD] >> THiveTest::TestReCreateTabletError >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query |79.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest >> TargetTrackingScaleRecommenderPolicy::ScaleOut [GOOD] >> TargetTrackingScaleRecommenderPolicy::ScaleIn [GOOD] >> TargetTrackingScaleRecommenderPolicy::BigNumbersScaleOut [GOOD] >> TargetTrackingScaleRecommenderPolicy::BigNumbersScaleIn [GOOD] >> TargetTrackingScaleRecommenderPolicy::SpikeResistance [GOOD] >> TargetTrackingScaleRecommenderPolicy::NearTarget [GOOD] >> TargetTrackingScaleRecommenderPolicy::AtTarget [GOOD] >> TargetTrackingScaleRecommenderPolicy::Fluctuations [GOOD] >> TargetTrackingScaleRecommenderPolicy::FluctuationsBigNumbers [GOOD] >> 
TargetTrackingScaleRecommenderPolicy::ScaleInToMaxSeen [GOOD] >> TargetTrackingScaleRecommenderPolicy::Idle [GOOD] >> TScaleRecommenderTest::BasicTest >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts+UseSink >> LocalPartition::WithoutPartitionWithRestart [GOOD] >> LocalPartition::WithoutPartitionUnknownEndpoint >> YdbYqlClient::RetryOperationLimitedDuration [GOOD] >> TCdcStreamWithInitialScanTests::DropStream [GOOD] >> TCdcStreamWithInitialScanTests::RacyAlterStreamAndRestart >> THiveTest::TestHiveRestart [GOOD] >> THiveTest::TestLimitedNodeList |79.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest |79.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |79.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |79.2%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/arrow/test-results/unittest/{meta.json ... results_accumulator.log} >> THiveTest::TestHiveBalancer [GOOD] >> THiveTest::TestHiveBalancerWithPrefferedDC1 |79.2%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo >> THiveTest::TestReCreateTabletError [GOOD] >> THiveTest::TestNodeDisconnect >> TTxDataShardMiniKQL::WriteEraseRead >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query >> TxUsage::WriteToTopic_Demo_11_Query [GOOD] >> TScaleRecommenderTest::BasicTest [GOOD] >> TStorageBalanceTest::TestScenario1 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> YdbYqlClient::RetryOperationLimitedDuration [GOOD] Test command err: 2025-06-30T09:17:22.533727Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669035646547393:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:22.533769Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045cb/r3tmp/tmpzhFfY5/pdisk_1.dat 2025-06-30T09:17:22.640453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:22.640483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:22.643708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:22.644914Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:22.655757Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 18124, node 1 2025-06-30T09:17:22.907125Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:22.907144Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:22.907146Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:22.907208Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:15714 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:23.041701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:23.487797Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669039941515605:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:23.487828Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:23.536101Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:23.536171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:23.644788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:17:23.666712Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669039941515812:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:23.666747Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:23.666886Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669039941515817:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:23.667797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:23.682294Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669039941515819:2320], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:17:23.749849Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669039941515892:2800] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:17:23.765335Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7521669039941515913:2810], for# test_user@builtin, access# DescribeSchema 2025-06-30T09:17:23.765352Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7521669039941515913:2810], for# test_user@builtin, access# DescribeSchema 2025-06-30T09:17:23.767990Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669039941515910:2324], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:21: Error: At function: KiReadTable!
:2:21: Error: Cannot find table 'db.[Root/Test]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:17:23.768901Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZTJjMzczMjUtYjNmZWI2NzItZWQ3Y2Y1YTctZTc3ODI5NGE=, ActorId: [1:7521669039941515808:2314], ActorState: ExecuteState, TraceId: 01jz020ytg7m81e5x6cwjnb1rh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:17:24.748216Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669042411385031:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:24.748325Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045cb/r3tmp/tmpEn76xq/pdisk_1.dat 2025-06-30T09:17:24.952153Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:24.952186Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:24.953279Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:24.962587Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25847, node 4 2025-06-30T09:17:24.970802Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:24.970823Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:24.970826Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:24.970873Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28338 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:17:24.993077Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:25.251880Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669046706353148:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:25.251911Z node 4 :KQP ... pty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:17:26.179280Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:26.179281Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:26.179329Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9589 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:17:26.240639Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:26.240672Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:26.267120Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:26.273732Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:17:27.142073Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 2025-06-30T09:17:28.600676Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521669060818998292:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:28.600699Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:28.600814Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521669060818998304:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:17:28.601527Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:17:28.619627Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521669060818998306:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:17:28.699223Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521669060818998379:2670] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 2025-06-30T09:17:31.140253Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7521669052229062732:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:31.140290Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 2025-06-30T09:17:33.107454Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7521669080360209399:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:33.121815Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045cb/r3tmp/tmp67iBB6/pdisk_1.dat 2025-06-30T09:17:33.272261Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:33.277611Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:33.277633Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:33.285823Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:33.311130Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 26629, node 10 2025-06-30T09:17:33.324994Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-06-30T09:17:33.325004Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:17:33.325006Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:17:33.325048Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19524 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:17:33.645359Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:17:34.122845Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 2 of 3 2025-06-30T09:17:38.099443Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7521669080360209399:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:38.099532Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 3 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 2025-06-30T09:17:48.220633Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs 2025-06-30T09:17:48.220652Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 2 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 3 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 >> TxUsage::WriteToTopic_Demo_12_Table >> TTxDataShardMiniKQL::WriteEraseRead [GOOD] >> TTxDataShardMiniKQL::WriteAndReadMultipleShards >> TCdcStreamWithInitialScanTests::RacyAlterStreamAndRestart [GOOD] >> TCdcStreamWithInitialScanTests::MeteringServerless >> THiveTest::TestNodeDisconnect [GOOD] >> THiveTest::TestReassignGroupsWithRecreateTablet |79.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |79.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |79.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan >> THiveTest::TestLimitedNodeList [GOOD] >> THiveTest::TestHiveFollowersWithChangingDC >> TTxDataShardMiniKQL::ReadConstant >> THiveTest::TestStorageBalancer [GOOD] >> THiveTest::TestRestartsWithFollower >> THiveTest::TestReassignGroupsWithRecreateTablet [GOOD] >> THiveTest::TestReassignUseRelativeSpace >> TTxDataShardMiniKQL::MemoryUsageImmediateSmallTx >> TCdcStreamWithInitialScanTests::MeteringServerless [GOOD] >> TCdcStreamWithInitialScanTests::MeteringDedicated >> TTxDataShardMiniKQL::ReadConstant [GOOD] >> TTxDataShardMiniKQL::ReadAfterWrite >> TPQTest::TestSourceIdDropByUserWrites [GOOD] >> TPQTest::TestSourceIdDropBySourceIdCount >> TTxDataShardMiniKQL::WriteAndReadMultipleShards [GOOD] >> TTxDataShardMiniKQL::WriteAndReadMany |79.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |79.2%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |79.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut >> 
DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts-UseSink >> TxUsage::Sinks_Oltp_WriteToTopic_5_Table [GOOD] >> THiveTest::TestReassignUseRelativeSpace [GOOD] >> THiveTest::TestManyFollowersOnOneNode >> TDSProxyGetTest::TestBlock42WipedErrorWithTwoBlobs [GOOD] >> TDSProxyPatchTest::NaiveErrorOnPut_Erasure4Plus2Block >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table [GOOD] >> TTxDataShardMiniKQL::ReadAfterWrite [GOOD] >> TTxDataShardMiniKQL::ReadNonExisting >> TTxDataShardMiniKQL::ReadSpecialColumns >> TDSProxyPatchTest::NaiveErrorOnPut_Erasure4Plus2Block [GOOD] >> TDSProxyPutTest::TestBlock42PutAllOk >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Query [GOOD] >> THiveTest::TestDrain [GOOD] >> THiveTest::TestDrainWithMaxTabletsScheduled >> TDSProxyPutTest::TestBlock42PutAllOk [GOOD] >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus1Block >> ObjectDistribution::TestManyIrrelevantNodes [GOOD] >> Sequencer::Basic1 [GOOD] >> StoragePool::TestDistributionRandomProbability >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus1Block [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_5_Query >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Table >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithReadConflicts >> TTxDataShardMiniKQL::ReadNonExisting [GOOD] >> THiveTest::TestManyFollowersOnOneNode [GOOD] >> THiveTest::TestLockTabletExecutionTimeout >> TTxDataShardMiniKQL::MemoryUsageImmediateSmallTx [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateMediumTx >> THiveTest::TestHiveBalancerWithPrefferedDC1 [GOOD] >> THiveTest::TestHiveBalancerWithPrefferedDC2 |79.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus1Block [GOOD] >> TTxDataShardMiniKQL::ReadSpecialColumns [GOOD] >> TTxDataShardMiniKQL::SelectRange >> THiveTest::TestRestartsWithFollower [GOOD] >> THiveTest::TestStartTabletTwiceInARow ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::ReadNonExisting [GOOD] Test command err: 2025-06-30T09:17:56.540746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:56.540772Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:56.541601Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:17:56.545493Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:17:56.545619Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:136:2157] 2025-06-30T09:17:56.545680Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:17:56.555668Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:17:56.558075Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:17:56.558103Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: 
TDataShard::TTxInit::Execute 2025-06-30T09:17:56.558218Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-30T09:17:56.558224Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-30T09:17:56.558229Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-30T09:17:56.558281Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:17:56.558292Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:17:56.558302Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:205:2157] in generation 2 2025-06-30T09:17:56.587122Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:17:56.594826Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-30T09:17:56.594914Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:17:56.594943Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:220:2216] 2025-06-30T09:17:56.594950Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-30T09:17:56.594956Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-30T09:17:56.594962Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:17:56.595012Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:56.595022Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:56.595096Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-30T09:17:56.595121Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-30T09:17:56.595149Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:17:56.595156Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:17:56.595163Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-30T09:17:56.595168Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-30T09:17:56.595173Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-30T09:17:56.595178Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-30T09:17:56.595183Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:17:56.595195Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:216:2213], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 
2025-06-30T09:17:56.595201Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:56.595212Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:214:2212], serverId# [1:216:2213], sessionId# [0:0:0] 2025-06-30T09:17:56.595830Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:136:2157]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-30T09:17:56.595844Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:17:56.595859Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:17:56.595896Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-30T09:17:56.595907Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-30T09:17:56.595919Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-30T09:17:56.595940Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-30T09:17:56.595945Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-30T09:17:56.595951Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-30T09:17:56.595956Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:17:56.596027Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-30T09:17:56.596032Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-30T09:17:56.596036Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-30T09:17:56.596041Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:17:56.596051Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-30T09:17:56.596056Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-30T09:17:56.596060Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-30T09:17:56.596064Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-30T09:17:56.596070Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-30T09:17:56.606890Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:17:56.606918Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:17:56.606925Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:17:56.606954Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-30T09:17:56.606970Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-30T09:17:56.607089Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:226:2222], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:56.607100Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:56.607109Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:225:2221], serverId# [1:226:2222], sessionId# [0:0:0] 2025-06-30T09:17:56.607133Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:136:2157]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-30T09:17:56.607139Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-30T09:17:56.607176Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-30T09:17:56.607186Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-30T09:17:56.607194Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-30T09:17:56.607200Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-30T09:17:56.608013Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-30T09:17:56.608039Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:17:56.608111Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:56.608120Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:56.608130Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:17:56.608139Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:17:56.608146Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:17:56.608155Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 
9437184 2025-06-30T09:17:56.608161Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... oordinators count is 1 buckets per mediator 2 2025-06-30T09:17:59.158218Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [3:287:2269] 2025-06-30T09:17:59.158224Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-30T09:17:59.158230Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 9437184 2025-06-30T09:17:59.158236Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:17:59.158270Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-06-30T09:17:59.158289Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-06-30T09:17:59.158352Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [3:239:2230], Recipient [3:239:2230]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:59.158360Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:59.158410Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-30T09:17:59.158438Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-30T09:17:59.158467Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [3:25:2072], Recipient [3:239:2230]: {TEvRegisterTabletResult TabletId# 9437184 Entry# 0} 2025-06-30T09:17:59.158473Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-30T09:17:59.158477Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 9437184 time 0 2025-06-30T09:17:59.158484Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:17:59.158525Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5614: Got TEvDataShard::TEvSchemaChanged for unknown txId 1 message# Source { RawX1: 239 RawX2: 12884904118 } Origin: 9437184 State: 2 TxId: 1 Step: 0 Generation: 3 2025-06-30T09:17:59.158542Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [3:25:2072], Recipient [3:239:2230]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 0 ReadStep# 0 } 2025-06-30T09:17:59.158551Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-30T09:17:59.158556Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 9437184 coordinator 72057594046316545 last step 0 next step 0 2025-06-30T09:17:59.158565Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:17:59.158571Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:17:59.158578Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 
2025-06-30T09:17:59.158584Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-30T09:17:59.158588Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-30T09:17:59.158595Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-30T09:17:59.158600Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:17:59.158617Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [3:285:2267], Recipient [3:239:2230]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 4200 Status: OK ServerId: [3:289:2271] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-30T09:17:59.158622Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-30T09:17:59.158633Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [3:127:2151], Recipient [3:239:2230]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 1 2025-06-30T09:17:59.158637Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-30T09:17:59.158642Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 1 datashard 9437184 state Ready 2025-06-30T09:17:59.158648Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 9437184 Got TEvSchemaChangedResult from SS at 9437184 2025-06-30T09:17:59.171820Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:285:2267], Recipient [3:239:2230]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 4200 ClientId: [3:285:2267] ServerId: [3:289:2271] } 2025-06-30T09:17:59.171851Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-30T09:17:59.241156Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [3:103:2136], Recipient [3:239:2230]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 103 RawX2: 12884904024 } 2025-06-30T09:17:59.241187Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-30T09:17:59.241250Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:295:2275], Recipient [3:239:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:59.241255Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:59.241264Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:293:2274], serverId# [3:295:2275], sessionId# [0:0:0] 2025-06-30T09:17:59.241310Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [3:103:2136], Recipient [3:239:2230]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 103 RawX2: 12884904024 } TxBody: "\032\365\001\037\004\0021\nvalue\005\205\n\205\002\207\205\002\207\203\001H\006\002\205\004\205\002?\006\002\205\000\034MyReads MyWrites\205\004\205\002?\006\002\206\202\024Reply\024Write?\014\205\002\206\203\010\002 
AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\010)\211\n?\006\203\005\004\200\205\002\203\004\006\213\002\203\004\203\004$SelectRow\000\003?\036 h\020\000\000\000\000\000\000\r\000\000\000\000\000\000\000?\004\005?\"\003? p\001\013?&\003?$T\001\003?(\000\037\002\000\005?\016\005?\n?8\000\005?\014\003\005?\024\005?\020?8\000\006\000?\022\003?>\005?\032\006\000?\030\001\037/ \0018\001" TxId: 2 ExecLevel: 0 Flags: 0 2025-06-30T09:17:59.241316Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:17:59.241348Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:17:59.241494Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CheckDataTx 2025-06-30T09:17:59.241523Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-30T09:17:59.241528Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CheckDataTx 2025-06-30T09:17:59.241534Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-30T09:17:59.241538Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit BuildAndWaitDependencies 2025-06-30T09:17:59.241549Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:17:59.241563Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 9437184 2025-06-30T09:17:59.241569Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-30T09:17:59.241573Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-30T09:17:59.241578Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit ExecuteDataTx 2025-06-30T09:17:59.241582Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-30T09:17:59.241689Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:2] at tablet 9437184 with status COMPLETE 2025-06-30T09:17:59.241700Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:2] at 9437184: {NSelectRow: 1, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-30T09:17:59.241713Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-30T09:17:59.241716Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit ExecuteDataTx 2025-06-30T09:17:59.241720Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit FinishPropose 2025-06-30T09:17:59.241723Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit FinishPropose 2025-06-30T09:17:59.241732Z node 3 :TX_DATASHARD TRACE: 
finish_propose_unit.cpp:167: Propose transaction complete txid 2 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-30T09:17:59.241750Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is DelayComplete 2025-06-30T09:17:59.241754Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit FinishPropose 2025-06-30T09:17:59.241757Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit CompletedOperations 2025-06-30T09:17:59.241761Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CompletedOperations 2025-06-30T09:17:59.241771Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-30T09:17:59.241775Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CompletedOperations 2025-06-30T09:17:59.241778Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 9437184 has finished 2025-06-30T09:17:59.241786Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:17:59.241790Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 9437184 on unit FinishPropose 2025-06-30T09:17:59.241798Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> StoragePool::TestDistributionRandomProbability [GOOD] >> StoragePool::TestDistributionRandomProbabilityWithOverflow [GOOD] >> StoragePool::TestDistributionExactMin >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateMediumTx [GOOD] >> TTxDataShardMiniKQL::MemoryUsageMultiShard >> DataShardSnapshots::MvccSnapshotLockedWritesWithReadConflicts [GOOD] >> DataShardSnapshots::LockedWritesLimitedPerKey+UseSink >> TTxDataShardMiniKQL::SelectRange [GOOD] >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table >> THiveTest::TestStartTabletTwiceInARow [GOOD] >> THiveTest::TestSpreadNeighboursWithUpdateTabletsObject >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey [GOOD] >> TxUsage::WriteToTopic_Demo_47_Table [GOOD] >> TxUsage::WriteToTopic_Demo_47_Query >> TxUsage::WriteToTopic_Demo_12_Table [GOOD] >> THiveTest::TestHiveFollowersWithChangingDC [GOOD] >> THiveTest::TestHiveBalancerWithSystemTablets >> StoragePool::TestDistributionExactMin [GOOD] >> TxUsage::WriteToTopic_Demo_12_Query >> StoragePool::TestDistributionExactMinWithOverflow [GOOD] >> StoragePool::TestDistributionRandomMin7p ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey [GOOD] Test command err: 2025-06-30T09:17:59.669839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:59.669859Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:59.670869Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:17:59.676472Z node 1 
:TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:17:59.676590Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:136:2157] 2025-06-30T09:17:59.676902Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:17:59.713547Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:17:59.722605Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:17:59.722641Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:17:59.722819Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-30T09:17:59.722828Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-30T09:17:59.722834Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-30T09:17:59.722888Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:17:59.722898Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:17:59.722910Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:205:2157] in generation 2 2025-06-30T09:17:59.791705Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:17:59.846589Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-30T09:17:59.846692Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:17:59.846722Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:220:2216] 2025-06-30T09:17:59.846729Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-30T09:17:59.846749Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-30T09:17:59.846757Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:17:59.846813Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:59.846823Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:59.846911Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-30T09:17:59.846942Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-30T09:17:59.846977Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:17:59.846986Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:17:59.846994Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-30T09:17:59.847001Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-30T09:17:59.847007Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-30T09:17:59.847013Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-30T09:17:59.847020Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:17:59.847037Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:216:2213], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:59.847044Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:59.847061Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:214:2212], serverId# [1:216:2213], sessionId# [0:0:0] 2025-06-30T09:17:59.847959Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:136:2157]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nx\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\016\n\010__tablet\030\004 9\032\023\n\r__updateEpoch\030\004 :\032\020\n\n__updateNo\030\004 ;(\"J\014/Root/table1\222\002\013\th\020\000\000\000\000\000\000\020\r" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-30T09:17:59.847981Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:17:59.848000Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:17:59.848037Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-30T09:17:59.848051Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-30T09:17:59.848061Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-30T09:17:59.848081Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-30T09:17:59.848087Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-30T09:17:59.848094Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-30T09:17:59.848099Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:17:59.848190Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-30T09:17:59.848196Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-30T09:17:59.848201Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-30T09:17:59.848208Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:17:59.848220Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-30T09:17:59.848225Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-30T09:17:59.848230Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-30T09:17:59.848236Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-30T09:17:59.848242Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-30T09:17:59.860064Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:17:59.860090Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:17:59.860097Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:17:59.860130Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-30T09:17:59.860144Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-30T09:17:59.860252Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:226:2222], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:59.860260Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:59.860268Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:225:2221], serverId# [1:226:2222], sessionId# [0:0:0] 2025-06-30T09:17:59.860295Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:136:2157]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-30T09:17:59.860300Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-30T09:17:59.860334Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-30T09:17:59.860343Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-30T09:17:59.860348Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-30T09:17:59.860353Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-30T09:17:59.862687Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-30T09:17:59.862712Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:17:59.862799Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:59.862807Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:59.862817Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:17:59.862824Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:17:59.862830Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:17:59.862838Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-30T09:17:59 ... 004\206\203\014\203\014,SelectRange\000\003?* h\020\000\000\000\000\000\000\016\000\000\000\000\000\000\000?\014\005?2\003?,D\003?.F\003?0p\007\013?:\003?4e\005\'?8\003\013?>\003?\000\003?@\000\003?B\000\006\004?F\003\203\014\000\003\203\014\000\003\003?H\000\377\007\002\000\005?\032\005?\026?r\000\005?\030\003\005? \005?\034?r\000\006\000?\036\003?x\005?&\006\ 2025-06-30T09:18:01.763406Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:18:01.763417Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:18:01.763482Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit CheckDataTx 2025-06-30T09:18:01.763491Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-30T09:18:01.763494Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit CheckDataTx 2025-06-30T09:18:01.763498Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-30T09:18:01.763503Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit BuildAndWaitDependencies 2025-06-30T09:18:01.763509Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-30T09:18:01.763515Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:8] at 9437184 2025-06-30T09:18:01.763518Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-30T09:18:01.763522Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-30T09:18:01.763525Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit ExecuteDataTx 2025-06-30T09:18:01.763528Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit ExecuteDataTx 2025-06-30T09:18:01.763586Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:8] at tablet 9437184 with status COMPLETE 2025-06-30T09:18:01.763592Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:8] 
at 9437184: {NSelectRow: 0, NSelectRange: 1, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 2, SelectRangeBytes: 31, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-30T09:18:01.763598Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-30T09:18:01.763601Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit ExecuteDataTx 2025-06-30T09:18:01.763604Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit FinishPropose 2025-06-30T09:18:01.763608Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit FinishPropose 2025-06-30T09:18:01.763612Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 8 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-30T09:18:01.763619Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is DelayComplete 2025-06-30T09:18:01.763628Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit FinishPropose 2025-06-30T09:18:01.763631Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit CompletedOperations 2025-06-30T09:18:01.763634Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit CompletedOperations 2025-06-30T09:18:01.763640Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-30T09:18:01.763643Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit CompletedOperations 2025-06-30T09:18:01.763648Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 9437184 has finished 2025-06-30T09:18:01.763654Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:18:01.763657Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:8] at 9437184 on unit FinishPropose 2025-06-30T09:18:01.763662Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> TTxDataShardMiniKQL::MemoryUsageMultiShard [GOOD] >> DataShardSnapshots::LockedWritesLimitedPerKey+UseSink [GOOD] >> DataShardSnapshots::LockedWritesLimitedPerKey-UseSink >> THiveTest::TestLockTabletExecutionTimeout [GOOD] >> THiveTest::TestLockTabletExecutionRebootTimeout ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::MemoryUsageMultiShard [GOOD] Test command err: 2025-06-30T09:17:59.163031Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:134:2155]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:17:59.330889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:59.330924Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:59.331413Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:134:2155]: NKikimr::TEvTablet::TEvRestored 
2025-06-30T09:17:59.331569Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:134:2155] 2025-06-30T09:17:59.332908Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:17:59.402228Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:134:2155]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:17:59.423388Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:17:59.423463Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:17:59.423631Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-30T09:17:59.423640Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-30T09:17:59.423647Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-30T09:17:59.423708Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:17:59.423781Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:17:59.423791Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:200:2155] in generation 2 2025-06-30T09:17:59.506257Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:17:59.528889Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-30T09:17:59.528964Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:17:59.528988Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-30T09:17:59.528994Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-30T09:17:59.528999Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-30T09:17:59.529004Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:17:59.529065Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:134:2155], Recipient [1:134:2155]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:59.529072Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:59.529135Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-30T09:17:59.529155Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-30T09:17:59.529165Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:17:59.529171Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:17:59.529178Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-30T09:17:59.529182Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 
9437184 has no attached operations 2025-06-30T09:17:59.529186Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-30T09:17:59.529191Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-30T09:17:59.529196Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:17:59.529206Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:134:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:59.529211Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:59.529222Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-30T09:17:59.537928Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:134:2155]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-30T09:17:59.537965Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:17:59.538001Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:17:59.538043Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-30T09:17:59.538053Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-30T09:17:59.538064Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-30T09:17:59.538081Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-30T09:17:59.538086Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-30T09:17:59.538091Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-30T09:17:59.538095Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:17:59.538176Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-30T09:17:59.538180Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-30T09:17:59.538184Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-30T09:17:59.538188Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:17:59.538197Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-30T09:17:59.538201Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-30T09:17:59.538204Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-30T09:17:59.538208Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-30T09:17:59.538212Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-30T09:17:59.555251Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:17:59.555280Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:17:59.555287Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:17:59.555310Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-30T09:17:59.555329Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-30T09:17:59.555444Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:134:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:59.555451Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:59.555459Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-30T09:17:59.555479Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:134:2155]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-30T09:17:59.555484Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-30T09:17:59.555520Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-30T09:17:59.555527Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-30T09:17:59.555533Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-30T09:17:59.555538Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-30T09:17:59.557353Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-30T09:17:59.557368Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:17:59.557412Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:134:2155], Recipient [1:134:2155]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:59.557418Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event 
TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:59.557427Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:17:59.557435Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:17:59.557440Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:17:59.557447Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2:1] in PlanQueue unit at 9437184 2025-06-30T09:17:59.557452Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2:1] at 9437184 on unit PlanQueue 2025-06-30T09:17:59. ... ressTransaction} at tablet 9437185 (3 by [3:373:2316]) from queue queue_transaction 2025-06-30T09:18:02.914493Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:373:2316]) to queue queue_transaction 2025-06-30T09:18:02.914498Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_transaction from 16.936776 to 33.873553 (insert task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:373:2316])) 2025-06-30T09:18:02.914506Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} acquired dyn mem Res{3 96990534b}, Memory{0 dyn 96990534} 2025-06-30T09:18:02.914511Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:18:02.914515Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437184 on unit ExecuteDataTx 2025-06-30T09:18:02.914595Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 5 at 9437184 restored its data 2025-06-30T09:18:03.107069Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [6:5] at tablet 9437184 with status COMPLETE 2025-06-30T09:18:03.107118Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [6:5] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 2, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 22, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-30T09:18:03.107144Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437184 is ExecutedNoMoreRestarts 2025-06-30T09:18:03.107153Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437184 executing on unit ExecuteDataTx 2025-06-30T09:18:03.107161Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437184 to execution unit CompleteOperation 2025-06-30T09:18:03.107168Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437184 on unit CompleteOperation 2025-06-30T09:18:03.107258Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437184 is DelayComplete 2025-06-30T09:18:03.107263Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437184 executing on unit CompleteOperation 2025-06-30T09:18:03.107267Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437184 to execution unit CompletedOperations 2025-06-30T09:18:03.107272Z 
node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437184 on unit CompletedOperations 2025-06-30T09:18:03.107279Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437184 is Executed 2025-06-30T09:18:03.107283Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437184 executing on unit CompletedOperations 2025-06-30T09:18:03.107288Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [6:5] at 9437184 has finished 2025-06-30T09:18:03.107296Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:03.107301Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:18:03.107306Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-30T09:18:03.107311Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-30T09:18:03.107352Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} hope 5 -> done Change{16, redo 636b alter 0b annex 0, ~{ 1001, 1, 3, 4, 12, 7, 8, 5 } -{ }, 0 gb} 2025-06-30T09:18:03.107369Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} release Res{3 96990534b}, Memory{0 dyn 0} 2025-06-30T09:18:03.107444Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437184 (3 by [3:261:2230]) (release resources {0, 96990534}) 2025-06-30T09:18:03.107465Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_transaction from 33.873553 to 16.936776 (remove task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437184 (3 by [3:261:2230])) 2025-06-30T09:18:03.107501Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437185 2025-06-30T09:18:03.107506Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit ExecuteDataTx 2025-06-30T09:18:03.107790Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 5 at 9437185 restored its data 2025-06-30T09:18:03.169781Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [6:5] at tablet 9437185 with status COMPLETE 2025-06-30T09:18:03.169830Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [6:5] at 9437185: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 2, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 22, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-30T09:18:03.169851Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is ExecutedNoMoreRestarts 2025-06-30T09:18:03.169858Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit ExecuteDataTx 2025-06-30T09:18:03.169866Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437185 to execution unit CompleteOperation 2025-06-30T09:18:03.169872Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 
on unit CompleteOperation 2025-06-30T09:18:03.169948Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is DelayComplete 2025-06-30T09:18:03.169953Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit CompleteOperation 2025-06-30T09:18:03.169957Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437185 to execution unit CompletedOperations 2025-06-30T09:18:03.169960Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit CompletedOperations 2025-06-30T09:18:03.169966Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is Executed 2025-06-30T09:18:03.169969Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit CompletedOperations 2025-06-30T09:18:03.169973Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [6:5] at 9437185 has finished 2025-06-30T09:18:03.169988Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:03.169993Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-30T09:18:03.169997Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-30T09:18:03.170001Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-30T09:18:03.170032Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} hope 5 -> done Change{16, redo 636b alter 0b annex 0, ~{ 1001, 1, 3, 4, 12, 7, 8, 5 } -{ }, 0 gb} 2025-06-30T09:18:03.170045Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} release Res{3 96990534b}, Memory{0 dyn 0} 2025-06-30T09:18:03.170106Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:373:2316]) (release resources {0, 96990534}) 2025-06-30T09:18:03.170123Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_transaction from 16.936776 to 0.000000 (remove task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:373:2316])) 2025-06-30T09:18:03.182818Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:10} commited cookie 1 for step 9 2025-06-30T09:18:03.182861Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-30T09:18:03.182871Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:5] at 9437185 on unit CompleteOperation 2025-06-30T09:18:03.182890Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 5] from 9437185 at tablet 9437185 send result to client [3:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-30T09:18:03.182908Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437185 {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 2} 2025-06-30T09:18:03.182916Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-30T09:18:03.182988Z 
node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:10} commited cookie 1 for step 9 2025-06-30T09:18:03.182995Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:18:03.183000Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:5] at 9437184 on unit CompleteOperation 2025-06-30T09:18:03.183008Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 5] from 9437184 at tablet 9437184 send result to client [3:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-30T09:18:03.183014Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-30T09:18:03.183018Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:18:03.183061Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [3:351:2316], Recipient [3:463:2402]: {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 2} 2025-06-30T09:18:03.183067Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:18:03.183073Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437186 source 9437186 dest 9437185 consumer 9437185 txId 5 2025-06-30T09:18:03.183085Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [3:239:2230], Recipient [3:463:2402]: {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-30T09:18:03.183090Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:18:03.183094Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437186 source 9437186 dest 9437184 consumer 9437184 txId 5 >> TTxDataShardMiniKQL::CrossShard_5_AllToAll >> LocalPartition::WithoutPartitionUnknownEndpoint [GOOD] >> LocalPartition::WithoutPartitionPartitionRelocation >> UpsertLoad::ShouldWriteDataBulkUpsert >> TCdcStreamWithInitialScanTests::MeteringDedicated [GOOD] >> StoragePool::TestDistributionRandomMin7p [GOOD] >> StoragePool::TestDistributionRandomMin7pWithOverflow [GOOD] |79.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/control/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream/unittest >> TCdcStreamWithInitialScanTests::MeteringDedicated [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:17:42.749533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:42.749563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-30T09:17:42.749568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:42.749573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:42.749578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:42.749581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:42.749589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:42.749606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:42.749721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:42.749790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:42.761960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:42.761983Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:42.764854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:42.764904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:42.764934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:42.766473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:42.766594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:42.766705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:42.766800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:42.767354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:42.767390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:42.767626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:42.767636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:42.767659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-06-30T09:17:42.767664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:42.767669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:42.767683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:42.768856Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:42.785064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:42.785158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:42.785239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:42.785250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:42.785307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:42.785324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:42.786342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:42.786391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:42.786474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:42.786488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:42.786494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:42.786501Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:42.787172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:42.787188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:42.787198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:42.787627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:42.787640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:42.787647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:42.787656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:42.788449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:42.788959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:42.789006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:42.789239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:42.789271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:42.789280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:42.789361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:42.789371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:42.789410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:42.789424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:42.789934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:42.789945Z node 1 :FLAT_TX_SCHEMESHARD ... 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72075186233409546, cookie: 281474976715657 2025-06-30T09:17:57.522955Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 281474976715657 2025-06-30T09:17:57.522964Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 281474976715657, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 5 2025-06-30T09:17:57.522976Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 6 2025-06-30T09:17:57.523009Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 2/3, is published: true 2025-06-30T09:17:57.528416Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 281474976715657 2025-06-30T09:17:57.532851Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 281474976715657 2025-06-30T09:17:57.551170Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6376: Handle TEvProposeTransactionResult, at schemeshard: 72075186233409546, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 300 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 249 } } CommitVersion { Step: 300 TxId: 281474976715657 } 2025-06-30T09:17:57.551201Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 281474976715657, tablet: 72075186233409552, partId: 1 2025-06-30T09:17:57.551236Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 300 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 249 } } CommitVersion { Step: 300 TxId: 281474976715657 } 2025-06-30T09:17:57.551253Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72075186233409546, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 300 
OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 249 } } CommitVersion { Step: 300 TxId: 281474976715657 } 2025-06-30T09:17:57.551566Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72075186233409546, at schemeshard: 72075186233409546, message: Source { RawX1: 766 RawX2: 81604381277 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-30T09:17:57.551573Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 281474976715657, tablet: 72075186233409552, partId: 1 2025-06-30T09:17:57.551585Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546, message: Source { RawX1: 766 RawX2: 81604381277 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-30T09:17:57.551593Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 281474976715657:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72075186233409546 2025-06-30T09:17:57.551601Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976715657:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72075186233409546 message: Source { RawX1: 766 RawX2: 81604381277 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-30T09:17:57.551619Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715657:1, shardIdx: 72075186233409546:4, shard: 72075186233409552, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72075186233409546 2025-06-30T09:17:57.551623Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-30T09:17:57.551628Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715657:1, datashard: 72075186233409552, at schemeshard: 72075186233409546 2025-06-30T09:17:57.551636Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715657:1 129 -> 240 2025-06-30T09:17:57.559337Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-30T09:17:57.575097Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-30T09:17:57.575265Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-30T09:17:57.575277Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72075186233409546] TDone opId# 281474976715657:1 ProgressState 2025-06-30T09:17:57.575297Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715657:1 progress is 3/3 2025-06-30T09:17:57.575303Z node 19 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-30T09:17:57.575309Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715657:1 progress is 3/3 2025-06-30T09:17:57.575312Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-30T09:17:57.575317Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 3/3, is published: true 2025-06-30T09:17:57.575324Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-30T09:17:57.575330Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715657:0 2025-06-30T09:17:57.575335Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715657:0 2025-06-30T09:17:57.575349Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 3 2025-06-30T09:17:57.575354Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715657:1 2025-06-30T09:17:57.575357Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715657:1 2025-06-30T09:17:57.575373Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 5 2025-06-30T09:17:57.575379Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715657:2 2025-06-30T09:17:57.575383Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715657:2 2025-06-30T09:17:57.575387Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-30T09:18:00.894617Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:18:00.894712Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 4 took 112us result status StatusNameConflict 2025-06-30T09:18:00.894775Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/Shared/Table/Stream/streamImpl\', error: path is not a common path (id: [OwnerId: 72075186233409546, LocalPathId: 4], type: EPathTypePersQueueGroup, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Shared/Table/Stream/streamImpl" PathId: 4 LastExistedPrefixPath: "/MyRoot/Shared/Table/Stream/streamImpl" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 200 
ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409554 } } PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:18:03.768134Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:18:03.768242Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 4 took 125us result status StatusNameConflict 2025-06-30T09:18:03.768313Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/Shared/Table/Stream/streamImpl\', error: path is not a common path (id: [OwnerId: 72075186233409546, LocalPathId: 4], type: EPathTypePersQueueGroup, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Shared/Table/Stream/streamImpl" PathId: 4 LastExistedPrefixPath: "/MyRoot/Shared/Table/Stream/streamImpl" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409554 } } PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 |79.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |79.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> StoragePool::TestDistributionRandomMin7pWithOverflow [GOOD] Test command err: (1,1): 1 on 2 (1,1): 1 on 1 RemoveNode 7 (1,1): 1 on 3 (1,3): 1 on 5 (1,2): 1 on 6 (1,1): 1 on 0 (1,2): 1 on 9 RemoveNode 0 (1,3): 1 on 9 RemoveNode 2 (1,3): 1 on 3 (1,3): 1 on 4 (1,1): -1 on 0 (1,2): 1 on 6 RemoveNode 1 (1,3): 1 on 6 (1,3): 1 on 2 (1,2): 1 on 8 (1,2): -1 on 6 (1,2): 1 on 6 (1,2): 1 on 8 (1,1): 1 on 1 (1,1): 1 on 3 AddNode 0 (1,1): 1 on 0 (1,3): 1 on 2 (1,1): 1 on 2 (1,1): 1 on 3 (1,2): 1 on 5 (1,3): 1 on 7 (1,2): 1 on 7 (1,3): 1 on 4 (1,1): 1 on 1 (1,3): 1 on 8 (1,2): -1 on 6 RemoveNode 6 (1,1): 1 on 3 (1,1): 1 on 4 (1,1): -1 on 3 AddNode 2 (1,2): 1 on 8 RemoveNode 8 (1,1): 1 on 3 (1,2): 1 on 7 (1,1): 1 on 0 (1,1): 1 on 3 RemoveNode 5 (1,3): 1 on 8 RemoveNode 9 (1,3): 1 on 5 (1,1): 1 on 0 AddNode 6 (1,2): -1 on 7 (1,1): 1 on 2 (1,2): 1 on 9 AddNode 1 (1,1): 1 on 3 RemoveNode 6 (1,3): 1 on 7 (1,1): 1 on 3 (1,3): 1 on 0 (1,3): -1 on 5 (1,3): 1 on 3 (1,3): -1 on 3 (1,1): -1 on 1 (1,1): 1 on 0 (1,2): 1 on 7 (1,1): 1 on 3 (1,2): 1 on 8 (1,3): 1 on 8 (1,3): 1 on 3 (1,3): 1 on 0 (1,2): -1 on 7 (1,1): -1 on 4 (1,2): 1 on 7 RemoveNode 0 (1,1): 1 on 4 (1,3): 1 on 2 (1,3): 1 on 2 AddNode 5 (1,2): 1 on 9 (1,3): 1 on 1 (1,1): 1 on 3 RemoveNode 2 (1,3): -1 on 0 (1,2): 1 on 9 (1,1): 1 on 4 (1,1): 1 on 2 RemoveNode 4 (1,3): 1 on 0 (1,1): 1 on 1 (1,2): 1 on 6 (1,2): 1 on 7 (1,1): 1 on 0 (1,1): 1 on 1 AddNode 8 (1,1): -1 on 3 (1,1): -1 on 3 (1,2): 1 on 8 (1,2): 1 on 6 AddNode 2 (1,1): 1 on 1 (1,2): 1 on 6 
RemoveNode 5 (1,3): -1 on 5 (1,1): 1 on 3 (1,2): 1 on 9 (1,3): 1 on 6 (1,2): 1 on 8 (1,3): 1 on 2 AddNode 4 (1,2): 1 on 9 (1,1): 1 on 0 (1,2): 1 on 5 (1,1): 1 on 4 AddNode 9 (1,3): -1 on 4 RemoveNode 4 (1,3): 1 on 0 (1,1): -1 on 3 (1,1): 1 on 3 (1,3): 1 on 9 (1,2): -1 on 8 (1,2): 1 on 5 AddNode 7 (1,3): 1 on 1 (1,3): 1 on 3 (1,1): 1 on 0 (1,3): 1 on 0 RemoveNode 1 (1,1): 1 on 2 RemoveNode 9 (1,1): -1 on 1 (1,3): -1 on 8 (1,2): 1 on 9 (1,1): 1 on 4 AddNode 4 (1,1): 1 on 2 AddNode 1 (1,1): 1 on 4 (1,3): -1 on 8 (1,1): 1 on 3 RemoveNode 2 (1,1): 1 on 3 (1,1): 1 on 3 (1,3): -1 on 9 AddNode 2 (1,1): 1 on 3 (1,3): -1 on 8 (1,2): 1 on 6 (1,3): -1 on 1 (1,2): -1 on 8 RemoveNode 7 (1,2): 1 on 5 RemoveNode 4 (1,1): 1 on 4 (1,1): 1 on 0 (1,3): 1 on 8 (1,2): 1 on 8 (1,2): 1 on 7 RemoveNode 2 (1,1): 1 on 4 RemoveNode 3 (1,1): 1 on 0 (1,2): 1 on 5 AddNode 7 (1,1): 1 on 4 (1,1): 1 on 4 (1,1): -1 on 2 (1,3): 1 on 3 (1,1): 1 on 0 (1,3): 1 on 8 (1,2): 1 on 8 AddNode 9 (1,2): 1 on 6 AddNode 4 (1,1): 1 on 3 AddNode 0 (1,1): 1 on 0 (1,1): -1 on 4 (1,2): 1 on 7 (1,2): 1 on 6 (1,1): 1 on 4 (1,1): 1 on 1 (1,1): 1 on 1 (1,3): -1 on 0 AddNode 2 (1,3): -1 on 0 (1,1): 1 on 4 (1,1): 1 on 4 (1,3): 1 on 0 RemoveNode 0 (1,3): 1 on 3 (1,2): -1 on 8 (1,2): 1 on 6 (1,2): 1 on 8 (1,2): 1 on 7 (1,3): 1 on 6 (1,2): 1 on 7 (1,2): 1 on 6 (1,3): 1 on 2 (1,2): 1 on 5 AddNode 6 (1,2): 1 on 7 RemoveNode 9 (1,1): 1 on 1 (1,1): 1 on 1 (1,2): 1 on 7 RemoveNode 8 (1,3): -1 on 6 RemoveNode 7 (1,1): 1 on 2 (1,2): 1 on 9 (1,1): 1 on 1 RemoveNode 4 (1,2): 1 on 5 RemoveNode 1 (1,1): 1 on 1 AddNode 8 (1,3): -1 on 8 (1,3): 1 on 0 AddNode 0 (1,3): 1 on 3 (1,2): 1 on 6 (1,2): -1 on 9 RemoveNode 2 (1,3): 1 on 2 (1,3): 1 on 1 (1,3): 1 on 8 RemoveNode 6 (1,1): -1 on 0 (1,1): 1 on 0 (1,2): 1 on 7 AddNode 3 (1,3): 1 on 5 (1,1): 1 on 3 (1,2): 1 on 8 AddNode 7 (1,3): 1 on 5 AddNode 5 (1,1): -1 on 3 RemoveNode 7 (1,3): -1 on 8 AddNode 7 (1,1): -1 on 3 (1,3): 1 on 3 RemoveNode 7 (1,2): 1 on 7 (1,3): 1 on 7 (1,1): 1 on 1 RemoveNode 0 (1,1): 1 on 3 (1,2): -1 on 9 (1,1): -1 on 2 (1,2): 1 on 9 AddNode 7 (1,2): -1 on 8 AddNode 0 (1,1): 1 on 2 (1,3): 1 on 0 (1,2): 1 on 9 AddNode 2 (1,3): 1 on 0 RemoveNode 7 (1,3): 1 on 8 RemoveNode 2 (1,1): 1 on 4 (1,2): 1 on 8 (1,2): 1 on 7 (1,3): 1 on 0 (1,3): -1 on 1 AddNode 2 (1,3): 1 on 2 (1,3): -1 on 7 (1,3): 1 on 0 (1,1): 1 on 0 (1,3): 1 on 0 (1,2): 1 on 9 RemoveNode 5 (1,1): -1 on 3 (1,3): 1 on 7 (1,1): 1 on 1 (1,2): 1 on 7 AddNode 9 (1,2): 1 on 6 (1,1): 1 on 1 (1,3): 1 on 3 (1,1): 1 on 1 (1,1): -1 on 2 (1,2): -1 on 7 AddNode 4 (1,2): 1 on 8 (1,3): 1 on 5 (1,1): 1 on 0 (1,1): 1 on 4 (1,1): 1 on 1 (1,2): 1 on 7 (1,3): -1 on 2 (1,2): 1 on 9 (1,3): -1 on 5 (1,1): 1 on 0 (1,2): 1 on 8 (1,3): -1 on 0 (1,3): 1 on 7 (1,1): 1 on 0 (1,3): 1 on 1 (1,2): 1 on 6 (1,2): -1 on 7 (1,1): 1 on 2 (1,2): 1 on 6 (1,2): -1 on 9 RemoveNode 2 (1,3): 1 on 7 (1,3): 1 on 2 (1,2): 1 on 7 (1,2): 1 on 7 (1,2): 1 on 9 (1,2): 1 on 6 (1,3): 1 on 2 (1,2): 1 on 5 (1,2): 1 on 6 RemoveNode 3 (1,1): 1 on 4 (1,2): 1 on 9 (1,2): -1 on 8 (1,3): -1 on 6 (1,3): 1 on 0 (1,1): 1 on 0 (1,3): 1 on 3 AddNode 1 (1,3): 1 on 4 (1,1): 1 on 1 RemoveNode 0 (1,1): 1 on 0 RemoveNode 4 (1,2): 1 on 9 (1,3): 1 on 7 (1,1): 1 on 1 (1,2): -1 on 6 AddNode 5 (1,3): -1 on 0 (1,2): 1 on 9 (1,1): 1 on 2 (1,2): 1 on 9 AddNode 3 (1,3): -1 on 2 (1,3): 1 on 1 RemoveNode 8 (1,1): 1 on 0 (1,2): -1 on 5 AddNode 4 (1,3): -1 on 5 (1,3): 1 on 0 (1,3): -1 on 0 (1,3): 1 on 7 (1,1): 1 on 0 RemoveNode 9 (1,1): -1 on 4 (1,3): 1 on 0 (1,2): 1 on 9 (1,2): 1 on 7 (1,2): 1 on 8 (1,2): 1 on 5 RemoveNode 
1 (1,3): 1 on 4 (1,3): 1 on 6 (1,1): 1 on 0 (1,1): 1 on 1 AddNode 2 (1,2): -1 on 6 AddNode 1 (1,3): 1 on 6 (1,1): 1 on 4 (1,3): -1 on 8 (1,3): 1 on 3 RemoveNode 2 (1,1): 1 on 1 (1,1): 1 on 0 (1,3): 1 on 0 AddNode 9 (1,1): -1 on 4 (1,3): 1 on 7 (1,2): -1 on 9 (1,3): 1 on 7 RemoveNode 4 (1,3): -1 on 9 AddNode 8 (1,1): 1 on 1 (1,1): 1 on 0 (1,1): 1 on 4 (1,2): 1 on 5 (1,2): 1 on 9 RemoveNode 8 (1,2): 1 on 9 (1,3): 1 on 8 (1,2): 1 on 5 (1,3): 1 on 1 AddNode 7 (1,3): 1 on 4 AddNode 4 (1,1): 1 on 3 RemoveNode 7 (1,1): 1 on 4 (1,2): 1 on 7 (1,3): 1 on 7 (1,1): 1 on 4 (1,2): 1 on 8 AddNode 6 (1,1): 1 on 2 RemoveNode 6 (1,2): 1 on 6 (1,3): 1 on 1 (1,1): -1 on 3 AddNode 0 (1,1): 1 on 0 (1,1): -1 on 2 (1,3): 1 on 9 (1,2): -1 on 8 (1,1): 1 on 3 RemoveNode 3 (1,3): -1 on 0 (1,2): 1 on 5 RemoveNode 1 (1,2): 1 on 9 AddNode 3 (1,1): -1 on 3 (1,2): 1 on 7 (1,2): 1 on 6 AddNode 8 (1,3): 1 on 6 AddNode 1 (1,3): -1 on 3 (1,1): 1 on 3 (1,3): 1 on 4 (1,1): 1 on 4 (1,2): 1 on 6 (1,1): 1 on 3 (1,3): -1 on 7 (1,1): 1 on 4 (1,2): 1 on 8 RemoveNode 4 (1,2): 1 on 7 (1,2): 1 on 5 (1,1): -1 on 0 (1,1): 1 on 4 (1,1): 1 on 0 (1,2): 1 on 7 (1,2): 1 on 5 (1,1): 1 on 0 RemoveNode 5 (1,2): 1 on 8 (1,2): 1 on 8 RemoveNode 8 (1,1): -1 on 0 (1,3): 1 on 1 (1,2): 1 on 6 RemoveNode 0 (1,3): -1 on 2 (1,1): 1 on 0 (1,2): 1 on 8 (1,3): 1 on 6 (1,2): 1 on 6 (1,3): 1 on 8 RemoveNode 1 (1,2): 1 on 8 (1,1): 1 on 2 (1,1): 1 on 4 AddNode 2 (1,2): 1 on 6 (1,1): -1 on 2 (1,3): 1 on 5 (1,1): 1 on 4 (1,1): 1 on 3 (1,2): 1 on 5 (1,2): 1 on 9 (1,3): 1 on 1 RemoveNode 2 (1,3): 1 on 9 (1,1): 1 on 1 AddNode 2 (1,2): 1 on 8 (1,2): 1 on 6 AddNode 8 (1,2): 1 on 8 (1,3): 1 on 8 AddNode 0 (1,3): 1 on 8 (1,1): 1 on 4 (1,1): -1 on 2 RemoveNode 9 (1,1): 1 on 1 (1,1): 1 on 3 (1,1): -1 on 3 (1,3): 1 on 4 (1,3): 1 on 5 AddNode 1 (1,2): 1 on 6 (1,2): -1 on 9 (1,1): 1 on 4 (1,3): 1 on 9 (1,3): 1 on 1 (1,3): 1 on 7 (1,2): -1 on 8 (1,2): 1 on 6 (1,1): 1 on 0 (1,2): -1 on 9 (1,1): 1 on 1 (1,2): 1 on 5 (1,1): 1 on 3 (1,1): 1 on 0 (1,1): -1 on 4 (1,2): -1 on 6 (1,1): 1 on 0 (1,1): 1 on 4 (1,2): 1 on 9 (1,3): 1 on 5 (1,3): 1 on 2 AddNode 5 (1,3): 1 on 8 (1,2): 1 on 9 (1,1): 1 on 0 RemoveNode 5 (1,2): -1 on 7 (1,2): 1 on 6 (1,2): 1 on 6 (1,2): -1 on 5 (1,1): 1 on 3 (1,3): 1 on 5 (1,3): 1 on 4 (1,3): 1 on 4 (1,3): -1 on 2 (1,2): -1 on 7 (1,1): 1 on 3 (1,3): -1 on 7 (1,2): 1 on 6 (1,1): 1 on 2 AddNode 6 (1,1): -1 on 0 (1,2): -1 on 5 (1,3): 1 on 6 (1,1): 1 on 1 AddNode 9 (1,1): 1 on 4 (1,1): 1 on 1 AddNode 7 (1,3): 1 on 3 (1,2): -1 on 7 (1,2): 1 on 9 (1,2): 1 on 5 AddNode 4 (1,3): 1 on 2 (1,2): 1 on 5 RemoveNode 8 (1,2): 1 on 7 (1,2): 1 on 9 RemoveNode 6 (1,2): 1 on 7 (1,3): 1 on 3 (1,3): 1 on 8 (1,1): 1 on 3 RemoveNode 4 (1,3): 1 on 4 (1,3): 1 on 5 (1,2): 1 on 6 (1,1): 1 on 2 (1,2): 1 on 8 AddNode 8 (1,3): 1 on 9 (1,1): 1 on 3 (1,2): 1 on 9 AddNode 5 (1,1): 1 on 3 RemoveNode 8 (1,2): 1 on 9 RemoveNode 7 (1,3): 1 on 0 (1,2): -1 on 9 RemoveNode 1 (1,1): -1 on 1 (1,1): 1 on 0 (1,2): 1 on 6 RemoveNode 2 (1,2): 1 on 7 (1,2): 1 on 8 (1,2): 1 on 9 (1,2): 1 on 7 (1,1): -1 on 4 (1,3): 1 on 1 (1,2): -1 on 5 (1,1): 1 on 3 (1,2): 1 on 9 (1,2): 1 on 5 AddNode 4 (1,2): 1 on 8 RemoveNode 3 (1,2): 1 on 9 AddNode 1 (1,3): -1 on 2 (1,2): -1 on 6 (1,2): 1 on 9 (1,3): -1 on 2 AddNode 2 (1,3): 1 on 0 RemoveNode 0 (1,1): -1 on 3 (1,2): 1 on 6 (1,2): 1 on 9 (1,2): 1 on 9 AddNode 6 (1,2): -1 on 7 RemoveNode 4 (1,2): 1 on 6 AddNode 4 (1,2): 1 on 6 (1,1): 1 on 4 AddNode 0 (1,3): 1 on 4 RemoveNode 9 (1,2): 1 on 8 (1,2): 1 on 7 (1,2): 1 on 6 AddNode 8 (1,1): 1 on 0 (1,1): 1 on 0 AddNode 7 (1,1): 1 
on 3 (1,3): 1 on 5 (1,3): -1 on 7 (1,1): -1 on 4 RemoveNode 6 (1,3): 1 on 0 RemoveNode 7 (1,1): 1 on 4 (1,3): 1 on 3 (1,3): 1 on 2 (1,1): 1 on 4 AddNode 9 (1,2): 1 on 8 (1,1): 1 on 0 RemoveNode 0 (1,2): -1 on 8 (1,2): 1 on 6 AddNode 7 (1,2): 1 on 9 (1,2): 1 on 7 (1,2): 1 on 5 (1,2): 1 on 5 AddNode 3 (1,1): 1 on 3 RemoveNode 7 (1,1): 1 on 2 (1,3): 1 on 0 RemoveNode 4 (1,1): 1 on 4 (1,2): -1 on 8 (1,2): 1 on 7 RemoveNode 1 (1,2): 1 on 9 (1,2): 1 on 7 (1,2): 1 on 7 (1,3): -1 on 5 AddNode 6 (1,3): 1 on 8 RemoveNode 9 (1,2): 1 on 5 (1,3): 1 on 9 (1,1): 1 on 2 (1,1): 1 on 0 (1,2): 1 on 6 RemoveNode 8 (1,2): 1 on 9 (1,3): 1 on 6 (1,2): 1 on 8 (1,2): 1 on 7 (1,2): 1 on 9 (1,1): 1 on 4 (1,1): -1 on 2 RemoveNode 6 (1,2): 1 on 6 RemoveNode 2 (1,1): 1 on 2 (1,2): 1 on 6 (1,3): 1 on 3 (1,1): 1 on 1 (1,3): 1 on 8 AddNode 0 (1,2): 1 on 8 RemoveNode 5 (1,3): 1 on 6 AddNode 8 (1,1): -1 on 1 (1,1): 1 on 4 AddNode 2 (1,1): 1 on 2 RemoveNode 2 (1,1): -1 on 4 (1,1): 1 on 1 AddNode 9 (1,2): 1 on 6 (1,2): 1 on 5 RemoveNode 8 (1,3): 1 on 3 AddNode 7 (1,2): 1 on 8 (1,2): 1 on 6 (1,2): 1 on 5 RemoveNode 3 (1,3): 1 on 3 AddNode 4 (1,2): 1 on 5 (1,3): 1 on 1 (1,3): -1 on 6 (1,2): 1 on 5 RemoveNode 4 (1,3): -1 on 6 (1,3): 1 on 2 (1,1): -1 on 4 (1,3): 1 on 9 (1,1): -1 on 0 (1,2): 1 on 7 (1,1): 1 on 1 (1,1): -1 on 1 (1,1): 1 on 1 (1,3): 1 on 6 (1,2): 1 on 8 AddNode 2 (1,3): 1 on 0 (1,2): 1 on 8 RemoveNode 7 (1,1): 1 on 3 (1,1): 1 on 1 AddNode 1 (1,1): 1 on 2 (1,3): 1 on 5 (1,3): 1 on 1 (1,3): 1 on 7 AddNode 8 (1,1): 1 on 4 (1,3): -1 on 7 (1,2): 1 on 8 (1,1): 1 on 2 (1,2): 1 on 5 AddNode 7 (1,1): -1 on 3 (1,2): -1 on 7 (1,2): 1 on 5 AddNode 4 (1,2): -1 on 9 (1,2): -1 on 7 (1,1): -1 on 2 (1,2): 1 on 6 (1,1): 1 on 3 (1,2): 1 on 7 (1,2): 1 on 8 RemoveNode 1 (1,2): 1 on 7 RemoveNode 4 (1,2): 1 on 5 (1,1): -1 on 2 (1,1): 1 on 0 (1,3): -1 on 2 (1,2): 1 on 6 AddNode 6 (1,2): 1 on 5 (1,3): 1 on 6 (1,2): 1 on 5 AddNode 4 (1,3): 1 on 8 (1,2): 1 on 8 (1,3): 1 on 1 (1,3): -1 on 6 (1,2): 1 on 8 (1,3): -1 on 3 (1,2): 1 on 6 (1,1): 1 on 2 (1,3): -1 on 8 (1,2): 1 on 5 (1,3): 1 on 3 AddNode 3 (1,1): 1 on 0 RemoveNode 8 (1,2): 1 on 7 AddNode 8 (1,3): 1 on 3 (1,1): -1 on 0 RemoveNode 0 (1,2): 1 on 8 (1,2): 1 on 9 RemoveNode 3 (1,1): -1 on 2 RemoveNode 8 (1,1): 1 on 0 RemoveNode 7 (1,1): 1 on 4 (1,2): 1 on 8 (1,3): 1 on 9 (1,1): 1 on 2 (1,3): 1 on 3 AddNode 3 (1,2): 1 on 8 AddNode 1 (1,2): 1 on 7 RemoveNode 6 (1,2): 1 on 5 (1,2): -1 on 6 RemoveNode 9 (1,1): 1 on 0 (1,2): 1 on 7 AddNode 0 (1,1): 1 on 4 AddNode 7 (1,3): 1 on 3 (1,1): 1 on 4 (1,3): 1 on 5 (1,1): 1 on 0 (1,3): 1 on 6 (1,3): -1 on 2 RemoveNode 0 (1,3): 1 on 7 AddNode 0 (1,1): 1 on 1 (1,1): -1 on 2 AddNode 6 (1,1): 1 on 3 (1,2): 1 on 5 RemoveNode 2 (1,3): 1 on 7 (1,2): 1 on 7 (1,3): 1 on 8 (1,1): 1 on 1 (1,1): 1 on 0 (1,2): 1 on 7 AddNode 2 (1,2): 1 on 6 (1,3): 1 on 0 RemoveNode 4 (1,1): 1 on 0 (1,1): 1 on 3 (1,3): 1 on 6 (1,2): 1 on 8 (1,3): -1 on 8 (1,2): -1 on 7 (1,1): -1 on 1 (1,1): 1 on 0 (1,1): 1 on 1 RemoveNode 0 (1,1): 1 on 4 RemoveNode 7 (1,2): 1 on 5 RemoveNode 1 (1,1): 1 on 2 (1,2): -1 on 7 (1,3): -1 on 9 (1,1): -1 on 4 (1,1): 1 on 4 (1,3): 1 on 5 (1,1): 1 on 4 RemoveNode 3 (1,1): 1 on 4 (1,1): 1 on 4 (1,1): 1 on 4 (1,2): 1 on 6 AddNode 0 (1,1): 1 on 3 (1,3): 1 on 0 (1,3): 1 on 1 (1,1): 1 on 2 (1,1): 1 on 1 (1,1): 1 on 0 AddNode 4 (1,2): 1 on 7 (1,2): 1 on 9 RemoveNode 4 (1,1): 1 on 3 (1,2): 1 on 5 (1,2): 1 on 6 AddNode 7 (1,3): 1 on 8 (1,1): 1 on 2 (1,3): 1 on 3 AddNode 8 (1,1 ... 
3): 1 on 4 (1,3): 1 on 3 (1,2): 1 on 8 AddNode 5 (1,2): 1 on 7 (1,2): -1 on 8 RemoveNode 9 (1,2): -1 on 6 (1,2): 1 on 6 (1,3): 1 on 9 RemoveNode 6 (1,1): 1 on 2 (1,3): -1 on 2 (1,1): -1 on 4 RemoveNode 5 (1,2): 1 on 8 (1,2): 1 on 5 AddNode 2 (1,1): 1 on 4 (1,3): 1 on 3 RemoveNode 2 (1,3): 1 on 6 (1,2): 1 on 6 (1,3): 1 on 2 RemoveNode 7 (1,1): 1 on 3 AddNode 2 (1,2): 1 on 6 (1,2): 1 on 9 (1,3): 1 on 0 (1,2): 1 on 7 RemoveNode 4 (1,2): 1 on 5 (1,2): 1 on 9 AddNode 4 (1,3): 1 on 2 (1,3): 1 on 5 (1,3): 1 on 3 (1,2): 1 on 7 (1,1): 1 on 4 (1,3): 1 on 6 (1,3): 1 on 4 (1,1): 1 on 1 (1,3): 1 on 5 (1,1): -1 on 0 RemoveNode 3 (1,1): 1 on 3 (1,1): 1 on 3 (1,3): -1 on 1 RemoveNode 4 (1,3): 1 on 1 (1,1): 1 on 3 (1,1): 1 on 0 AddNode 0 (1,3): 1 on 2 (1,2): 1 on 5 AddNode 1 (1,2): 1 on 7 (1,2): 1 on 9 AddNode 4 (1,1): 1 on 2 (1,3): 1 on 0 AddNode 9 (1,3): -1 on 6 AddNode 7 (1,2): 1 on 8 (1,1): 1 on 2 RemoveNode 4 (1,3): 1 on 6 (1,1): 1 on 2 (1,1): 1 on 3 (1,2): 1 on 8 (1,2): 1 on 6 (1,2): 1 on 9 RemoveNode 7 (1,3): 1 on 9 (1,3): 1 on 3 (1,3): 1 on 5 (1,3): 1 on 5 (1,2): 1 on 6 (1,3): 1 on 7 (1,3): -1 on 2 (1,2): -1 on 9 (1,1): -1 on 4 (1,2): 1 on 7 RemoveNode 9 (1,3): 1 on 0 RemoveNode 1 (1,1): 1 on 0 AddNode 7 (1,3): 1 on 8 (1,2): 1 on 6 (1,1): 1 on 1 RemoveNode 2 (1,3): -1 on 0 (1,2): -1 on 6 (1,3): 1 on 5 AddNode 3 (1,1): -1 on 3 AddNode 4 (1,3): 1 on 1 (1,1): 1 on 2 (1,2): 1 on 5 AddNode 9 (1,1): 1 on 4 (1,2): 1 on 6 RemoveNode 7 (1,3): -1 on 5 (1,1): 1 on 1 (1,3): 1 on 6 RemoveNode 9 (1,3): 1 on 9 RemoveNode 8 (1,1): 1 on 2 AddNode 6 (1,1): 1 on 2 (1,3): 1 on 7 (1,2): 1 on 7 AddNode 8 (1,2): 1 on 5 AddNode 5 (1,2): 1 on 7 (1,2): 1 on 6 (1,2): 1 on 5 (1,3): 1 on 5 (1,1): 1 on 4 (1,2): -1 on 5 RemoveNode 4 (1,2): 1 on 5 (1,3): 1 on 2 (1,1): 1 on 1 (1,3): 1 on 3 (1,2): -1 on 9 (1,2): -1 on 6 AddNode 4 (1,3): 1 on 9 RemoveNode 4 (1,3): -1 on 1 RemoveNode 0 (1,3): 1 on 8 (1,2): 1 on 7 AddNode 2 (1,3): 1 on 1 (1,2): 1 on 6 AddNode 7 (1,2): 1 on 9 AddNode 1 (1,2): 1 on 9 (1,2): 1 on 8 (1,1): 1 on 0 (1,3): 1 on 9 RemoveNode 6 (1,2): 1 on 8 AddNode 6 (1,3): -1 on 7 (1,2): 1 on 8 (1,3): -1 on 5 (1,2): 1 on 8 AddNode 0 (1,1): 1 on 2 (1,1): 1 on 1 (1,2): 1 on 5 RemoveNode 0 (1,3): -1 on 9 (1,3): 1 on 0 AddNode 0 (1,3): 1 on 8 RemoveNode 7 (1,2): -1 on 5 (1,1): 1 on 1 (1,1): -1 on 3 RemoveNode 2 (1,1): 1 on 0 (1,2): -1 on 7 (1,3): 1 on 2 (1,1): 1 on 2 (1,3): 1 on 1 (1,1): -1 on 1 (1,2): 1 on 6 (1,3): 1 on 4 (1,2): 1 on 9 (1,3): -1 on 4 RemoveNode 3 (1,2): 1 on 6 (1,3): 1 on 4 RemoveNode 5 (1,1): 1 on 0 (1,3): 1 on 3 RemoveNode 1 (1,3): -1 on 0 (1,1): 1 on 2 (1,2): 1 on 6 (1,3): 1 on 0 (1,2): -1 on 5 AddNode 3 (1,2): 1 on 9 (1,1): 1 on 1 AddNode 2 (1,2): 1 on 8 RemoveNode 0 (1,3): 1 on 8 RemoveNode 2 (1,3): 1 on 0 RemoveNode 6 (1,2): 1 on 6 (1,3): 1 on 5 (1,1): 1 on 1 AddNode 7 (1,1): 1 on 0 (1,2): 1 on 6 (1,1): 1 on 3 (1,2): 1 on 5 RemoveNode 8 (1,2): 1 on 8 (1,1): 1 on 3 (1,1): 1 on 0 AddNode 4 (1,2): 1 on 8 AddNode 8 (1,2): 1 on 6 RemoveNode 8 (1,3): 1 on 9 AddNode 9 (1,2): 1 on 9 RemoveNode 9 (1,2): 1 on 9 AddNode 8 (1,2): 1 on 8 (1,3): -1 on 5 AddNode 6 (1,3): 1 on 3 RemoveNode 7 (1,3): 1 on 8 (1,2): 1 on 7 AddNode 0 (1,2): -1 on 9 (1,3): 1 on 4 (1,2): 1 on 7 (1,3): -1 on 5 (1,1): -1 on 1 (1,1): 1 on 2 (1,3): 1 on 6 AddNode 5 (1,3): 1 on 7 RemoveNode 3 (1,2): 1 on 8 (1,2): 1 on 5 (1,1): 1 on 3 (1,3): 1 on 5 (1,1): 1 on 4 (1,3): 1 on 8 (1,3): 1 on 7 (1,2): -1 on 8 AddNode 3 (1,1): 1 on 0 RemoveNode 0 (1,2): 1 on 6 (1,1): 1 on 2 (1,3): 1 on 8 RemoveNode 4 (1,3): 1 on 2 (1,2): -1 on 6 (1,3): 1 on 3 AddNode 2 
(1,3): 1 on 5 (1,1): 1 on 2 (1,3): 1 on 2 RemoveNode 3 (1,3): 1 on 3 (1,2): 1 on 6 RemoveNode 5 (1,2): 1 on 9 (1,3): -1 on 9 (1,2): 1 on 6 (1,2): -1 on 6 AddNode 0 (1,2): 1 on 5 AddNode 3 (1,3): -1 on 4 (1,3): 1 on 7 RemoveNode 2 (1,1): 1 on 1 (1,3): 1 on 5 RemoveNode 8 (1,1): 1 on 2 (1,2): 1 on 7 (1,2): 1 on 9 RemoveNode 0 (1,1): -1 on 2 RemoveNode 6 (1,2): 1 on 6 AddNode 0 (1,1): 1 on 4 (1,1): 1 on 2 (1,1): 1 on 2 (1,2): 1 on 5 (1,1): 1 on 0 (1,2): 1 on 6 (1,3): -1 on 8 (1,3): 1 on 5 (1,3): 1 on 1 (1,1): 1 on 3 AddNode 4 (1,1): -1 on 3 (1,1): 1 on 2 (1,3): -1 on 5 RemoveNode 4 (1,3): 1 on 2 (1,1): 1 on 0 (1,3): -1 on 6 (1,1): 1 on 1 (1,2): 1 on 6 (1,2): -1 on 6 (1,2): 1 on 7 (1,3): -1 on 3 AddNode 7 (1,2): 1 on 7 RemoveNode 0 (1,3): 1 on 0 (1,1): 1 on 2 (1,2): 1 on 6 (1,3): 1 on 7 (1,1): 1 on 2 (1,2): 1 on 9 RemoveNode 7 (1,2): -1 on 6 AddNode 7 (1,2): 1 on 7 (1,2): 1 on 7 (1,2): -1 on 6 RemoveNode 3 (1,3): 1 on 9 (1,2): 1 on 5 (1,1): 1 on 3 (1,2): 1 on 5 AddNode 0 (1,1): 1 on 3 (1,1): 1 on 1 (1,2): 1 on 5 RemoveNode 7 (1,3): -1 on 7 AddNode 7 (1,1): 1 on 1 (1,2): 1 on 9 (1,2): -1 on 6 AddNode 5 (1,1): 1 on 1 (1,3): 1 on 2 (1,3): 1 on 2 (1,2): 1 on 9 AddNode 4 (1,3): 1 on 9 (1,3): -1 on 2 RemoveNode 4 (1,3): -1 on 4 RemoveNode 7 (1,1): -1 on 4 (1,3): 1 on 2 (1,3): -1 on 2 (1,1): 1 on 4 (1,1): 1 on 0 (1,2): 1 on 9 (1,3): 1 on 0 (1,2): -1 on 7 AddNode 9 (1,1): 1 on 4 (1,3): 1 on 7 (1,3): 1 on 8 (1,1): 1 on 3 AddNode 2 (1,3): 1 on 2 RemoveNode 0 (1,2): -1 on 7 (1,1): 1 on 2 (1,3): 1 on 7 (1,2): 1 on 6 (1,2): -1 on 6 AddNode 6 (1,1): 1 on 3 (1,1): 1 on 4 (1,2): -1 on 6 (1,3): 1 on 0 AddNode 7 (1,1): 1 on 0 (1,3): -1 on 8 RemoveNode 9 (1,2): 1 on 7 (1,2): 1 on 5 (1,1): 1 on 2 (1,1): 1 on 0 (1,3): 1 on 4 (1,1): 1 on 0 AddNode 0 (1,1): 1 on 3 RemoveNode 7 (1,3): 1 on 0 (1,2): 1 on 7 (1,2): 1 on 9 (1,2): 1 on 5 AddNode 7 (1,3): 1 on 8 (1,1): 1 on 1 RemoveNode 0 (1,2): 1 on 9 (1,2): -1 on 5 AddNode 1 (1,2): 1 on 5 (1,2): 1 on 6 (1,1): 1 on 4 (1,1): 1 on 3 (1,3): 1 on 0 (1,2): 1 on 9 (1,1): 1 on 1 (1,1): 1 on 1 AddNode 9 (1,3): 1 on 2 RemoveNode 7 (1,2): 1 on 5 RemoveNode 1 (1,1): -1 on 1 (1,1): -1 on 3 (1,3): 1 on 2 AddNode 0 (1,2): 1 on 7 (1,3): -1 on 0 (1,1): 1 on 3 AddNode 8 (1,2): 1 on 7 (1,3): 1 on 5 (1,2): 1 on 6 (1,3): 1 on 2 (1,3): 1 on 2 RemoveNode 0 (1,2): 1 on 5 AddNode 0 (1,1): -1 on 1 RemoveNode 2 (1,1): 1 on 2 (1,1): -1 on 2 (1,3): 1 on 8 (1,2): 1 on 9 (1,3): -1 on 6 (1,3): -1 on 8 (1,1): 1 on 2 RemoveNode 0 (1,1): -1 on 3 (1,2): 1 on 9 (1,1): 1 on 4 (1,1): 1 on 0 (1,1): 1 on 0 (1,2): -1 on 8 (1,2): 1 on 5 (1,1): 1 on 3 (1,2): 1 on 7 (1,2): 1 on 6 (1,2): 1 on 9 (1,1): 1 on 2 (1,2): 1 on 8 (1,3): 1 on 9 RemoveNode 5 (1,2): 1 on 7 (1,2): 1 on 5 (1,2): -1 on 9 (1,3): 1 on 5 (1,2): 1 on 8 (1,3): 1 on 8 RemoveNode 6 (1,2): -1 on 6 (1,3): 1 on 6 (1,3): 1 on 3 (1,2): 1 on 8 (1,1): 1 on 1 (1,3): 1 on 1 (1,1): 1 on 1 AddNode 6 (1,1): 1 on 4 AddNode 3 (1,2): 1 on 8 (1,1): 1 on 2 RemoveNode 9 (1,3): 1 on 1 AddNode 2 (1,1): 1 on 0 (1,3): 1 on 7 AddNode 9 (1,1): -1 on 2 AddNode 1 (1,1): -1 on 1 (1,2): 1 on 8 RemoveNode 2 (1,1): 1 on 3 (1,2): 1 on 7 (1,2): 1 on 7 (1,2): 1 on 9 AddNode 2 (1,2): 1 on 8 (1,2): 1 on 9 (1,3): 1 on 3 RemoveNode 2 (1,1): 1 on 4 AddNode 7 (1,1): 1 on 1 RemoveNode 9 (1,2): 1 on 9 (1,3): 1 on 7 AddNode 4 (1,2): 1 on 6 (1,3): -1 on 7 (1,2): -1 on 6 (1,3): 1 on 5 (1,2): -1 on 8 (1,1): 1 on 3 AddNode 2 (1,1): 1 on 1 (1,2): 1 on 8 (1,3): 1 on 2 (1,1): 1 on 4 (1,3): -1 on 8 (1,1): 1 on 3 (1,1): 1 on 4 RemoveNode 8 (1,1): 1 on 3 RemoveNode 4 (1,2): 1 on 8 (1,2): 1 on 9 (1,3): -1 
on 2 (1,1): -1 on 0 (1,2): 1 on 5 AddNode 0 (1,1): 1 on 3 (1,1): 1 on 3 (1,1): 1 on 4 (1,1): -1 on 1 (1,1): 1 on 0 (1,1): -1 on 4 (1,2): 1 on 9 (1,3): 1 on 7 (1,3): 1 on 8 (1,1): 1 on 1 (1,3): -1 on 4 (1,1): 1 on 0 (1,1): 1 on 3 (1,1): 1 on 3 RemoveNode 1 (1,3): 1 on 3 (1,3): 1 on 0 (1,1): 1 on 3 RemoveNode 2 (1,3): 1 on 5 (1,1): -1 on 2 (1,2): 1 on 8 (1,1): 1 on 1 RemoveNode 7 (1,3): -1 on 2 (1,1): 1 on 2 (1,1): 1 on 0 (1,1): 1 on 3 (1,1): 1 on 3 (1,1): 1 on 0 AddNode 9 (1,3): -1 on 7 (1,1): 1 on 1 RemoveNode 0 (1,3): -1 on 9 AddNode 2 (1,1): 1 on 3 (1,1): -1 on 0 (1,1): 1 on 0 (1,3): -1 on 1 (1,2): 1 on 8 (1,2): -1 on 8 (1,2): 1 on 9 (1,1): -1 on 4 RemoveNode 2 (1,3): 1 on 2 (1,3): 1 on 3 (1,2): 1 on 8 (1,3): 1 on 5 (1,2): 1 on 9 AddNode 2 (1,2): -1 on 8 RemoveNode 9 (1,3): -1 on 3 (1,3): -1 on 1 RemoveNode 3 (1,1): 1 on 0 AddNode 5 (1,3): 1 on 4 RemoveNode 6 (1,2): 1 on 7 (1,1): 1 on 2 AddNode 1 (1,1): 1 on 1 RemoveNode 2 (1,3): -1 on 1 AddNode 2 (1,3): -1 on 7 (1,2): -1 on 5 (1,1): -1 on 1 (1,1): -1 on 1 (1,1): 1 on 1 AddNode 8 (1,1): 1 on 3 AddNode 3 (1,2): 1 on 9 (1,3): 1 on 5 (1,1): 1 on 2 (1,2): 1 on 6 (1,2): -1 on 7 AddNode 6 (1,3): 1 on 9 (1,1): 1 on 0 AddNode 0 (1,2): 1 on 9 AddNode 7 (1,2): 1 on 7 (1,3): 1 on 1 (1,3): 1 on 1 (1,2): -1 on 9 (1,1): -1 on 3 RemoveNode 7 (1,2): 1 on 8 (1,3): 1 on 0 RemoveNode 0 (1,3): 1 on 1 (1,1): -1 on 3 RemoveNode 1 (1,1): 1 on 4 (1,3): 1 on 0 (1,3): 1 on 5 (1,1): 1 on 0 (1,3): 1 on 8 (1,1): 1 on 2 AddNode 9 (1,3): 1 on 6 (1,3): 1 on 6 (1,1): -1 on 1 (1,1): 1 on 4 AddNode 7 (1,2): 1 on 9 (1,2): -1 on 8 (1,2): 1 on 8 (1,2): -1 on 5 AddNode 4 (1,1): 1 on 0 (1,3): 1 on 5 (1,3): 1 on 2 RemoveNode 4 (1,3): -1 on 9 (1,1): 1 on 4 (1,2): 1 on 9 RemoveNode 9 (1,2): 1 on 7 (1,1): 1 on 2 (1,1): 1 on 3 (1,2): 1 on 7 RemoveNode 6 (1,3): 1 on 2 (1,1): 1 on 2 (1,1): 1 on 0 (1,1): 1 on 0 (1,2): -1 on 5 (1,1): 1 on 2 (1,2): 1 on 9 (1,1): -1 on 3 (1,1): 1 on 1 RemoveNode 5 (1,1): 1 on 4 (1,2): 1 on 7 (1,2): 1 on 7 (1,3): 1 on 1 (1,2): 1 on 9 (1,1): 1 on 1 (1,3): 1 on 4 (1,3): 1 on 4 RemoveNode 2 (1,1): 1 on 0 (1,3): 1 on 0 AddNode 4 (1,1): 1 on 0 (1,1): 1 on 3 RemoveNode 4 (1,2): 1 on 8 (1,2): -1 on 9 (1,2): -1 on 7 AddNode 5 (1,1): 1 on 0 AddNode 0 (1,2): 1 on 7 (1,2): -1 on 5 (1,1): 1 on 2 (1,3): 1 on 8 (1,1): -1 on 2 RemoveNode 0 (1,2): 1 on 5 (1,2): -1 on 7 RemoveNode 5 (1,3): 1 on 5 AddNode 2 (1,2): 1 on 6 AddNode 0 (1,1): 1 on 3 (1,1): 1 on 4 (1,2): 1 on 5 (1,3): 1 on 3 AddNode 9 (1,1): 1 on 0 (1,2): 1 on 6 RemoveNode 0 (1,1): 1 on 1 (1,3): 1 on 2 (1,1): -1 on 4 (1,3): 1 on 3 AddNode 5 (1,2): 1 on 9 (1,3): 1 on 2 (1,2): -1 on 5 (1,2): 1 on 6 AddNode 1 (1,1): -1 on 0 RemoveNode 1 (1,2): -1 on 7 AddNode 4 (1,1): 1 on 0 (1,2): 1 on 9 (1,1): 1 on 2 RemoveNode 3 (1,3): -1 on 6 RemoveNode 8 (1,2): 1 on 8 (1,3): 1 on 7 (1,3): 1 on 0 RemoveNode 5 (1,2): -1 on 7 (1,3): 1 on 3 AddNode 6 (1,1): 1 on 2 AddNode 5 (1,2): 1 on 6 AddNode 3 (1,3): 1 on 2 RemoveNode 4 (1,3): 1 on 3 (1,2): 1 on 9 (1,3): 1 on 4 AddNode 0 (1,1): -1 on 4 RemoveNode 0 (1,2): 1 on 6 RemoveNode 5 (1,1): 1 on 0 (1,1): -1 on 4 (1,3): 1 on 1 (1,1): 1 on 0 AddNode 8 (1,1): -1 on 2 (1,3): -1 on 0 (1,3): 1 on 6 (1,1): 1 on 2 (1,2): 1 on 7 AddNode 5 (1,2): 1 on 9 (1,1): 1 on 2 (1,3): 1 on 5 (1,3): 1 on 1 RemoveNode 9 (1,2): 1 on 6 AddNode 0 (1,1): 1 on 1 (1,3): 1 on 3 (1,2): 1 on 6 (1,1): -1 on 0 (1,2): 1 on 9 (1,3): 1 on 1 (1,2): 1 on 8 (1,1): 1 on 3 (1,1): 1 on 3 (1,3): 1 on 4 RemoveNode 5 (1,2): -1 on 6 (1,3): 1 on 4 (1,1): -1 on 1 (1,1): 1 on 3 AddNode 5 (1,1): 1 on 4 (1,3): 1 on 3 (1,1): -1 on 2 
(1,3): -1 on 1 (1,1): 1 on 1 (1,2): 1 on 9 (1,2): 1 on 7 (1,1): 1 on 0 (1,3): 1 on 1 RemoveNode 0 (1,2): 1 on 5 (1,3): -1 on 8 (1,2): 1 on 6 (1,1): 1 on 4 (1,1): -1 on 3 RemoveNode 6 (1,3): 1 on 9 AddNode 9 (1,1): 1 on 0 RemoveNode 5 (1,3): 1 on 0 RemoveNode 3 (1,3): -1 on 4 (1,2): 1 on 8 (1,2): 1 on 7 (1,2): -1 on 7 (1,3): -1 on 6 Final state: 403 387 397 417 400 0 0 0 0 0 0 0 0 0 0 359 427 442 433 410 192 199 174 233 198 205 200 154 185 175 - - + - - - - + + + Took 3.315426 seconds avg = 4800 min = 4800 max = 4800 std-dev = 0 ch.0 avg = 1600 ch.0 min = 1520 ch.0 max = 1714 ch.0 std-dev = 33.93847374 ch.1 avg = 1600 ch.1 min = 1521 ch.1 max = 1671 ch.1 std-dev = 32.84326415 ch.2 avg = 1600 ch.2 min = 1531 ch.2 max = 1667 ch.2 std-dev = 32.62115878 avg = 1250 std-dev = 0 avg = 4800 min = 4800 max = 4800 std-dev = 0 ch.0 avg = 1600 ch.0 min = 1600 ch.0 max = 1600 ch.0 std-dev = 0 ch.1 avg = 1600 ch.1 min = 1600 ch.1 max = 1600 ch.1 std-dev = 0 ch.2 avg = 1600 ch.2 min = 1600 ch.2 max = 1600 ch.2 std-dev = 0 avg = 1250 std-dev = 0 avg = 4800 min = 4799 max = 4801 std-dev = 0.2 ch.0 avg = 1600 ch.0 min = 1512 ch.0 max = 1667 ch.0 std-dev = 34.0123507 ch.1 avg = 1600 ch.1 min = 1534 ch.1 max = 1698 ch.1 std-dev = 33.87181719 ch.2 avg = 1600 ch.2 min = 1528 ch.2 max = 1696 ch.2 std-dev = 29.86235088 avg = 1250 std-dev = 0 >> KqpQueryService::TableSink_ReplaceFromSelectLargeOlap [GOOD] >> KqpQueryService::TableSink_ReplaceDuplicatesOlap >> THiveTest::TestHiveBalancerWithPrefferedDC2 [GOOD] >> THiveTest::TestHiveBalancerWithPreferredDC3 >> THiveTest::TestHiveBalancerWithSystemTablets [GOOD] >> THiveTest::TestHiveNoBalancingWithLowResourceUsage >> UpsertLoad::ShouldWriteDataBulkUpsert [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsert2 >> TMonitoringTests::InvalidActorId [GOOD] >> THiveTest::TestSpreadNeighboursWithUpdateTabletsObject [GOOD] >> THiveTest::TestSpreadNeighboursDifferentOwners >> TxUsage::Sinks_Oltp_WriteToTopic_5_Query [GOOD] >> DataShardSnapshots::LockedWritesLimitedPerKey-UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit+UseSink |79.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |79.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table |79.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::InvalidActorId [GOOD] |79.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest >> UpsertLoad::ShouldWriteDataBulkUpsert2 [GOOD] >> KqpQueryService::TableSink_ReplaceDuplicatesOlap [GOOD] >> KqpQueryService::TableSink_Oltp_Replace-UseSink |79.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest >> THiveTest::TestHiveBalancerWithPreferredDC3 [GOOD] >> THiveTest::TestHiveBalancerWithFollowers >> THiveTest::TestSpreadNeighboursDifferentOwners [GOOD] >> THiveTest::TestUpdateTabletsObjectUpdatesMetrics >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsert2 [GOOD] Test command err: 2025-06-30T09:18:04.374392Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: 
[WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:04.374489Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:04.374509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00462b/r3tmp/tmpCLRw7z/pdisk_1.dat 2025-06-30T09:18:04.469959Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:04.470996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:04.487905Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:04.489171Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275083970078 != 1751275083970082 2025-06-30T09:18:04.531455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:04.531497Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:04.542147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:04.616099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:04.850073Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-30T09:18:04.850128Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:697:2579], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-30T09:18:04.921464Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:697:2579], subTag: 2} TUpsertActor finished in 0.071241s, errors=0 2025-06-30T09:18:04.921508Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:698:2580] with tag# 2 2025-06-30T09:18:05.658169Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:272:2315], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:05.658269Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:18:05.658281Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00462b/r3tmp/tmpVR6ZNF/pdisk_1.dat 2025-06-30T09:18:05.759819Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-30T09:18:05.760247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:05.777470Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:05.777942Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1751275085201246 != 1751275085201249 2025-06-30T09:18:05.820910Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:05.820953Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:05.831573Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:05.907281Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:06.137172Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-30T09:18:06.137207Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:697:2579], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" 2025-06-30T09:18:06.204001Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:697:2579], subTag: 2} TUpsertActor finished in 0.066740s, errors=0 2025-06-30T09:18:06.204032Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:698:2580] with tag# 2 |79.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit+UseSink [GOOD] >> 
DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink >> KqpQueryService::TableSink_Oltp_Replace-UseSink [GOOD] >> THiveTest::TestUpdateTabletsObjectUpdatesMetrics [GOOD] >> THiveTest::TestRestartTablets >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table [FAIL] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table |79.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::ValidActorId >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query >> TxUsage::WriteToTopic_Demo_12_Query [GOOD] >> TMonitoringTests::ValidActorId [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_2_Table >> TSchemeShardTestExtSubdomainReboots::CreateExternalSubdomainWithoutHive-AlterDatabaseCreateHiveFirst-true [GOOD] >> KqpRbo::JoinFilter >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly >> THiveTest::TestRestartTablets [GOOD] >> THiveTest::TestServerlessComputeResourcesMode >> ReadSessionImplTest::UsesOnRetryStateDuringRetries [GOOD] >> RetryPolicy::TWriteSession_TestPolicy |79.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::ValidActorId [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query [GOOD] >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table >> ApplyClusterEndpointTest::NoPorts [GOOD] >> ApplyClusterEndpointTest::PortFromCds [GOOD] >> ApplyClusterEndpointTest::PortFromDriver [GOOD] >> BasicUsage::MaxByteSizeEqualZero |79.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |79.3%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Table [GOOD] >> TSchemeShardTestExtSubdomainReboots::CreateExternalSubdomainWithoutHive-AlterDatabaseCreateHiveFirst-false [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::CreateExternalSubdomainWithoutHive-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:16:38.938774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, 
RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:38.938796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.938802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:38.938807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:38.938813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:38.938817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:38.938827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.938843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:38.938937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:38.939005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:38.952171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:16:38.952190Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:38.952266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.954354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:38.955153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:38.955186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:38.956508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:38.956542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:38.956630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.956673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:38.957378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 
72057594046678944 2025-06-30T09:16:38.957409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:38.957586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:38.957594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.957599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:38.957603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:38.957607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:38.957621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:16:38.958555Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.972777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:38.972845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.972902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:38.972907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:38.972946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:38.972955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:38.973475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: 
StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.973509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:38.973543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.973549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:38.973552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:38.973556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:38.973902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.973913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:38.973921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:38.974316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.974323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.974328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:38.974333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:38.974936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:38.975300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:38.975324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:38.975479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.975500Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... gressState, NeedSyncHive: 0 2025-06-30T09:18:08.978170Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 240 -> 240 2025-06-30T09:18:08.978251Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:18:08.978260Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:18:08.978263Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:18:08.978267Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-30T09:18:08.978271Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 9 2025-06-30T09:18:08.978284Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-30T09:18:08.978720Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:18:08.978732Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:18:08.978764Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:18:08.978769Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:18:08.978774Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:18:08.978778Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:18:08.978782Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-30T09:18:08.978795Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [224:335:2324] message: TxId: 1003 2025-06-30T09:18:08.978802Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:18:08.978808Z node 224 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:18:08.978812Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:18:08.978858Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 8 2025-06-30T09:18:08.978965Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:18:08.979349Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:18:08.979361Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [224:612:2522] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:18:08.979490Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:08.979531Z node 224 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 50us result status StatusSuccess 2025-06-30T09:18:08.979637Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "tenant-1:hdd" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:08.979727Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 
72075186233409546 2025-06-30T09:18:08.979750Z node 224 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/USER_0" took 22us result status StatusSuccess 2025-06-30T09:18:08.979800Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "MyRoot/USER_0" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "tenant-1:hdd" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/USER_0" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:18:08.979856Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:08.979871Z node 224 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 17us result status StatusSuccess 2025-06-30T09:18:08.979934Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 
PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |79.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |79.3%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut >> KqpRbo::JoinFilter [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_Oltp_Replace-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 19712, MsgBus: 30739 2025-06-30T09:16:58.751989Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668932575869163:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:58.753764Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004178/r3tmp/tmpkDcBmx/pdisk_1.dat 2025-06-30T09:16:58.840270Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19712, node 1 2025-06-30T09:16:58.878966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:16:58.878982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:16:58.878985Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:16:58.879031Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30739 2025-06-30T09:16:58.903219Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:58.903260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:58.904875Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30739 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:16:59.026859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:59.033935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 CREATE TABLE `/Root/ColumnShard1` (Col1 Int64 NOT NULL, Col2 Int32 NOT NULL, PRIMARY KEY (Col1)) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1000); 2025-06-30T09:16:59.287760Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668936870837051:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:59.287795Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:59.408564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:16:59.753763Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:17:00.100503Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:17:00.100554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:17:00.100589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:17:00.100604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:17:00.100622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:17:00.100640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:17:00.100658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:17:00.100678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:17:00.100698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:17:00.100718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:17:00.100735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:17:00.100753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038399;self_id=[1:7521668941165807275:2304];tablet_id=72075186224038399;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:17:00.101374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:17:00.101423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:17:00.101483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:17:00.101517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:17:00.101546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:17:00.101571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:17:00.101599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:17:00.101626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:17:00.101651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:17:00.101678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:17:00.101702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:17:00.101726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7521668941165807321:2308];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:17:00.102348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:17:00.102368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=7207518622 ... ogressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:18:05.586395Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:18:05.586410Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:18:05.586516Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:18:05.587059Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:18:05.587168Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:18:05.587206Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:18:05.587743Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:18:05.590820Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669217825493482:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:05.590845Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669217825493487:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:05.590850Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:05.591681Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:05.595958Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669217825493489:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:18:05.691603Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669217825493540:2560] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:05.718305Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715662;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-30T09:18:05.733611Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; Trying to start YDB, gRPC: 9858, MsgBus: 1346 2025-06-30T09:18:06.382785Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521669221967112038:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:06.387130Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004178/r3tmp/tmp5f2iuV/pdisk_1.dat 2025-06-30T09:18:06.465446Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9858, node 3 2025-06-30T09:18:06.515123Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:06.515157Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:06.527260Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:06.626953Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:06.626967Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:06.626970Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:06.627021Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1346 TClient is connected to server localhost:1346 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:06.770294Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:06.772140Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:06.926292Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521669221967112594:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:06.929361Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:06.930896Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:07.006923Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:07.057491Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521669226262081249:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:07.057518Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:07.057561Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521669226262081254:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:07.058482Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:07.061735Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-30T09:18:07.061791Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521669226262081256:2404], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:18:07.114885Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521669226262081307:3178] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:07.385994Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink [GOOD] Test command err: 2025-06-30T09:17:43.464226Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:17:43.464303Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:17:43.464316Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046fa/r3tmp/tmpetqS9T/pdisk_1.dat 2025-06-30T09:17:43.567906Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:17:43.568717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:43.585440Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:43.586603Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275062902868 != 1751275062902872 2025-06-30T09:17:43.628761Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:62:2109] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:17:43.629208Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:17:43.629398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:43.629433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:43.640220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:43.714112Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:62:2109] Handle TEvProposeTransaction 2025-06-30T09:17:43.714148Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:62:2109] TxId# 281474976715657 ProcessProposeTransaction 2025-06-30T09:17:43.714189Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:62:2109] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:605:2513] 2025-06-30T09:17:43.739469Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:605:2513] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-30T09:17:43.739530Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:605:2513] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:17:43.739788Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:605:2513] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:17:43.739815Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:605:2513] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:17:43.739900Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:605:2513] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:17:43.739957Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:605:2513] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:17:43.739976Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:605:2513] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-30T09:17:43.740081Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:605:2513] txid# 281474976715657 HANDLE EvClientConnected 2025-06-30T09:17:43.740536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:17:43.740884Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:605:2513] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-30T09:17:43.740901Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:605:2513] txid# 281474976715657 SEND to# [1:557:2483] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-30T09:17:43.756200Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:17:43.756407Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:17:43.756487Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:17:43.756534Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:17:43.764642Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:17:43.764883Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:17:43.764913Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:17:43.765066Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:17:43.765073Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:17:43.765078Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:17:43.765123Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-30T09:17:43.765138Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:17:43.765150Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:17:43.765228Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:17:43.769148Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:17:43.769228Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:17:43.769255Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:17:43.769259Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:17:43.769264Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:17:43.769271Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:17:43.769337Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:43.769345Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:43.769448Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:17:43.769470Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:17:43.769480Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:17:43.769486Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:17:43.769493Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:17:43.769497Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:17:43.769501Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:17:43.769505Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:17:43.769509Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:17:43.769532Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:43.769539Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:43.769547Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], 
sessionId# [0:0:0] 2025-06-30T09:17:43.769649Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:17:43.769654Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:17:43.769674Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:17:43.769721Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:17:43.769730Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:17:43.769744Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:17:43.769752Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09: ... 7888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-30T09:18:09.466098Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-30T09:18:09.466106Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [13:716:2594], Recipient [13:629:2533]: {TEvReadSet step# 3028 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-30T09:18:09.466109Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:18:09.466111Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037889 dest 72075186224037888 producer 72075186224037889 txId 281474976715663 2025-06-30T09:18:09.466115Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 3028 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-30T09:18:09.466143Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3028 : 281474976715663] from 72075186224037888 at tablet 72075186224037888 send result to client [13:933:2731], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:09.466162Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [13:629:2533], Recipient [13:716:2594]: {TEvReadSet step# 3028 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-30T09:18:09.466165Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:18:09.466168Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715663 2025-06-30T09:18:09.466172Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 3028 txid# 281474976715663 TabletSource# 
72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-30T09:18:09.466186Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3028 : 281474976715663] from 72075186224037889 at tablet 72075186224037889 send result to client [13:933:2731], exec latency: 0 ms, propose latency: 0 ms TEvProposeTransactionResult: TxKind: TX_KIND_DATA Origin: 72075186224037888 Status: COMPLETE TxId: 281474976715663 TxResult: "" ExecLatency: 0 ProposeLatency: 0 TxStats { PerShardStats { ShardId: 72075186224037888 CpuTimeUsec: 242 } } ComputeActorStats { Tasks { Tables { TablePath: "/Root/table-1" WriteRows: 1 WriteBytes: 8 } } } CommitVersion { Step: 3028 TxId: 281474976715663 } 2025-06-30T09:18:09.466286Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:09.466321Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 TEvProposeTransactionResult: TxKind: TX_KIND_DATA Origin: 72075186224037889 Status: COMPLETE TxId: 281474976715663 TxResult: "" ExecLatency: 0 ProposeLatency: 0 TxStats { PerShardStats { ShardId: 72075186224037889 CpuTimeUsec: 117 } } ComputeActorStats { Tasks { Tables { TablePath: "/Root/table-2" WriteRows: 1 WriteBytes: 8 } } } CommitVersion { Step: 3028 TxId: 281474976715663 } 2025-06-30T09:18:09.466611Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:09.467262Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-30T09:18:09.467289Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [13:629:2533], Recipient [13:716:2594]: {TEvReadSet step# 3028 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-30T09:18:09.467294Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:18:09.467299Z node 13 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715663 2025-06-30T09:18:09.467385Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-30T09:18:09.467429Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [13:716:2594], Recipient [13:629:2533]: {TEvReadSet step# 3028 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 1} 2025-06-30T09:18:09.467432Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:18:09.467435Z node 13 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715663 2025-06-30T09:18:09.482608Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [13:62:2109] Handle TEvExecuteKqpTransaction 2025-06-30T09:18:09.482637Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [13:62:2109] TxId# 281474976715667 ProcessProposeKqpTransaction 2025-06-30T09:18:09.482852Z node 13 :KQP_EXECUTER ERROR: 
kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz022bhv12ajs274gnt1fyft, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWJlZTZkNDktY2U2NmJmMWMtODk0ZTVlNy0zYWRkNjAxZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TEvRead: ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 2025-06-30T09:18:09.483382Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [13:1043:2837], Recipient [13:629:2533]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-30T09:18:09.483428Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-30T09:18:09.483437Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v3028/281474976715663 IncompleteEdge# v{min} UnprotectedReadEdge# v4000/18446744073709551615 ImmediateWriteEdge# v4001/0 ImmediateWriteEdgeReplied# v4001/0 2025-06-30T09:18:09.483445Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v4001/18446744073709551615 2025-06-30T09:18:09.483454Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-30T09:18:09.483470Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:18:09.483474Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:18:09.483478Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:18:09.483482Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:18:09.483494Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-30T09:18:09.483499Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:18:09.483501Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:18:09.483504Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:18:09.483507Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:18:09.483523Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-30T09:18:09.483602Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 
Complete read# {[13:1043:2837], 0} after executionsCount# 1 2025-06-30T09:18:09.483610Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[13:1043:2837], 0} sends rowCount# 2, bytes# 96, quota rows left# 999, quota bytes left# 5242784, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:18:09.483628Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[13:1043:2837], 0} finished in read 2025-06-30T09:18:09.483636Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:18:09.483639Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:18:09.483642Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:18:09.483645Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:18:09.483656Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:18:09.483658Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:18:09.483661Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-30T09:18:09.483665Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:18:09.483684Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-30T09:18:09.483873Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [13:1043:2837], Recipient [13:629:2533]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-30T09:18:09.483880Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } items { uint32_value: 11 } }, { items { uint32_value: 2 } items { uint32_value: 2 } items { uint32_value: 22 } } >> THiveTest::TestServerlessComputeResourcesMode [GOOD] >> THiveTest::TestResetServerlessComputeResourcesMode ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::JoinFilter [GOOD] Test command err: Trying to start YDB, gRPC: 25688, MsgBus: 1175 2025-06-30T09:18:09.287516Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669234135279734:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:09.287542Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004714/r3tmp/tmpxAGUBb/pdisk_1.dat 2025-06-30T09:18:09.392135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:09.392187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-30T09:18:09.393202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:09.394221Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25688, node 1 2025-06-30T09:18:09.527051Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:09.527064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:09.527066Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:09.527109Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1175 TClient is connected to server localhost:1175 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:09.666234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:09.669987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:09.991628Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669234135280327:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:09.991665Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:10.043440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:10.073770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:10.107509Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669238430247795:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:10.107543Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:10.107604Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669238430247800:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:10.108778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:10.112732Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669238430247802:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:18:10.182425Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669238430247853:2432] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:10.290898Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> ReadSessionImplTest::ForcefulDestroyPartitionStream [GOOD] >> ReadSessionImplTest::DestroyPartitionStreamRequest >> ReadSessionImplTest::DestroyPartitionStreamRequest [GOOD] >> ReadSessionImplTest::DecompressZstdEmptyMessage [GOOD] >> ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit [GOOD] >> ReadSessionImplTest::PacksBatches_BatchesEqualToServerBatches [GOOD] >> ReadSessionImplTest::HoleBetweenOffsets >> ReadSessionImplTest::HoleBetweenOffsets [GOOD] >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] >> THiveTest::TestResetServerlessComputeResourcesMode [GOOD] >> THiveTest::TestSkipBadNode ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::CreateExternalSubdomainWithoutHive-AlterDatabaseCreateHiveFirst-false [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:16:38.895188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:38.895210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.895215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:38.895220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:38.895226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-30T09:16:38.895229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:38.895239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:38.895253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:38.895356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:38.895425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:38.910585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:16:38.910609Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:38.910717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.913836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:38.914931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:38.914970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:38.916929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:38.916976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:38.917077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.917142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:38.918144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.918195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:38.918451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:38.918462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:38.918471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:38.918479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, 
domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:38.918485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:38.918510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:16:38.919993Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:16:38.940021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:38.940087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.940141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:38.940147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:38.940193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:38.940206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:38.940866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.940897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:38.940936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.940943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:38.940947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do 
next state 2025-06-30T09:16:38.940951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:38.941345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.941355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:38.941359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:38.941837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.941853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:38.941857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:38.941863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:38.942387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:38.942844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:38.942875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:38.943020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:38.943039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
gressState, NeedSyncHive: 0 2025-06-30T09:18:09.743045Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 240 -> 240 2025-06-30T09:18:09.743160Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:18:09.743172Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:18:09.743176Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:18:09.743182Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-30T09:18:09.743187Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 9 2025-06-30T09:18:09.743203Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-30T09:18:09.743778Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:18:09.743792Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:18:09.743806Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:18:09.743811Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:18:09.743818Z node 224 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:18:09.743821Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:18:09.743826Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-30T09:18:09.743838Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [224:335:2324] message: TxId: 1003 2025-06-30T09:18:09.743845Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:18:09.743851Z node 224 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:18:09.743855Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:18:09.743893Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 8 2025-06-30T09:18:09.743997Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:18:09.744312Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:18:09.744322Z node 224 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [224:612:2522] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:18:09.744446Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:09.744485Z node 224 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 49us result status StatusSuccess 2025-06-30T09:18:09.744580Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "tenant-1:hdd" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:09.744656Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-30T09:18:09.744676Z node 224 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/USER_0" took 18us result status StatusSuccess 
2025-06-30T09:18:09.744720Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "MyRoot/USER_0" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "tenant-1:hdd" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/USER_0" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:18:09.744774Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:09.744789Z node 224 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 16us result status StatusSuccess 2025-06-30T09:18:09.744848Z node 224 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 
1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TxUsage::WriteToTopic_Demo_47_Query [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] Test command err: 2025-06-30T09:18:12.312751Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.312758Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.312762Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:12.323633Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:12.349394Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:12.353731Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.353901Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:18:12.356514Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.356521Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.356525Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:12.370793Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:12.380708Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:12.380772Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.381055Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:18:12.381144Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. 
Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-30T09:18:12.385260Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.385267Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.385272Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:12.389021Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:12.398988Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:12.399054Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.399135Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:18:12.401686Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.404864Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:18:12.406874Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:18:12.406894Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-30T09:18:12.411287Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.411294Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.411298Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:12.423003Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:12.435030Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:12.435232Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.435349Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 11 Compressed message data size: 31 2025-06-30T09:18:12.439787Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:18:12.439832Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-30T09:18:12.439889Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-30T09:18:12.439900Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-30T09:18:12.446819Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:18:12.446836Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-30T09:18:12.446847Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-30T09:18:12.447124Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 GOT RANGE 0 3 Getting new event 2025-06-30T09:18:12.447154Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-30T09:18:12.447159Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-30T09:18:12.447162Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-30T09:18:12.447186Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 GOT RANGE 3 5 Getting new event 2025-06-30T09:18:12.447197Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-30T09:18:12.447202Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-30T09:18:12.447205Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-30T09:18:12.447214Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 GOT RANGE 5 7 Getting new event 2025-06-30T09:18:12.447224Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-30T09:18:12.447228Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-30T09:18:12.447233Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-30T09:18:12.447247Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 GOT RANGE 7 9 2025-06-30T09:18:12.447663Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.447667Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.447670Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:12.466889Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:12.475133Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:12.475204Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.478984Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-30T09:18:12.479186Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:18:12.479221Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-30T09:18:12.479286Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-30T09:18:12.479294Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-30T09:18:12.479317Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:18:12.479322Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-30T09:18:12.479327Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-30T09:18:12.479331Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-30T09:18:12.479338Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-30T09:18:12.479379Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 5). Partition stream id: 1 GOT RANGE 0 5 Getting new event 2025-06-30T09:18:12.479401Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-30T09:18:12.479405Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-30T09:18:12.479408Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-30T09:18:12.479412Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-30T09:18:12.479416Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-30T09:18:12.479437Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 9). Partition stream id: 1 GOT RANGE 5 9 2025-06-30T09:18:12.479766Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.479770Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.479773Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:12.498463Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:12.498650Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:12.498699Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:12.498774Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. 
Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:18:12.499172Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:18:12.499208Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:18:12.502811Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (10-11) 2025-06-30T09:18:12.502830Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-30T09:18:12.502861Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:18:12.502867Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-30T09:18:12.502872Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (10-10) 2025-06-30T09:18:12.502875Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (11-11) 2025-06-30T09:18:12.502885Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes 2025-06-30T09:18:12.502889Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes got data event: DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 11 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-30T09:18:12.502920Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 Got commit req { offset_ranges { assign_id: 1 end_offset: 3 } } RANGE 0 3 2025-06-30T09:18:12.502968Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 12). 
Partition stream id: 1 Got commit req { offset_ranges { assign_id: 1 start_offset: 3 end_offset: 12 } } RANGE 3 12 >> TxUsage::WriteToTopic_Demo_50_Table >> LocalPartition::WithoutPartitionPartitionRelocation [GOOD] >> LocalPartition::DirectWriteWithoutDescribeResourcesPermission >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Query >> THiveTest::TestSkipBadNode [GOOD] >> THiveTest::TestStopTenant >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query [FAIL] >> THiveTest::TestHiveNoBalancingWithLowResourceUsage [GOOD] >> THiveTest::TestLockTabletExecution >> THiveTest::TestHiveBalancerWithFollowers [GOOD] >> THiveTest::TestHiveBalancerWithLimit >> ReadSessionImplTest::ReconnectOnTmpError [GOOD] >> ReadSessionImplTest::ReconnectOnTmpErrorAndThenTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeout >> ReadSessionImplTest::ReconnectOnTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeoutAndThenCreate [GOOD] >> ReadSessionImplTest::ReconnectsAfterFailure [GOOD] >> ReadSessionImplTest::SimpleDataHandlers >> THiveTest::TestStopTenant [GOOD] >> THiveTest::TestTabletAvailability >> ReadSessionImplTest::SimpleDataHandlers [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithCommit >> THiveTest::TestDrainWithMaxTabletsScheduled [GOOD] >> THiveTest::TestDownAfterDrain >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] >> THiveTest::TestTabletAvailability [GOOD] >> THiveTest::TestTabletsStartingCounter >> TSchemeShardTestExtSubdomainReboots::CreateExternalSubdomain-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardTestExtSubdomainReboots::AlterSchemeLimits >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query >> THiveTest::TestTabletsStartingCounter [GOOD] >> THiveTest::TestTabletsStartingCounterExternalBoot ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] Test command err: 2025-06-30T09:18:16.262515Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.262522Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.262526Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:16.275339Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-30T09:18:16.275356Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.275360Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.275381Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008647s 2025-06-30T09:18:16.299265Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-30T09:18:16.307022Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-30T09:18:16.307069Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.308498Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.308504Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.308509Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:16.315326Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-30T09:18:16.315342Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.315346Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.315366Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.009844s 2025-06-30T09:18:16.327676Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:16.336555Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-30T09:18:16.336588Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.343397Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.343402Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.343405Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:16.350911Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-30T09:18:16.350926Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.350931Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.350950Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.231193s 2025-06-30T09:18:16.358864Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:16.370937Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-30T09:18:16.370974Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.376403Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.376409Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.376413Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:16.394873Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-30T09:18:16.394890Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.394895Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.394914Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.164114s 2025-06-30T09:18:16.405807Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:16.405978Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-30T09:18:16.405993Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.407222Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.407228Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.407234Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:16.418972Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:16.430868Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:16.452786Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.452933Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TRANSPORT_UNAVAILABLE. Description:
: Error: GRpc error: (14): 2025-06-30T09:18:16.452938Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.452941Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.452946Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.195559s 2025-06-30T09:18:16.453013Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-30T09:18:16.467252Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.467258Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.467262Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:16.474826Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:16.483010Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:16.483078Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.489853Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:18:16.604871Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.607511Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-30T09:18:16.607534Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:18:16.607542Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-30T09:18:16.607561Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-30T09:18:16.710925Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-30T09:18:16.710981Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-30T09:18:16.711320Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.711324Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.711327Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:16.718847Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:16.733143Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:16.733207Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.737691Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:18:16.834573Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:16.834620Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-30T09:18:16.834633Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:18:16.834639Z :DEBUG: Take Data. Partition 1. 
Read: {1, 0} (2-2) 2025-06-30T09:18:16.834654Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2025-06-30T09:18:16.834672Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-30T09:18:16.834722Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-30T09:18:16.834748Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-30T09:18:16.834773Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> TStorageBalanceTest::TestScenario1 [GOOD] >> TStorageBalanceTest::TestScenario2 >> THiveTest::TestTabletsStartingCounterExternalBoot [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table [FAIL] >> THiveTest::TestLockTabletExecution [GOOD] >> THiveTest::TestLockTabletExecutionBadOwner >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly [GOOD] >> PersQueueSdkReadSessionTest::StopResumeReadingData >> KqpPg::InsertFromSelect_Simple+useSink >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query >> THiveTest::TestHiveBalancerWithLimit [GOOD] >> THiveTest::TestHiveBalancerIgnoreTablet >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query >> THiveTest::TestDownAfterDrain [GOOD] >> THiveTest::TestCreateTabletsWithRaceForStoragePoolsKIKIMR_9659 >> THiveTest::TestLockTabletExecutionBadOwner [GOOD] >> THiveTest::TestLockTabletExecutionDelete >> TTransferTests::Create >> TxUsage::WriteToTopic_Demo_50_Table [GOOD] >> LocalPartition::DirectWriteWithoutDescribeResourcesPermission [GOOD] >> LocalPartition::WithoutPartitionWithSplit >> TxUsage::WriteToTopic_Demo_50_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> THiveTest::TestTabletsStartingCounterExternalBoot [GOOD] Test command err: 2025-06-30T09:17:53.921518Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:53.927545Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:53.927640Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:53.928046Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:53.928144Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:17:53.928413Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-30T09:17:53.928428Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} 
StartLocalProxy GroupId# 0 2025-06-30T09:17:53.928672Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:28:2075] ControllerId# 72057594037932033 2025-06-30T09:17:53.928680Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:53.928707Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:53.928734Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:53.933468Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:53.933490Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:53.933907Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:36:2080] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.933948Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:37:2081] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.933999Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:38:2082] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.934054Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:39:2083] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.934088Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:40:2084] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.934128Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:41:2085] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.934178Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:42:2086] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.934186Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-30T09:17:53.934202Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:28:2075] 2025-06-30T09:17:53.934209Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:28:2075] 2025-06-30T09:17:53.934220Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-30T09:17:53.934230Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:17:53.934391Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:17:53.934481Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.934488Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:53.934499Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:17:53.934528Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:53.938355Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:28:2075] 2025-06-30T09:17:53.938395Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 
2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.938405Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-30T09:17:53.939527Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-30T09:17:53.939952Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-30T09:17:53.939970Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:17:53.940061Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.940702Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-30T09:17:53.940721Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:17:53.940725Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:17:53.940751Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:53:2093] 2025-06-30T09:17:53.940766Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:17:53.941613Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-30T09:17:53.941624Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:17:53.941628Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-30T09:17:53.941634Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:17:53.941657Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:32:2063] 2025-06-30T09:17:53.941661Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:32:2063] 2025-06-30T09:17:53.941679Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:28:2075] 2025-06-30T09:17:53.941687Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-30T09:17:53.941699Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [1:53:2093] 2025-06-30T09:17:53.941701Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [1:53:2093] 2025-06-30T09:17:53.942036Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-30T09:17:53.942046Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:53.942073Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\363\365\\\016\336\205\240m2\241c\3010\003\261\342\227\n\267}" } 2025-06-30T09:17:53.942120Z 
node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [1:53:2093] 2025-06-30T09:17:53.942127Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:17:53.942163Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:53.942176Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-30T09:17:53.942183Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:17:53.942253Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:17:53.942296Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:53.942316Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StInit ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:53.942340Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-30T09:17:53.942345Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-30T09:17:53.942687Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:28:2075] 2025-06-30T09:17:53.942694Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:28:2075] 2025-06-30T09:17:53.942700Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-30T09:17:53.942705Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 2146435075 Sender# [1:47:2090] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:17:53.942712Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.004279s 2025-06-30T09:17:53.944600Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc ... 
Cookie: 0} 2025-06-30T09:18:19.011599Z node 25 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:18:19.011604Z node 25 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:18:19.011619Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[25:2199047599219:0] : 8}, {[25:1099535971443:0] : 5}, {[25:24343667:0] : 2}}}} 2025-06-30T09:18:19.011624Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72075186224037888 followers: 0 2025-06-30T09:18:19.011635Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72075186224037888] forward result error, check reconnect [25:324:2301] 2025-06-30T09:18:19.011640Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[72075186224037888] connect failed [25:324:2301] 2025-06-30T09:18:19.011664Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [25:327:2303] 2025-06-30T09:18:19.011667Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [25:327:2303] 2025-06-30T09:18:19.011674Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:18:19.011681Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 25 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [25:272:2262] 2025-06-30T09:18:19.011688Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [25:327:2303] 2025-06-30T09:18:19.011694Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [25:327:2303] 2025-06-30T09:18:19.011701Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [25:327:2303] 2025-06-30T09:18:19.011706Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [25:327:2303] 2025-06-30T09:18:19.011717Z node 25 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [25:327:2303] 2025-06-30T09:18:19.011739Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [25:327:2303] 2025-06-30T09:18:19.011744Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [25:327:2303] 2025-06-30T09:18:19.011748Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [25:327:2303] 2025-06-30T09:18:19.011753Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [25:327:2303] 2025-06-30T09:18:19.011757Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: 
TClient[72057594037927937] notify reset [25:327:2303] 2025-06-30T09:18:19.011764Z node 25 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [25:326:2302] EventType# 268697624 2025-06-30T09:18:19.011773Z node 25 :HIVE TRACE: hive_impl.cpp:118: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([25:327:2303]) [25:328:2304] 2025-06-30T09:18:19.011794Z node 25 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{6, NKikimr::NHive::TTxStartTablet} queued, type NKikimr::NHive::TTxStartTablet 2025-06-30T09:18:19.011800Z node 25 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{6, NKikimr::NHive::TTxStartTablet} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:18:19.011808Z node 25 :HIVE DEBUG: tx__start_tablet.cpp:31: HIVE#72057594037927937 THive::TTxStartTablet::Execute Tablet (72075186224037888,0) 2025-06-30T09:18:19.011851Z node 25 :HIVE DEBUG: tx__start_tablet.cpp:73: HIVE#72057594037927937 THive::TTxStartTablet::Execute, Sending TEvBootTablet(Dummy.72075186224037888.Leader.1) to node 25 storage {Version# 1 TabletID# 72075186224037888 TabletType# Dummy Channels# {0:{Channel# 0 Type# none StoragePool# def1 History# {0:{FromGeneration# 0 GroupID# 2147483648 Timestamp# 1970-01-01T00:00:00.059536Z}}, 1:{Channel# 1 Type# none StoragePool# def2 History# {0:{FromGeneration# 0 GroupID# 2147483649 Timestamp# 1970-01-01T00:00:00.059536Z}}, 2:{Channel# 2 Type# none StoragePool# def3 History# {0:{FromGeneration# 0 GroupID# 2147483650 Timestamp# 1970-01-01T00:00:00.059536Z}}} Tenant: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:19.011876Z node 25 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{6, NKikimr::NHive::TTxStartTablet} hope 1 -> done Change{6, redo 144b alter 0b annex 0, ~{ 1, 16 } -{ }, 0 gb} 2025-06-30T09:18:19.011886Z node 25 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{6, NKikimr::NHive::TTxStartTablet} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:18:19.023201Z node 25 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [6173685a7ad4b3c4] bootstrap ActorId# [25:330:2306] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:5:0:0:126:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-30T09:18:19.023234Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [6173685a7ad4b3c4] Id# [72057594037927937:2:5:0:0:126:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-30T09:18:19.023242Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [6173685a7ad4b3c4] restore Id# [72057594037927937:2:5:0:0:126:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-30T09:18:19.023251Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [6173685a7ad4b3c4] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:5:0:0:126:1] Marker# BPG33 2025-06-30T09:18:19.023257Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [6173685a7ad4b3c4] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:5:0:0:126:1] Marker# BPG32 2025-06-30T09:18:19.023276Z node 25 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [25:38:2081] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:5:0:0:126:1] FDS# 126 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-30T09:18:19.027131Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [6173685a7ad4b3c4] received {EvVPutResult Status# OK ID# 
[72057594037927937:2:5:0:0:126:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 20 } Cost# 80992 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 21 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-30T09:18:19.027160Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [6173685a7ad4b3c4] Result# TEvPutResult {Id# [72057594037927937:2:5:0:0:126:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-30T09:18:19.027168Z node 25 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [6173685a7ad4b3c4] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:5:0:0:126:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-30T09:18:19.027201Z node 25 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.106 sample PartId# [72057594037927937:2:5:0:0:126:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 25 } TEvVPutResult{ TimestampMs# 3.971 VDiskId# [0:1:0:0:0] NodeId# 25 Status# OK } ] } 2025-06-30T09:18:19.027226Z node 25 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:5:0:0:126:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-30T09:18:19.027256Z node 25 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} commited cookie 1 for step 5 2025-06-30T09:18:19.027307Z node 25 :HIVE DEBUG: tx__start_tablet.cpp:122: HIVE#72057594037927937 THive::TTxStartTablet::Complete Tablet (72075186224037888,0) SideEffects: {Notifications: 0x10080002 [25:326:2302] NKikimrLocal.TEvBootTablet Info { TabletID: 72075186224037888 Channels { Channel: 0 ChannelType: 0 History { FromGeneration: 0 GroupID: 2147483648 } StoragePool: "def1" } Channels { Channel: 1 ChannelType: 0 History { FromGeneration: 0 GroupID: 2147483649 } StoragePool: "def2" } Channels { Channel: 2 ChannelType: 0 History { FromGeneration: 0 GroupID: 2147483650 } StoragePool: "def3" } TabletType: Dummy Version: 1 TenantIdOwner: 72057594046678944 TenantIdLocalId: 1 } SuggestedGeneration: 1 BootMode: BOOT_MODE_LEADER FollowerId: 0} 2025-06-30T09:18:19.027387Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [25:332:2308] 2025-06-30T09:18:19.027393Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [25:332:2308] 2025-06-30T09:18:19.027410Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:18:19.027421Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 25 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [25:272:2262] 2025-06-30T09:18:19.027430Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [25:332:2308] 2025-06-30T09:18:19.027437Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [25:332:2308] 2025-06-30T09:18:19.027443Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [25:332:2308] 
2025-06-30T09:18:19.027448Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [25:332:2308] 2025-06-30T09:18:19.027460Z node 25 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [25:332:2308] 2025-06-30T09:18:19.027479Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [25:332:2308] 2025-06-30T09:18:19.027484Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [25:332:2308] 2025-06-30T09:18:19.027488Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [25:332:2308] 2025-06-30T09:18:19.027492Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [25:332:2308] 2025-06-30T09:18:19.027497Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [25:332:2308] 2025-06-30T09:18:19.027504Z node 25 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [25:331:2307] EventType# 268830214 2025-06-30T09:18:19.027788Z node 25 :HIVE TRACE: hive_impl.cpp:118: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([25:332:2308]) [25:333:2309] >> THiveTest::TestCreateTabletsWithRaceForStoragePoolsKIKIMR_9659 [GOOD] >> THiveTest::TestDeleteTablet >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table [GOOD] >> THiveTest::TestLockTabletExecutionDelete [GOOD] >> THiveTest::TestLockTabletExecutionDeleteReboot >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table [GOOD] >> THiveTest::TestDeleteTablet [GOOD] >> THiveTest::TestDeleteOwnerTablets >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query >> KqpPg::NoTableQuery+useSink >> TTxDataShardMiniKQL::CrossShard_5_AllToAll [GOOD] >> TTxDataShardMiniKQL::CrossShard_6_Local >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query >> TTransferTests::Create_Disabled >> TTransferTests::Create [GOOD] >> TTransferTests::CreateSequential >> BasicUsage::MaxByteSizeEqualZero [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage >> THiveTest::TestDeleteOwnerTablets [GOOD] >> THiveTest::TestDeleteOwnerTabletsMany >> TTransferTests::Create_Disabled [GOOD] >> TTransferTests::CreateWithoutCredentials >> TTransferTests::CreateSequential [GOOD] >> KqpPg::InsertNoTargetColumns_Simple+useSink >> TTransferTests::CreateInParallel >> THiveTest::TestLockTabletExecutionDeleteReboot [GOOD] >> THiveTest::TestLockTabletExecutionBadUnlock >> KqpPg::NoTableQuery+useSink [GOOD] >> KqpPg::NoTableQuery-useSink >> TTransferTests::CreateWithoutCredentials [GOOD] >> TTransferTests::CreateWrongConfig >> TTransferTests::CreateInParallel [GOOD] >> TTransferTests::CreateDropRecreate >> TTransferTests::CreateWrongConfig [GOOD] >> TTransferTests::CreateWrongBatchSize >> KqpPg::TypeCoercionInsert-useSink >> TTransferTests::CreateDropRecreate [GOOD] >> TTransferTests::ConsistencyLevel >> TTransferTests::CreateWrongBatchSize [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsSmall |79.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |79.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication >> 
KqpPg::InsertNoTargetColumns_Simple+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Simple-useSink >> THiveTest::TestLockTabletExecutionBadUnlock [GOOD] >> THiveTest::TestLockTabletExecutionGoodUnlock >> KqpPg::NoTableQuery-useSink [GOOD] >> KqpPg::PgCreateTable >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Query [GOOD] >> TTransferTests::ConsistencyLevel [GOOD] >> TTransferTests::Alter >> TTransferTests::CreateWrongFlushIntervalIsSmall [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsBig |79.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |79.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime >> TTransferTests::CreateWrongFlushIntervalIsBig [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Table >> TTransferTests::Alter [GOOD] |79.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |79.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain >> KqpPg::InsertNoTargetColumns_Simple-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Serial-useSink >> THiveTest::TestHiveBalancerIgnoreTablet [GOOD] >> THiveTest::TestHiveBalancerNodeRestarts |79.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |79.3%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut >> THiveTest::TestLockTabletExecutionGoodUnlock [GOOD] >> THiveTest::TestHiveBalancerWithSpareNodes >> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_transfer/unittest >> TTransferTests::CreateWrongFlushIntervalIsBig [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:24.378814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:24.378839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:24.378845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:24.378851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:24.378861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-30T09:18:24.378866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:24.378876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:24.378891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:24.379009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:24.379077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:24.395898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:24.395921Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:24.399391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:24.399441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:24.399469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:24.400881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:24.400933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:24.401057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:24.401101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:24.401637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:24.401671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:24.401917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:24.401927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:24.401952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:24.401975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:24.401982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:24.401999Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:24.403173Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:24.426769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:24.426855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:24.426923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:24.426931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:24.426975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:24.426989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:24.427722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:24.427765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:24.427819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:24.427828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:24.427834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:24.427840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:24.428204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:24.428213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:24.428221Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:24.428574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:24.428582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:24.428589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:24.428596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:24.429353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:24.429793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:24.429829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:24.430019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:24.430045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:24.430062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:24.430122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:24.430130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:24.430173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:24.430186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:24.430576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-30T09:18:24.430584Z node 1 :FLAT_TX_SCHEMESHARD ... nResult Origin: 72075186233409546 TxId: 101 2025-06-30T09:18:26.106581Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5003: StateWork, processing event TEvColumnShard::TEvNotifyTxCompletionResult 2025-06-30T09:18:26.106589Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6237: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-06-30T09:18:26.106597Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-30T09:18:26.106621Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-06-30T09:18:26.106646Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-30T09:18:26.107536Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:26.107549Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:18:26.107556Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 101:0 2025-06-30T09:18:26.107586Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [6:127:2151], Recipient [6:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:18:26.107592Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:18:26.107605Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:26.107613Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-30T09:18:26.107627Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:18:26.107633Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:18:26.107638Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:18:26.107643Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:18:26.107647Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:18:26.107652Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-30T09:18:26.107665Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [6:345:2321] message: TxId: 101 2025-06-30T09:18:26.107673Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 
2025-06-30T09:18:26.107678Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-30T09:18:26.107683Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 101:0 2025-06-30T09:18:26.107713Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:18:26.108113Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:18:26.108132Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [6:345:2321] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 101 at schemeshard: 72057594046678944 2025-06-30T09:18:26.108163Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:18:26.108170Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [6:346:2322] 2025-06-30T09:18:26.108203Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [6:348:2324], Recipient [6:127:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:18:26.108209Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:18:26.108214Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-30T09:18:26.108399Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [6:390:2359], Recipient [6:127:2151]: {TEvModifySchemeTransaction txid# 102 TabletId# 72057594046678944} 2025-06-30T09:18:26.108405Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:18:26.109158Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTransfer Replication { Name: "Transfer" Config { TransferSpecific { Target { SrcPath: "/MyRoot1/Table" DstPath: "/MyRoot/Table" } Batching { FlushIntervalMilliSeconds: 86400001 } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:26.109209Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_replication.cpp:349: [72057594046678944] TCreateReplication Propose: opId# 102:0, path# /MyRoot/Transfer 2025-06-30T09:18:26.109224Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Flush interval must be less than or equal to 24 hours, at schemeshard: 72057594046678944 2025-06-30T09:18:26.109273Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:18:26.109774Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Flush interval must be less than or equal to 24 hours" TxId: 
102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:26.109818Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Flush interval must be less than or equal to 24 hours, operation: CREATE TRANSFER, path: /MyRoot/Transfer 2025-06-30T09:18:26.109824Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-30T09:18:26.109891Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:18:26.109898Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:18:26.109948Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [6:396:2365], Recipient [6:127:2151]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:26.109954Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:26.109975Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:18:26.109995Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124996, Sender [6:345:2321], Recipient [6:127:2151]: NKikimrScheme.TEvNotifyTxCompletion TxId: 102 2025-06-30T09:18:26.110000Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4973: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-30T09:18:26.110010Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:18:26.110024Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:18:26.110028Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [6:394:2363] 2025-06-30T09:18:26.110052Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [6:396:2365], Recipient [6:127:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:18:26.110056Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:18:26.110060Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-30T09:18:26.110104Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [6:397:2366], Recipient [6:127:2151]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Transfer" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-30T09:18:26.110109Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:18:26.110119Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Transfer" Options { 
ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:26.110142Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Transfer" took 23us result status StatusPathDoesNotExist 2025-06-30T09:18:26.110173Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Transfer\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Transfer" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_transfer/unittest >> TTransferTests::Alter [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:23.933467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:23.933495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:23.933502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:23.933508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:23.933522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:23.933528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:23.933540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:23.933556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:23.933687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:23.933780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:23.951465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:23.951494Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:23.955600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:23.955665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:23.955700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:23.957399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:23.957471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:23.957623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:23.957688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:23.958580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:23.958633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:23.958946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:23.958961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:23.958991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:23.959000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:23.959008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:23.959029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:23.960363Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:23.989114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:23.989225Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:23.989308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:23.989320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:23.989381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:23.989401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:23.990499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:23.990573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:23.990656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:23.990671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:23.990678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:23.990686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:23.991375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:23.991397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:23.991406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:23.991877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:23.991892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:23.991900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:23.991910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 
2025-06-30T09:18:23.992862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:23.993388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:23.993439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:23.993680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:23.993719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:23.993745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:23.993833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:23.993845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:23.993888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:23.993907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:23.994506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:23.994519Z node 1 :FLAT_TX_SCHEMESHARD ... 
lterReplication TConfigureParts opId# 104:0 HandleReply NKikimrReplication.TEvAlterReplicationResult OperationId { TxId: 104 PartId: 0 } Origin: 72075186233409547 Status: SUCCESS 2025-06-30T09:18:26.107145Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 104:0 3 -> 128 2025-06-30T09:18:26.107162Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:18:26.107169Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:695: Ack tablet strongly msg opId: 104:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:3 2025-06-30T09:18:26.107511Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:18:26.107522Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:18:26.107527Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 104:0 2025-06-30T09:18:26.107556Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [6:127:2151], Recipient [6:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:18:26.107562Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:18:26.107570Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:18:26.107576Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_replication.cpp:189: [72057594046678944] TAlterReplication TPropose opId# 104:0 ProgressState 2025-06-30T09:18:26.107581Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:18:26.107588Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 104 ready parts: 1/1 2025-06-30T09:18:26.107617Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 104 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:26.107958Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:18:26.107971Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 msg type: 269090816 2025-06-30T09:18:26.107993Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-30T09:18:26.108080Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269287424, 
Sender [6:138:2159], Recipient [6:263:2252] 2025-06-30T09:18:26.108086Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-30T09:18:26.108110Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:26.108132Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 25769805935 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:26.108140Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_replication.cpp:203: [72057594046678944] TAlterReplication TPropose opId# 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-06-30T09:18:26.108169Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 104:0 128 -> 240 2025-06-30T09:18:26.108194Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:18:26.108206Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:18:26.108218Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:695: Ack tablet strongly msg opId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 2025-06-30T09:18:26.108648Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:18:26.108663Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:385: Ack coordinator stepId#5000005 first txId#104 countTxs#1 2025-06-30T09:18:26.108671Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:355: Ack mediator stepId#5000005 2025-06-30T09:18:26.108677Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 104:0 2025-06-30T09:18:26.108714Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [6:127:2151], Recipient [6:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:18:26.108720Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation FAKE_COORDINATOR: Erasing txId 104 2025-06-30T09:18:26.108739Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:26.108746Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:18:26.108794Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:26.108800Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [6:212:2212], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-30T09:18:26.108895Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:18:26.108904Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-30T09:18:26.108915Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:18:26.108921Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:18:26.108926Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:18:26.108932Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:18:26.108937Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:18:26.108943Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-30T09:18:26.108949Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:18:26.108955Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-30T09:18:26.108960Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:0 2025-06-30T09:18:26.108990Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:18:26.108996Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 1, subscribers: 0 2025-06-30T09:18:26.109002Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 4 2025-06-30T09:18:26.109139Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 274137603, Sender [6:212:2212], Recipient [6:127:2151]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Version: 4 } 2025-06-30T09:18:26.109147Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5044: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-30T09:18:26.109162Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:18:26.109175Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:18:26.109181Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:18:26.109187Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at 
schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-30T09:18:26.109192Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:18:26.109208Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-30T09:18:26.109214Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:18:26.109895Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:18:26.110003Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:18:26.110010Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 >> TTxDataShardMiniKQL::CrossShard_6_Local [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx >> TxUsage::Sinks_Oltp_WriteToTopics_2_Table [GOOD] >> KqpPg::InsertNoTargetColumns_Serial-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefault+useSink >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped [GOOD] Test command err: 2025-06-30T09:17:11.827616Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668985167991442:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:11.827771Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:17:11.832708Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521668985230656395:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:11.834725Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:17:11.836106Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521668986820198843:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:17:11.836128Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00415c/r3tmp/tmpRSiFpU/pdisk_1.dat 2025-06-30T09:17:11.911150Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:11.922287Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:17:11.925863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:11.925893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:11.927451Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:11.971301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:11.971327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:11.972793Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:17:11.976379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:17:11.976404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:17:11.977835Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 TClient is connected to server localhost:19301 2025-06-30T09:17:12.043273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-30T09:17:12.043812Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:17:12.044619Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521668985167991552:2144] Handle TEvNavigate describe path dc-1 2025-06-30T09:17:12.046703Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521668989462959305:2466] HANDLE EvNavigateScheme dc-1 2025-06-30T09:17:12.046766Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521668985167991578:2157], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:17:12.046780Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521668985167991578:2157], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:17:12.046835Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521668989462959306:2467][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:17:12.047335Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521668985167991227:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521668989462959310:2467] 2025-06-30T09:17:12.047353Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521668985167991230:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521668989462959311:2467] 2025-06-30T09:17:12.047365Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521668985167991227:2053] Subscribe: subscriber# [1:7521668989462959310:2467], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:12.047370Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: 
[1:7521668985167991230:2056] Subscribe: subscriber# [1:7521668989462959311:2467], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:12.047384Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521668985167991233:2059] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521668989462959312:2467] 2025-06-30T09:17:12.047391Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521668985167991233:2059] Subscribe: subscriber# [1:7521668989462959312:2467], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:17:12.047398Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521668989462959310:2467][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668985167991227:2053] 2025-06-30T09:17:12.047407Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521668989462959311:2467][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668985167991230:2056] 2025-06-30T09:17:12.047407Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521668985167991227:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521668989462959310:2467] 2025-06-30T09:17:12.047410Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521668985167991230:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521668989462959311:2467] 2025-06-30T09:17:12.047412Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521668989462959312:2467][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668985167991233:2059] 2025-06-30T09:17:12.047416Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521668985167991233:2059] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521668989462959312:2467] 2025-06-30T09:17:12.047423Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521668989462959306:2467][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668989462959307:2467] 2025-06-30T09:17:12.047430Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521668989462959306:2467][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668989462959308:2467] 2025-06-30T09:17:12.047456Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521668989462959306:2467][/dc-1] Set up state: owner# [1:7521668985167991578:2157], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:12.047511Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521668989462959306:2467][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521668989462959309:2467] 2025-06-30T09:17:12.047529Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521668989462959306:2467][/dc-1] Path was already updated: owner# [1:7521668985167991578:2157], state# { 
Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:17:12.047551Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521668989462959310:2467][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668989462959307:2467], cookie# 1 2025-06-30T09:17:12.047555Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521668989462959311:2467][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668989462959308:2467], cookie# 1 2025-06-30T09:17:12.047559Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521668989462959312:2467][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668989462959309:2467], cookie# 1 2025-06-30T09:17:12.047565Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668985167991233:2059] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668989462959312:2467], cookie# 1 2025-06-30T09:17:12.047577Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521668989462959312:2467][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668985167991233:2059], cookie# 1 2025-06-30T09:17:12.047582Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521668989462959306:2467][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668989462959309:2467], cookie# 1 2025-06-30T09:17:12.047593Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521668989462959306:2467][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:17:12.047602Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668985167991227:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668989462959310:2467], cookie# 1 2025-06-30T09:17:12.047606Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521668985167991230:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521668989462959311:2467], cookie# 1 2025-06-30T09:17:12.047611Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521668989462959310:2467][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668985167991227:2053], cookie# 1 2025-06-30T09:17:12.047618Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521668989462959311:2467][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521668985167991230:2056], cookie# 1 2025-06-30T09:17:12.047623Z node 1 :SCHEME ... 
source_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:18:25.502954Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7521669306463265350:2480], recipient# [8:7521669306463265349:2428], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:25.645825Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7521669016101995957:2141], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:25.645873Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7521669016101995957:2141], cacheItem# { Subscriber: { Subscriber: [7:7521669016101996555:2553] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:18:25.645903Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7521669303864806586:3134], recipient# [7:7521669303864806585:2414], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:25.918984Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7521669018700455773:2102], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:25.919042Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7521669018700455773:2102], cacheItem# { Subscriber: { Subscriber: [8:7521669018700455837:2113] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 
0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:18:25.919070Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7521669306463265352:2481], recipient# [8:7521669306463265351:2429], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:26.298999Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7521669016101995957:2141], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:26.299066Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7521669016101995957:2141], cacheItem# { Subscriber: { Subscriber: [7:7521669020396963939:2609] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:18:26.299099Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7521669308159773893:3138], recipient# [7:7521669308159773892:2415], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:26.504554Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7521669018700455773:2102], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:26.504603Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7521669018700455773:2102], cacheItem# { Subscriber: { Subscriber: [8:7521669022995423504:2344] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 
Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:18:26.504626Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7521669310758232650:2482], recipient# [8:7521669310758232649:2430], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:26.647010Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7521669016101995957:2141], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:26.647052Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7521669016101995957:2141], cacheItem# { Subscriber: { Subscriber: [7:7521669016101996555:2553] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:18:26.647076Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7521669308159773895:3139], recipient# [7:7521669308159773894:2416], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:26.923225Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7521669018700455773:2102], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:26.923275Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7521669018700455773:2102], cacheItem# { Subscriber: { Subscriber: 
[8:7521669018700455837:2113] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:18:26.923300Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7521669310758232652:2483], recipient# [8:7521669310758232651:2431], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx [GOOD] >> THiveTest::TestDeleteOwnerTabletsMany [GOOD] >> THiveTest::TestDeleteTabletWithFollowers >> TxUsage::WriteToTopic_Demo_50_Query [GOOD] >> TxUsage::WriteToTopic_Demo_24_Table |79.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_transfer/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpPg::InsertValuesFromTableWithDefault+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefault-useSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:118:2057] recipient: [1:112:2142] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:118:2057] recipient: [1:112:2142] Leader for TabletID 9437184 is [1:136:2157] sender: [1:138:2057] recipient: [1:112:2142] 2025-06-30T09:18:03.981079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:03.981109Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:03.981827Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:18:03.985426Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:18:03.985573Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:136:2157] 2025-06-30T09:18:03.985641Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:03.995809Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:18:03.999311Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:03.999370Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:03.999564Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-30T09:18:03.999574Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords 
at tablet: 9437184 2025-06-30T09:18:03.999582Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-30T09:18:03.999654Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:03.999669Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:03.999683Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:207:2157] in generation 2 Leader for TabletID 9437184 is [1:136:2157] sender: [1:215:2057] recipient: [1:14:2061] 2025-06-30T09:18:04.027866Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:04.035744Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-30T09:18:04.035854Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:04.035888Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:220:2216] 2025-06-30T09:18:04.035895Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-30T09:18:04.035901Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-30T09:18:04.035907Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:18:04.035963Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:18:04.035973Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:18:04.036093Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-30T09:18:04.036124Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-30T09:18:04.036132Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:18:04.036140Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:04.036147Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-30T09:18:04.036153Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-30T09:18:04.036158Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-30T09:18:04.036164Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-30T09:18:04.036169Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:18:04.036183Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:216:2213], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:04.036188Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:04.036202Z node 
1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:214:2212], serverId# [1:216:2213], sessionId# [0:0:0] 2025-06-30T09:18:04.036809Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:136:2157]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-30T09:18:04.036823Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:18:04.036838Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:18:04.036881Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-30T09:18:04.036894Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-30T09:18:04.036905Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-30T09:18:04.036933Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-30T09:18:04.036938Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-30T09:18:04.036944Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-30T09:18:04.036949Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:18:04.037036Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-30T09:18:04.037041Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-30T09:18:04.037045Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-30T09:18:04.037050Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:18:04.037063Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-30T09:18:04.037067Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-30T09:18:04.037071Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-30T09:18:04.037075Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-30T09:18:04.037081Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-30T09:18:04.048102Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:18:04.048136Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for 
[0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:18:04.048144Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:18:04.048175Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-30T09:18:04.048194Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:04.048327Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:226:2222], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:04.048337Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:04.048360Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:225:2221], serverId# [1:226:2222], sessionId# [0:0:0] 2025-06-30T09:18:04.048384Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:136:2157]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-30T09:18:04.048389Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-30T09:18:04.048437Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-30T09:18:04.048446Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-30T09:18:04.048451Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-30T09:18:04.048459Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-30T09:18:04.049340Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-30T09:18:04.049361Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:18:04.049434Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:18:04.049441Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:18:04.049452Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:18:04.049459Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:04.049465Z node 1 :TX_DATASHARD TRACE: datashard_pipelin ... 
ard_impl.h:3132: StateWork, received event# 268830214, Sender [24:293:2274], Recipient [24:239:2230]: NKikimrTabletBase.TEvGetCounters 2025-06-30T09:18:27.956581Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [24:103:2136], Recipient [24:239:2230]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 103 RawX2: 103079217240 } 2025-06-30T09:18:27.956594Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-30T09:18:27.956675Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [24:298:2278], Recipient [24:239:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:27.956681Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:27.956687Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [24:297:2277], serverId# [24:298:2278], sessionId# [0:0:0] 2025-06-30T09:18:27.956723Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [24:103:2136], Recipient [24:239:2230]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 103 RawX2: 103079217240 } TxBody: "\032\324\002\037\002\006Arg\005\205\n\205\000\205\004?\000\205\002\202\0047\034MyReads MyWrites\205\004?\000\206\202\024Reply\024Write?\000?\000 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\000\005?\004?\014\005?\002)\211\006\202\203\005\004\213\002\203\004\205\002\203\004\01057$UpdateRow\000\003?\016 h\020\000\000\000\000\000\000\r\000\000\000\000\000\000\000\013?\022\003?\020T\001\005?\026)\211\n?\024\206\203\004?\024? ?\024\203\004\020Fold\000)\211\002?\"\206? \034Collect\000)\211\006?(? \203\004\203\0024ListFromRange\000\003? \000\003?,\003\022z\003?.\004\007\010\000\n\003?\024\000)\251\000? \002\000\004)\251\000?\024\002\000\002)\211\006?$\203\005@? 
?\024\030Invoke\000\003?F\006Add?@?D\001\006\002\014\000\007\016\000\003\005?\010?\014\006\002?\006?R\000\003?\014?\014\037/ \0018\000" TxId: 2 ExecLevel: 0 Flags: 0 2025-06-30T09:18:27.956728Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:18:27.956750Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:18:27.956927Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CheckDataTx 2025-06-30T09:18:27.956942Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-30T09:18:27.956946Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CheckDataTx 2025-06-30T09:18:27.956950Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-30T09:18:27.956953Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit BuildAndWaitDependencies 2025-06-30T09:18:27.956961Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:18:27.956972Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 9437184 2025-06-30T09:18:27.956979Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-30T09:18:27.956981Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-30T09:18:27.956984Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit ExecuteDataTx 2025-06-30T09:18:27.956987Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-30T09:18:27.956994Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:18:27.956999Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:124: Operation [0:2] at 9437184 requested 132374 more memory 2025-06-30T09:18:27.957003Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-30T09:18:27.957052Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:18:27.957055Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-30T09:18:27.957058Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:18:27.957374Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 132502 and requests 1060016 more for the next try 2025-06-30T09:18:27.957407Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-30T09:18:27.957411Z node 24 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-30T09:18:27.957436Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:18:27.957441Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-30T09:18:27.957512Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-30T09:18:27.957518Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:18:27.957602Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 1192518 and requests 9540144 more for the next try 2025-06-30T09:18:27.957611Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-30T09:18:27.957614Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-30T09:18:27.957629Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:18:27.957632Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-30T09:18:27.957689Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-30T09:18:27.957696Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:18:27.957808Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 10732662 and requests 85861296 more for the next try 2025-06-30T09:18:27.957819Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-30T09:18:27.957823Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-30T09:18:27.957838Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:18:27.957841Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-30T09:18:27.957881Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-30T09:18:27.957885Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:18:28.425481Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:2] at tablet 9437184 with status COMPLETE 2025-06-30T09:18:28.425539Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:2] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 8, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-30T09:18:28.425566Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is 
ExecutedNoMoreRestarts 2025-06-30T09:18:28.425575Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit ExecuteDataTx 2025-06-30T09:18:28.425583Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit FinishPropose 2025-06-30T09:18:28.425589Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit FinishPropose 2025-06-30T09:18:28.425637Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-30T09:18:28.425642Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit FinishPropose 2025-06-30T09:18:28.425647Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit CompletedOperations 2025-06-30T09:18:28.425652Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CompletedOperations 2025-06-30T09:18:28.425668Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-30T09:18:28.425673Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CompletedOperations 2025-06-30T09:18:28.425678Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 9437184 has finished 2025-06-30T09:18:28.436882Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:18:28.436921Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 9437184 on unit FinishPropose 2025-06-30T09:18:28.436935Z node 24 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 2 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: COMPLETE 2025-06-30T09:18:28.436970Z node 24 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:18:28.437257Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [24:303:2283], Recipient [24:239:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:28.437267Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:28.437275Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [24:302:2282], serverId# [24:303:2283], sessionId# [0:0:0] 2025-06-30T09:18:28.437309Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830214, Sender [24:301:2281], Recipient [24:239:2230]: NKikimrTabletBase.TEvGetCounters ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/unittest >> TxUsage::WriteToTopic_Demo_50_Query [GOOD] Test command err: 2025-06-30T09:16:41.660641Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668855907006144:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.660678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:16:41.692168Z node 1 
:PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cbe/r3tmp/tmp0e7obn/pdisk_1.dat 2025-06-30T09:16:41.724937Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:41.725135Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521668855907006118:2080] 1751275001660412 != 1751275001660415 TServer::EnableGrpc on GrpcPort 23059, node 1 2025-06-30T09:16:41.743160Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002cbe/r3tmp/yandexJLzyub.tmp 2025-06-30T09:16:41.743173Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002cbe/r3tmp/yandexJLzyub.tmp 2025-06-30T09:16:41.743265Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002cbe/r3tmp/yandexJLzyub.tmp 2025-06-30T09:16:41.743287Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:41.745977Z INFO: TTestServer started on Port 10084 GrpcPort 23059 TClient is connected to server localhost:10084 2025-06-30T09:16:41.762727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.762775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.763819Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected PQClient connected to localhost:23059 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.780512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:41.782975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-30T09:16:41.792652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-30T09:16:42.020691Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668860201974173:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.020756Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.020989Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668860201974201:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.021945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:42.024198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710662, at schemeshard: 72057594046644480 2025-06-30T09:16:42.024267Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668860201974203:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-30T09:16:42.074885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.082405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.087927Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668860201974394:2509] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:16:42.102782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.102870Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521668860201974406:2321], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:16:42.102963Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=OTYyZDg1NTItYTJhYjg3ODEtNDMyOWI3NWQtYjk2ZGY1Njc=, ActorId: [1:7521668860201974171:2296], ActorState: ExecuteState, TraceId: 01jz01zp548ecgsxhjtwjyand3, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:16:42.103489Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521668860201974548:2610] 2025-06-30T09:16:42.663156Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:46.660844Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668855907006144:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:46.660878Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:16:47.364941Z :WriteToTopic_Demo_27_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:16:47.371627Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:16:47.377055Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521668881676811237:2695] connected; active server actors: 1 2025-06-30T09:16:47.377238Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-30T09:16:47.377398Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:16:47.377452Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-30T09:16:47.377594Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-30T09:16:47.379166Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:16:47.379254Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30 ... les { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "test-topic" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/test-topic" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7521669292763633635 RawX2: 42949675129 } Partitions { Partition { PartitionId: 0 } } 2025-06-30T09:18:28.586591Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:18:28.587964Z node 10 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:18:28.588260Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:18:28.588309Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:18:28.588319Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-30T09:18:28.588321Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715673, State EXECUTED 2025-06-30T09:18:28.588325Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976715673 State EXECUTED FrontTxId 281474976715673 2025-06-30T09:18:28.588329Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-30T09:18:28.588332Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715673, NewState WAIT_RS_ACKS 2025-06-30T09:18:28.588335Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 
72075186224037892] TxId 281474976715673 moved from EXECUTED to WAIT_RS_ACKS 2025-06-30T09:18:28.588339Z node 10 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715673] PredicateAcks: 0/0 2025-06-30T09:18:28.588342Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-30T09:18:28.588344Z node 10 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715673] PredicateAcks: 0/0 2025-06-30T09:18:28.588347Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037892] add an TxId 281474976715673 to the list for deletion 2025-06-30T09:18:28.588351Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715673, NewState DELETING 2025-06-30T09:18:28.588355Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037892] delete key for TxId 281474976715673 2025-06-30T09:18:28.588365Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:18:28.588554Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:18:28.588562Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-30T09:18:28.588564Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715673, State DELETING 2025-06-30T09:18:28.588567Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037892] delete TxId 281474976715673 2025-06-30T09:18:28.595291Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-30T09:18:28.595495Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-30T09:18:28.595502Z :INFO: [/Root] MessageGroupId [src] SessionId [] Start write session. 
Will connect to endpoint: localhost:17696 2025-06-30T09:18:28.599130Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-30T09:18:28.599499Z node 10 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-30T09:18:28.599515Z node 10 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-30T09:18:28.599664Z node 10 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-30T09:18:28.599686Z node 10 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:58668 2025-06-30T09:18:28.599692Z node 10 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:58668 proto=v1 topic=test-topic durationSec=0 2025-06-30T09:18:28.599697Z node 10 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:18:28.610808Z node 10 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-30T09:18:28.610865Z node 10 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-30T09:18:28.610868Z node 10 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-30T09:18:28.610870Z node 10 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-30T09:18:28.610886Z node 10 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [10:7521669318533438492:2422] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-30T09:18:28.610894Z node 10 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-30T09:18:28.615785Z :INFO: [/Root] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1751275108615 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:28.615833Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session established. 
Init response: session_id: "src|2575d4d9-d3efd015-c13edf88-e8d5024_0" topic: "test-topic" 2025-06-30T09:18:28.615946Z :INFO: [/Root] MessageGroupId [src] SessionId [src|2575d4d9-d3efd015-c13edf88-e8d5024_0] Write session: close. Timeout = 0 ms 2025-06-30T09:18:28.615951Z :INFO: [/Root] MessageGroupId [src] SessionId [src|2575d4d9-d3efd015-c13edf88-e8d5024_0] Write session will now close 2025-06-30T09:18:28.615957Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|2575d4d9-d3efd015-c13edf88-e8d5024_0] Write session: aborting 2025-06-30T09:18:28.616066Z :INFO: [/Root] MessageGroupId [src] SessionId [src|2575d4d9-d3efd015-c13edf88-e8d5024_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:28.616072Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|2575d4d9-d3efd015-c13edf88-e8d5024_0] Write session: destroy 2025-06-30T09:18:28.616722Z :WriteToTopic_Demo_50_Query INFO: Topic created 2025-06-30T09:18:28.614958Z node 10 :PQ_WRITE_PROXY DEBUG: writer.cpp:821: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 10, Generation: 1 2025-06-30T09:18:28.614987Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [10:7521669318533438495:2422], now have 1 active actors on pipe 2025-06-30T09:18:28.615051Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-30T09:18:28.615060Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-30T09:18:28.615106Z node 10 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|2575d4d9-d3efd015-c13edf88-e8d5024_0 generated for partition 0 topic 'test-topic' owner src 2025-06-30T09:18:28.615156Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-30T09:18:28.615181Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:28.615269Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-30T09:18:28.615273Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-30T09:18:28.615292Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:28.615322Z node 10 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|2575d4d9-d3efd015-c13edf88-e8d5024_0 2025-06-30T09:18:28.617844Z node 10 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|2575d4d9-d3efd015-c13edf88-e8d5024_0 grpc read done: success: 0 data: 2025-06-30T09:18:28.617852Z node 10 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|2575d4d9-d3efd015-c13edf88-e8d5024_0 grpc read failed 2025-06-30T09:18:28.617859Z node 10 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|2575d4d9-d3efd015-c13edf88-e8d5024_0 grpc closed 2025-06-30T09:18:28.617863Z node 10 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|2575d4d9-d3efd015-c13edf88-e8d5024_0 is DEAD 2025-06-30T09:18:28.618122Z node 10 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:28.618338Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [10:7521669318533438495:2422] destroyed 2025-06-30T09:18:28.618355Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> KqpPg::TypeCoercionBulkUpsert >> THiveTest::TestDeleteTabletWithFollowers [GOOD] >> THiveTest::TestCreateTabletBeforeLocal >> KqpPg::ReadPgArray >> KqpPg::ReadPgArray [GOOD] >> KqpPg::InsertValuesFromTableWithDefault-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink >> KqpPg::TableArrayInsert+useSink >> KqpPg::InsertFromSelect_Simple+useSink [GOOD] >> KqpPg::InsertFromSelect_Simple-useSink >> THiveTest::TestCreateTabletBeforeLocal [GOOD] >> THiveTest::TestCreateTabletReboots |79.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_populator/unittest >> TPersqueueControlPlaneTestSuite::SetupReadLockSessionWithDatabase ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/unittest >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query [FAIL] Test command err: 2025-06-30T09:16:41.638101Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668859530930871:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.638140Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002ca1/r3tmp/tmpFqjkwC/pdisk_1.dat 2025-06-30T09:16:41.667417Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created TServer::EnableGrpc on GrpcPort 9731, node 1 2025-06-30T09:16:41.694144Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:41.694725Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521668859530930849:2080] 1751275001637967 != 1751275001637970 2025-06-30T09:16:41.702360Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002ca1/r3tmp/yandexSgRDhQ.tmp 2025-06-30T09:16:41.702376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002ca1/r3tmp/yandexSgRDhQ.tmp 2025-06-30T09:16:41.702450Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002ca1/r3tmp/yandexSgRDhQ.tmp 2025-06-30T09:16:41.702494Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:41.710769Z INFO: TTestServer started on Port 20146 GrpcPort 9731 TClient is connected to server localhost:20146 PQClient connected to localhost:9731 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:16:41.740473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.740514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.741233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.772590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:16:41.786361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-30T09:16:42.104057Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668863825898927:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.104078Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668863825898921:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.104204Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.104956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:42.106900Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668863825898936:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:16:42.150547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.159607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.189383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.191391Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668863825899172:2537] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521668863825899293:2611] 2025-06-30T09:16:42.640003Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:46.638255Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668859530930871:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:46.638306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:16:47.534963Z :Sinks_Oltp_WriteToTopic_1_Query INFO: TTopicSdkTestSetup started 2025-06-30T09:16:47.559726Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:16:47.565056Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521668885300736012:2709] connected; active server actors: 1 2025-06-30T09:16:47.565136Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-30T09:16:47.565376Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:16:47.565420Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-30T09:16:47.565788Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-30T09:16:47.574802Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:16:47.578815Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:16:47.578925Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:16:47.578994Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-30T09:16:47.578999Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:16:47.579002Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-30T09:16:47.579007Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:16:47.579015Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:47.579032Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-30T09:16:47.580851Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72075186224037892, NodeId 1, Generation 1 2025-06-30T09:16:47.580896Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [1:7521668885300736031:2421], now have 1 active actors on pipe 2025-06-30T09:16:47.615074Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [1:7521668885300736010:2708], now have 1 active actors on pipe 2025-06-30T09:16:47.619086Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72075186224037892] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 7521668859530931228 RawX2: 4294969475 } TxId: 281474976715674 Config { TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: ... 
ure::TTableRecord, std::__y1::allocator> const&, NYdb::Dev::NTopic::NTests::NTestSuiteTxUsage::TFixture::ISession&, NYdb::Dev::TTransactionBase*)+328 (0x13CA2FB8) NYdb::Dev::NTopic::NTests::NTestSuiteTxUsage::TFixtureSinks::TestSinksOltpWriteToTopicAndTable6()+1055 (0x13CADFDF) NYdb::Dev::NTopic::NTests::NTestSuiteTxUsage::TCurrentTest::Execute()::'lambda'()::operator()() const+71 (0x13CBCD27) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+126 (0x13F8BAAE) NYdb::Dev::NTopic::NTests::NTestSuiteTxUsage::TCurrentTest::Execute()+422 (0x13CBC6E6) NUnitTest::TTestFactory::Execute()+803 (0x13F8C223) NUnitTest::RunMain(int, char**)+3021 (0x13F9DDCD) ??+0 (0x7F24366ECD90) __libc_start_main+128 (0x7F24366ECE40) _start+41 (0x12D7E029) 2025-06-30T09:18:15.571931Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|942a5d2-7c144d6d-4c9434bd-3733ab35_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2025-06-30T09:18:15.571936Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|942a5d2-7c144d6d-4c9434bd-3733ab35_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:18:15.571941Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|942a5d2-7c144d6d-4c9434bd-3733ab35_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:18:15.571994Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|942a5d2-7c144d6d-4c9434bd-3733ab35_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:15.571998Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|942a5d2-7c144d6d-4c9434bd-3733ab35_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:18:15.572815Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message_group_id|942a5d2-7c144d6d-4c9434bd-3733ab35_0 grpc read done: success: 0 data: 2025-06-30T09:18:15.572825Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message_group_id|942a5d2-7c144d6d-4c9434bd-3733ab35_0 grpc read failed 2025-06-30T09:18:15.572831Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message_group_id|942a5d2-7c144d6d-4c9434bd-3733ab35_0 grpc closed 2025-06-30T09:18:15.572834Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message_group_id|942a5d2-7c144d6d-4c9434bd-3733ab35_0 is DEAD 2025-06-30T09:18:15.572984Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:15.572992Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:15.573403Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521669263157915159:2521] destroyed 2025-06-30T09:18:15.573411Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521669263157915163:2521] destroyed 2025-06-30T09:18:15.573610Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:18:15.573623Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: {0, {11, 281474976715682}, 100000}, State: StateIdle] TPartition::DropOwner. 
2025-06-30T09:18:15.573665Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:5134: [PQ: 72075186224037896] Handle TEvLongTxService::TEvLockStatus LockId: 281474976715682 LockNode: 11 Status: STATUS_NOT_FOUND 2025-06-30T09:18:15.573670Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:5149: [PQ: 72075186224037896] TxWriteInfo: WriteId {11, 281474976715682}, TxId (empty maybe), Status STATUS_SUBSCRIBED 2025-06-30T09:18:15.573671Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:5163: [PQ: 72075186224037896] delete partitions for WriteId {11, 281474976715682} 2025-06-30T09:18:15.573675Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:5262: [PQ: 72075186224037896] send TEvPQ::TEvDeletePartition to partition {0, {11, 281474976715682}, 100000} 2025-06-30T09:18:15.573680Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:5134: [PQ: 72075186224037894] Handle TEvLongTxService::TEvLockStatus LockId: 281474976715682 LockNode: 11 Status: STATUS_NOT_FOUND 2025-06-30T09:18:15.573682Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:5149: [PQ: 72075186224037894] TxWriteInfo: WriteId {11, 281474976715682}, TxId (empty maybe), Status STATUS_SUBSCRIBED 2025-06-30T09:18:15.573683Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:5163: [PQ: 72075186224037894] delete partitions for WriteId {11, 281474976715682} 2025-06-30T09:18:15.573686Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:5262: [PQ: 72075186224037894] send TEvPQ::TEvDeletePartition to partition {0, {11, 281474976715682}, 100000} 2025-06-30T09:18:15.573760Z node 11 :PERSQUEUE DEBUG: partition.cpp:3863: [PQ: 72075186224037896, Partition: {0, {11, 281474976715682}, 100000}, State: StateIdle] Handle TEvPQ::TEvDeletePartition 2025-06-30T09:18:15.573775Z node 11 :PERSQUEUE DEBUG: partition.cpp:3863: [PQ: 72075186224037894, Partition: {0, {11, 281474976715682}, 100000}, State: StateIdle] Handle TEvPQ::TEvDeletePartition 2025-06-30T09:18:15.573783Z node 11 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:18:15.573786Z node 11 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from D0000100000(+) to D0000100001(-) 2025-06-30T09:18:15.573790Z node 11 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:18:15.573791Z node 11 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from D0000100000(+) to D0000100001(-) 2025-06-30T09:18:15.574316Z node 11 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 100000 offset 0 count 1 actorID [11:7521669258862947579:2471] 2025-06-30T09:18:15.574322Z node 11 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 100000 offset 1 count 2 actorID [11:7521669258862947579:2471] 2025-06-30T09:18:15.574331Z node 11 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 100000 offset 0 count 1 actorID [11:7521669258862947458:2452] 2025-06-30T09:18:15.574360Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {11, 281474976715682}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=293, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:15.574366Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {11, 281474976715682}, 100000}, State: StateIdle] need more data for compaction. 
cumulativeSize=293, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:15.574443Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:5196: [PQ: 72075186224037894] Handle TEvPQ::TEvDeletePartitionDone {0, {11, 281474976715682}, 100000} 2025-06-30T09:18:15.574452Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3638: [PQ: 72075186224037894] send TEvUnsubscribeLock for WriteId {11, 281474976715682} 2025-06-30T09:18:15.574465Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037894] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:18:15.574561Z node 11 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037896' partition 100000 offset 0 partno 0 count 1 parts 0 suffix '63' size 158 2025-06-30T09:18:15.574565Z node 11 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037896' partition 100000 offset 1 partno 0 count 2 parts 0 suffix '63' size 287 2025-06-30T09:18:15.574570Z node 11 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037894' partition 100000 offset 0 partno 0 count 1 parts 0 suffix '63' size 293 2025-06-30T09:18:15.574691Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037894] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:18:15.583570Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037896, Partition: {0, {11, 281474976715682}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=445, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:15.583585Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037896, Partition: {0, {11, 281474976715682}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=445, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:15.583713Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:5196: [PQ: 72075186224037896] Handle TEvPQ::TEvDeletePartitionDone {0, {11, 281474976715682}, 100000} 2025-06-30T09:18:15.583723Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3638: [PQ: 72075186224037896] send TEvUnsubscribeLock for WriteId {11, 281474976715682} 2025-06-30T09:18:15.583740Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037896] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:18:15.584135Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:18:15.586724Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|95190a09-102a28a5-884f503b-f4d3af0_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-30T09:18:15.586733Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|95190a09-102a28a5-884f503b-f4d3af0_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:18:15.586752Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|95190a09-102a28a5-884f503b-f4d3af0_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:18:15.586906Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|95190a09-102a28a5-884f503b-f4d3af0_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:15.586910Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|95190a09-102a28a5-884f503b-f4d3af0_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:18:15.587380Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|95190a09-102a28a5-884f503b-f4d3af0_0 grpc read done: success: 0 data: 2025-06-30T09:18:15.587386Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|95190a09-102a28a5-884f503b-f4d3af0_0 grpc read failed 2025-06-30T09:18:15.587393Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|95190a09-102a28a5-884f503b-f4d3af0_0 grpc closed 2025-06-30T09:18:15.587395Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|95190a09-102a28a5-884f503b-f4d3af0_0 is DEAD 2025-06-30T09:18:15.587548Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:15.587555Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:15.587809Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669263157915107:2510] destroyed 2025-06-30T09:18:15.587816Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669263157915110:2510] destroyed 2025-06-30T09:18:15.587826Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. |79.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |79.3%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_transfer/test-results/unittest/{meta.json ... 
results_accumulator.log} |79.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |79.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview >> THiveTest::TestHiveBalancerWithSpareNodes [GOOD] >> THiveTest::TestLocalRegistrationInSharedHive |79.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview >> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink >> TPersQueueCommonTest::TestWriteWithRateLimiterWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit >> PersQueueSdkReadSessionTest::StopResumeReadingData [GOOD] >> ReadSessionImplTest::CreatePartitionStream [GOOD] >> ReadSessionImplTest::BrokenCompressedData [GOOD] >> ReadSessionImplTest::CommitOffsetTwiceIsError [GOOD] >> ReadSessionImplTest::DataReceivedCallback |79.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |79.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClass >> THiveTest::TestHiveBalancerNodeRestarts [GOOD] >> THiveTest::TestHiveBalancerDifferentResources >> THiveTest::TestLocalRegistrationInSharedHive [GOOD] >> LocalPartition::WithoutPartitionWithSplit [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool+useSink >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage [GOOD] >> BasicUsage::TWriteSession_AutoBatching [GOOD] >> BasicUsage::TWriteSession_BatchingProducesContinueTokens [GOOD] >> BasicUsage::BrokenCredentialsProvider >> THiveTest::TestCreateTabletReboots [GOOD] >> THiveTest::TestCreateTabletWithWrongSPoolsAndReassignGroupsFailButDeletionIsOk >> ReadSessionImplTest::DataReceivedCallback [GOOD] >> ReadSessionImplTest::CommonHandler >> ReadSessionImplTest::CommonHandler [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/unittest >> LocalPartition::WithoutPartitionWithSplit [GOOD] Test command err: 2025-06-30T09:16:41.704804Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668855836562470:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.704828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:16:41.750209Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002ceb/r3tmp/tmphgwx05/pdisk_1.dat 2025-06-30T09:16:41.783070Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22307, node 1 2025-06-30T09:16:41.797281Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002ceb/r3tmp/yandexaNa2Y6.tmp 2025-06-30T09:16:41.797291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002ceb/r3tmp/yandexaNa2Y6.tmp 2025-06-30T09:16:41.797360Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002ceb/r3tmp/yandexaNa2Y6.tmp 2025-06-30T09:16:41.797400Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:41.802401Z INFO: TTestServer started on Port 21102 GrpcPort 22307 2025-06-30T09:16:41.805649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.805674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.806696Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21102 PQClient connected to localhost:22307 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.857064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:16:41.876369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-30T09:16:42.134242Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668860131530500:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.134288Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.134463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668860131530513:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.135255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:42.137200Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668860131530515:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:16:42.186628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.195167Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668860131530653:2473] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:16:42.195983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.214269Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521668860131530685:2318], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:16:42.214813Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZGQwNTJjNmUtNzJhZGJiNmEtMTFiOWU0NmItYjE2Mzk0ZjY=, ActorId: [1:7521668860131530498:2296], ActorState: ExecuteState, TraceId: 01jz01zp8k6nfxhzagasjsg4bg, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:16:42.215256Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:16:42.224536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521668860131530877:2610] 2025-06-30T09:16:42.706943Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:46.706156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668855836562470:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:46.706206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:16:47.458865Z :ReadWithoutConsumerWithRestarts INFO: TTopicSdkTestSetup started 2025-06-30T09:16:47.483105Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:16:47.501690Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521668881606367564:2695] connected; active server actors: 1 2025-06-30T09:16:47.501786Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-30T09:16:47.501889Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:16:47.502234Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:16:47.502308Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:16:47.502365Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-30T09:16:47.502371Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:16:47.502373Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-30T09:16:47.502386Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:16:47.502393Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:47.502407Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-30T09:16:47.502442Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, ... : 72075186224037892] TxId 281474976715715, State WAIT_RS 2025-06-30T09:18:31.378817Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976715715 State WAIT_RS FrontTxId 281474976715715 2025-06-30T09:18:31.378818Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4521: [PQ: 72075186224037892] HaveParticipantsDecision 1 2025-06-30T09:18:31.378823Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715715, NewState EXECUTING 2025-06-30T09:18:31.378826Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976715715 moved from WAIT_RS to EXECUTING 2025-06-30T09:18:31.378827Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037892] Received 0, Expected 1 2025-06-30T09:18:31.378886Z node 13 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1751275111425, TxId 281474976715715 2025-06-30T09:18:31.378891Z node 13 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 281474976715715 2025-06-30T09:18:31.378915Z node 13 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-30T09:18:31.379163Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:18:31.379168Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037896] Try execute txs with state EXECUTED 2025-06-30T09:18:31.379171Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037896] TxId 281474976715715, State EXECUTED 2025-06-30T09:18:31.379173Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037896] TxId 281474976715715 State EXECUTED FrontTxId 281474976715715 2025-06-30T09:18:31.379176Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037896] TPersQueue::SendEvReadSetAckToSenders 2025-06-30T09:18:31.379183Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4048: [PQ: 72075186224037896] Send TEvTxProcessing::TEvReadSetAck {TEvReadSet step# 1751275111425 txid# 281474976715715 TabletSource# 72075186224037892 TabletDest# 72075186224037896 SetTabletConsumer# 72075186224037896 Flags# 0 Seqno# 0} 2025-06-30T09:18:31.379186Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037896] TxId 281474976715715, NewState WAIT_RS_ACKS 2025-06-30T09:18:31.379188Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037896] TxId 281474976715715 moved from EXECUTED to WAIT_RS_ACKS 2025-06-30T09:18:31.379191Z node 13 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715715] PredicateAcks: 0/1 2025-06-30T09:18:31.379192Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037896] HaveAllRecipientsReceive 0, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-30T09:18:31.379194Z node 13 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715715] PredicateAcks: 0/1 2025-06-30T09:18:31.379244Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3509: [PQ: 72075186224037892] Handle TEvTxProcessing::TEvReadSetAck Step: 1751275111425 TxId: 281474976715715 TabletSource: 72075186224037892 TabletDest: 72075186224037896 TabletConsumer: 72075186224037896 Flags: 0 Seqno: 0 2025-06-30T09:18:31.379246Z node 13 :PERSQUEUE DEBUG: transaction.cpp:310: [TxId: 281474976715715] Handle TEvReadSetAck 2025-06-30T09:18:31.379248Z node 13 :PERSQUEUE DEBUG: transaction.cpp:324: [TxId: 281474976715715] Predicate acks 1/1 2025-06-30T09:18:31.379469Z node 13 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:18:31.379481Z node 13 :PERSQUEUE DEBUG: partition_write.cpp:642: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::CheckScaleStatus splitMergeAvgWriteBytes# 124 writeSpeedUsagePercent# 3.941853841e-05 scaleThresholdSeconds# 300 totalPartitionWriteSpeed# 1048576 sourceIdCount=1 canSplit=0 Topic: "test-topic". Partition: 0 2025-06-30T09:18:31.379487Z node 13 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=179, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:31.379524Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3583: [PQ: 72075186224037892] Handle TEvPQ::TEvTxCommitDone Step 1751275111425, TxId 281474976715715, Partition 0 2025-06-30T09:18:31.379527Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTING 2025-06-30T09:18:31.379528Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715715, State EXECUTING 2025-06-30T09:18:31.379530Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976715715 State EXECUTING FrontTxId 281474976715715 2025-06-30T09:18:31.379532Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037892] Received 1, Expected 1 2025-06-30T09:18:31.379536Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4224: [PQ: 72075186224037892] TxId: 281474976715715 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-30T09:18:31.379538Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4555: [PQ: 72075186224037892] complete TxId 281474976715715 2025-06-30T09:18:31.379541Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4573: [PQ: 72075186224037892] delete partitions for TxId 281474976715715 2025-06-30T09:18:31.379542Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715715, NewState EXECUTED 2025-06-30T09:18:31.379545Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976715715 moved from EXECUTING to EXECUTED 2025-06-30T09:18:31.379548Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72075186224037892] write key for TxId 281474976715715 2025-06-30T09:18:31.379754Z node 13 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715715] save tx TxId: 281474976715715 State: EXECUTED MinStep: 1751275111418 MaxStep: 1751275141418 PredicatesReceived { TabletId: 72075186224037896 Predicate: true } PredicateRecipients: 72075186224037896 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 9223372036854775807 Consumer: "test-consumer" Path: "/Root/test-topic" ForceCommit: true KillReadSession: false OnlyCheckCommitedToFinish: true ReadSessionId: "test-consumer_13_1_6139182204936747075_v1" } Step: 1751275111425 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 7521669329087464970 RawX2: 4503655461948084 } Partitions { } 2025-06-30T09:18:31.379766Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:18:31.380293Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:18:31.380299Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-30T09:18:31.380301Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715715, State EXECUTED 2025-06-30T09:18:31.380303Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976715715 State EXECUTED FrontTxId 281474976715715 2025-06-30T09:18:31.380306Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-30T09:18:31.380311Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4048: [PQ: 72075186224037892] Send TEvTxProcessing::TEvReadSetAck {TEvReadSet step# 1751275111425 txid# 281474976715715 TabletSource# 72075186224037896 TabletDest# 72075186224037892 SetTabletConsumer# 72075186224037892 Flags# 0 Seqno# 0} 2025-06-30T09:18:31.380314Z node 13 
:PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715715, NewState WAIT_RS_ACKS 2025-06-30T09:18:31.380316Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976715715 moved from EXECUTED to WAIT_RS_ACKS 2025-06-30T09:18:31.380329Z node 13 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715715] PredicateAcks: 1/1 2025-06-30T09:18:31.380331Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-30T09:18:31.380332Z node 13 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715715] PredicateAcks: 1/1 2025-06-30T09:18:31.380335Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037892] add an TxId 281474976715715 to the list for deletion 2025-06-30T09:18:31.380337Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715715, NewState DELETING 2025-06-30T09:18:31.380341Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037892] delete key for TxId 281474976715715 2025-06-30T09:18:31.380348Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:18:31.380417Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3509: [PQ: 72075186224037896] Handle TEvTxProcessing::TEvReadSetAck Step: 1751275111425 TxId: 281474976715715 TabletSource: 72075186224037896 TabletDest: 72075186224037892 TabletConsumer: 72075186224037892 Flags: 0 Seqno: 0 2025-06-30T09:18:31.380419Z node 13 :PERSQUEUE DEBUG: transaction.cpp:310: [TxId: 281474976715715] Handle TEvReadSetAck 2025-06-30T09:18:31.380420Z node 13 :PERSQUEUE DEBUG: transaction.cpp:324: [TxId: 281474976715715] Predicate acks 1/1 2025-06-30T09:18:31.380424Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037896] Try execute txs with state WAIT_RS_ACKS 2025-06-30T09:18:31.380425Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037896] TxId 281474976715715, State WAIT_RS_ACKS 2025-06-30T09:18:31.380428Z node 13 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715715] PredicateAcks: 1/1 2025-06-30T09:18:31.380428Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037896] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-30T09:18:31.380429Z node 13 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715715] PredicateAcks: 1/1 2025-06-30T09:18:31.380431Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037896] add an TxId 281474976715715 to the list for deletion 2025-06-30T09:18:31.380433Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037896] TxId 281474976715715, NewState DELETING 2025-06-30T09:18:31.380435Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037896] delete key for TxId 281474976715715 2025-06-30T09:18:31.380439Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037896] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:18:31.380753Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:18:31.380758Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-30T09:18:31.380760Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715715, State DELETING 2025-06-30T09:18:31.380764Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037892] delete TxId 281474976715715 2025-06-30T09:18:31.380790Z node 13 
:PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:18:31.380792Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037896] Try execute txs with state DELETING 2025-06-30T09:18:31.380793Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037896] TxId 281474976715715, State DELETING 2025-06-30T09:18:31.380795Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037896] delete TxId 281474976715715 2025-06-30T09:18:31.402808Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|c896788-73bf7d9a-1853afc4-249ad466_0] PartitionId [2] Generation [1] Write session: destroy >> KqpPg::InsertValuesFromTableWithDefaultBool+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool-useSink >> THiveTest::TestCreateTabletWithWrongSPoolsAndReassignGroupsFailButDeletionIsOk [GOOD] >> THiveTest::TestCreateTabletAndReassignGroups3 >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> THiveTest::TestLocalRegistrationInSharedHive [GOOD] Test command err: 2025-06-30T09:17:52.411481Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:52.434707Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:52.434796Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:52.435041Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:52.435112Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:17:52.435976Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-30T09:17:52.435986Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:52.438129Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:28:2075] ControllerId# 72057594037932033 2025-06-30T09:17:52.438135Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:52.438154Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:52.438173Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:52.472555Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:52.472573Z node 1 
:BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:52.475937Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:36:2080] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:52.475971Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:37:2081] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:52.476257Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:38:2082] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:52.477190Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:39:2083] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:52.477215Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:40:2084] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:52.477675Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:41:2085] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:52.478137Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:42:2086] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:52.478142Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-30T09:17:52.478158Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:28:2075] 2025-06-30T09:17:52.478164Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:28:2075] 2025-06-30T09:17:52.478172Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-30T09:17:52.478181Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:17:52.478680Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:17:52.479049Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:52.479054Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:52.479061Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:17:52.479083Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:52.589209Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:28:2075] 2025-06-30T09:17:52.589255Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:52.589265Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-30T09:17:52.593633Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-30T09:17:52.594571Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-30T09:17:52.594587Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:17:52.594683Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true 
NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:52.599117Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-30T09:17:52.599146Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:17:52.599152Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:17:52.599186Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:53:2093] 2025-06-30T09:17:52.599206Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:17:52.604162Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:32:2063] 2025-06-30T09:17:52.604181Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:32:2063] 2025-06-30T09:17:52.604207Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:28:2075] 2025-06-30T09:17:52.604222Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-30T09:17:52.604243Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [1:53:2093] 2025-06-30T09:17:52.604246Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [1:53:2093] 2025-06-30T09:17:52.604256Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-30T09:17:52.604267Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:17:52.604272Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-30T09:17:52.604282Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:17:52.604314Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [1:53:2093] 2025-06-30T09:17:52.604319Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:17:52.604350Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:52.604468Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-30T09:17:52.604476Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:52.604509Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\363\365\\\016\336\205\240m2\241c\3010\003\261\342\227\n\267}" } 2025-06-30T09:17:52.604571Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 
ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:17:52.604629Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:52.604659Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StInit ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:52.604680Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-30T09:17:52.604689Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:17:52.604727Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-30T09:17:52.604780Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-30T09:17:52.604785Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-30T09:17:52.605477Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:28:2075] 2025-06-30T09:17:52.605486Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:28:2075] 2025-06-30T09:17:52.605493Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-30T09:17:52.605499Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 2146435075 Sender# [1:47:2090] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:17:52.605508Z node 1 :BS_NODE DEBUG: {NWDC3 ... 
torage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-30T09:18:31.461586Z node 43 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-30T09:18:31.461593Z node 43 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-30T09:18:31.461688Z node 44 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [43:471:2304] CurrentLeaderTablet: [43:487:2315] CurrentGeneration: 1 CurrentStep: 0} 2025-06-30T09:18:31.461714Z node 43 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [44:563:2160] 2025-06-30T09:18:31.461731Z node 44 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [43:471:2304] CurrentLeaderTablet: [43:487:2315] CurrentGeneration: 1 CurrentStep: 0} 2025-06-30T09:18:31.461754Z node 44 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [43:471:2304] CurrentLeaderTablet: [43:487:2315] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[43:24343667:0] : 3}, {[43:1099535971443:0] : 6}}}} 2025-06-30T09:18:31.461760Z node 44 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037888 followers: 0 2025-06-30T09:18:31.461767Z node 44 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 44 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [43:471:2304] 2025-06-30T09:18:31.461788Z node 44 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037888] forward result remote node 43 [44:564:2161] 2025-06-30T09:18:31.461814Z node 44 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037888] remote node connected [44:564:2161] 2025-06-30T09:18:31.461820Z node 44 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [44:564:2161] 2025-06-30T09:18:31.461877Z node 43 :HIVE TRACE: hive_impl.cpp:118: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([44:563:2160]) [43:572:2364] 2025-06-30T09:18:31.461911Z node 43 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [44:564:2161] 2025-06-30T09:18:31.461930Z node 44 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [44:563:2160] 2025-06-30T09:18:31.461935Z node 44 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [44:563:2160] 2025-06-30T09:18:31.461942Z node 44 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [44:563:2160] 2025-06-30T09:18:31.461963Z node 44 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [44:563:2160] 2025-06-30T09:18:31.461975Z node 44 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594037927937 Status=OK 
ClientId=[44:563:2160]} 2025-06-30T09:18:31.462004Z node 43 :HIVE TRACE: hive_impl.cpp:118: HIVE#72075186224037888 Handle TEvTabletPipe::TEvServerConnected([44:564:2161]) [43:573:2365] 2025-06-30T09:18:31.462061Z node 44 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [44:564:2161] 2025-06-30T09:18:31.462065Z node 44 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [44:564:2161] 2025-06-30T09:18:31.462069Z node 44 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [44:564:2161] 2025-06-30T09:18:31.462075Z node 44 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [44:564:2161] 2025-06-30T09:18:31.462084Z node 44 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72075186224037888 Status=OK ClientId=[44:564:2161]} 2025-06-30T09:18:31.462105Z node 43 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594037927937] Push Sender# [44:560:2160] EventType# 268959744 2025-06-30T09:18:31.462139Z node 43 :HIVE DEBUG: hive_impl.cpp:145: HIVE#72057594037927937 Handle TEvLocal::TEvRegisterNode from [44:560:2160] HiveId: 72057594037927937 ServicedDomains { SchemeShard: 72057594046678944 PathId: 2 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } 2025-06-30T09:18:31.462157Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} queued, type NKikimr::NHive::TTxRegisterNode 2025-06-30T09:18:31.462165Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:18:31.462175Z node 43 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037927937 THive::TTxRegisterNode(44)::Execute 2025-06-30T09:18:31.462211Z node 43 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72057594037927937 ProcessWaitQueue (0) 2025-06-30T09:18:31.462216Z node 43 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037927937 ProcessBootQueue (0) 2025-06-30T09:18:31.462219Z node 43 :HIVE TRACE: hive_impl.cpp:348: HIVE#72057594037927937 ProcessBootQueue - sending 2025-06-30T09:18:31.462224Z node 43 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72057594037927937 ProcessWaitQueue (0) 2025-06-30T09:18:31.462227Z node 43 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037927937 ProcessBootQueue (0) 2025-06-30T09:18:31.462237Z node 43 :HIVE WARN: node_info.cpp:25: HIVE#72057594037927937 Node(44, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:31.462251Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} hope 1 -> done Change{14, redo 208b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-30T09:18:31.462259Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:18:31.462295Z node 43 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72075186224037888] Push Sender# [44:561:2161] EventType# 268959744 2025-06-30T09:18:31.462317Z node 43 :HIVE DEBUG: hive_impl.cpp:145: HIVE#72075186224037888 Handle TEvLocal::TEvRegisterNode from [44:561:2161] HiveId: 72075186224037888 ServicedDomains { SchemeShard: 72057594046678944 PathId: 2 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } 2025-06-30T09:18:31.462326Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:5} 
Tx{6, NKikimr::NHive::TTxRegisterNode} queued, type NKikimr::NHive::TTxRegisterNode 2025-06-30T09:18:31.462332Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:5} Tx{6, NKikimr::NHive::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:18:31.462338Z node 43 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72075186224037888 THive::TTxRegisterNode(44)::Execute 2025-06-30T09:18:31.462351Z node 43 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(44, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:31.462356Z node 43 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72075186224037888 ProcessWaitQueue (0) 2025-06-30T09:18:31.462359Z node 43 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72075186224037888 ProcessBootQueue (0) 2025-06-30T09:18:31.462363Z node 43 :HIVE TRACE: hive_impl.cpp:348: HIVE#72075186224037888 ProcessBootQueue - sending 2025-06-30T09:18:31.462367Z node 43 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72075186224037888 ProcessWaitQueue (0) 2025-06-30T09:18:31.462371Z node 43 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72075186224037888 ProcessBootQueue (0) 2025-06-30T09:18:31.462379Z node 43 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(44, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:31.462388Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:5} Tx{6, NKikimr::NHive::TTxRegisterNode} hope 1 -> done Change{6, redo 199b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-30T09:18:31.462394Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:5} Tx{6, NKikimr::NHive::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:18:31.462409Z node 43 :HIVE TRACE: hive_impl.cpp:332: HIVE#72057594037927937 ProcessBootQueue - executing 2025-06-30T09:18:31.462415Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-30T09:18:31.462419Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:18:31.462423Z node 43 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037927937 THive::TTxProcessBootQueue()::Execute 2025-06-30T09:18:31.462428Z node 43 :HIVE DEBUG: hive_impl.cpp:226: HIVE#72057594037927937 Handle ProcessBootQueue (size: 0) 2025-06-30T09:18:31.462432Z node 43 :HIVE DEBUG: hive_impl.cpp:229: HIVE#72057594037927937 Handle ProcessWaitQueue (size: 0) 2025-06-30T09:18:31.462437Z node 43 :HIVE DEBUG: hive_impl.cpp:306: HIVE#72057594037927937 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-30T09:18:31.462443Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{15, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-30T09:18:31.462447Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:18:31.462472Z node 43 :HIVE DEBUG: hive_impl.cpp:812: HIVE#72057594037927937 TEvInterconnect::TEvNodeInfo NodeId 44 Location DataCenter: "2" Module: "2" Rack: "2" Unit: "2" 2025-06-30T09:18:31.462482Z node 43 :HIVE TRACE: hive_impl.cpp:332: HIVE#72075186224037888 ProcessBootQueue - executing 2025-06-30T09:18:31.462487Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 
2025-06-30T09:18:31.462491Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:18:31.462495Z node 43 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72075186224037888 THive::TTxProcessBootQueue()::Execute 2025-06-30T09:18:31.462499Z node 43 :HIVE DEBUG: hive_impl.cpp:203: HIVE#72075186224037888 ProcessBootQueue: 0 nodes connected out of 0 2025-06-30T09:18:31.462504Z node 43 :HIVE DEBUG: hive_impl.cpp:220: HIVE#72075186224037888 ProcessBootQueue - waiting until 586524-01-19T08:01:49.551615Z because of warmup, now: 1970-01-01T00:00:00.142448Z 2025-06-30T09:18:31.462511Z node 43 :HIVE DEBUG: hive_impl.cpp:357: HIVE#72075186224037888 PostponeProcessBootQueue (18446744073709.409167s) 2025-06-30T09:18:31.462518Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-30T09:18:31.462522Z node 43 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:18:31.462534Z node 43 :HIVE DEBUG: hive_impl.cpp:812: HIVE#72075186224037888 TEvInterconnect::TEvNodeInfo NodeId 44 Location DataCenter: "2" Module: "2" Rack: "2" Unit: "2" >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query [GOOD] >> THiveTest::TestCreateTabletAndReassignGroups3 [GOOD] >> THiveTest::TestCreateTabletAndReassignGroupsWithReboots >> TPersQueueNewSchemeCacheTest::CheckGrpcWriteNoDC ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::CommonHandler [GOOD] Test command err: 2025-06-30T09:18:09.340098Z :SpecifyClustersExplicitly INFO: Random seed for debugging is 1751275089340090 2025-06-30T09:18:09.509022Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669236488275283:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:09.509058Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:09.530943Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669236311136324:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:09.532398Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:09.569057Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:09.576968Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000f6c/r3tmp/tmpli18W2/pdisk_1.dat 2025-06-30T09:18:09.611550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:09.611591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:09.612872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Connecting -> Connected 2025-06-30T09:18:09.629489Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10198, node 1 2025-06-30T09:18:09.647067Z INFO: TTestServer started on Port 31023 GrpcPort 10198 2025-06-30T09:18:09.647064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/000f6c/r3tmp/yandexaPyydH.tmp 2025-06-30T09:18:09.647076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/000f6c/r3tmp/yandexaPyydH.tmp TClient is connected to server localhost:31023 2025-06-30T09:18:09.658823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/000f6c/r3tmp/yandexaPyydH.tmp 2025-06-30T09:18:09.658965Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration PQClient connected to localhost:10198 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:09.683836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:09.684492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:09.684509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:09.685965Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:09.686330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... waiting... waiting... 2025-06-30T09:18:09.722903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 waiting... 
2025-06-30T09:18:10.704655Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:10.708687Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669240606103793:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:10.708721Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:10.708828Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669240606103815:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:10.712003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:10.712722Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:10.718838Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669240783243452:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:10.719780Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:10.737625Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669240606103817:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-30T09:18:10.877483Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669240606103845:2130] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:10.884313Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669240783243504:2302], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:10.884930Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZmY5NzQ1ZWYtYTllOWNiZmItMmJlZWVhYTAtZWMwYzZmZmM=, ActorId: [1:7521669240783243446:2295], ActorState: ExecuteState, TraceId: 01jz022crr8xdfebwzts3b05hd, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:10.885328Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:10.887876Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669240606103852:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:10.888366Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=OWFhZDljYjgtMzI5MTEwNWYtYzQ2NzNhODYtZTkyZDQwMGM=, ActorId: [2:7521669240606103790:2267], ActorState: ExecuteState, TraceId: 01jz022crg6sp4k8hzre8nsdrr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:10.888483Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:10.888868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:11.224571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at / ... teTime: 2025-06-30T09:18:29.353000Z Ip: "ipv6:[::1]:46074" UncompressedSize: 8 Meta: { "logtype": "unknown", "ident": "unknown", "server": "ipv6:[::1]:46074" } } } } 2025-06-30T09:18:30.459615Z :INFO: [/Root] [/Root] [c5fa5880-65d18e35-90f72fe-f47d924e] Closing read session. Close timeout: 3.000000s 2025-06-30T09:18:30.459623Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-06-30T09:18:30.459630Z :INFO: [/Root] [/Root] [c5fa5880-65d18e35-90f72fe-f47d924e] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1241 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:30.459807Z :INFO: [/Root] [/Root] [c5fa5880-65d18e35-90f72fe-f47d924e] Closing read session. Close timeout: 0.000000s 2025-06-30T09:18:30.459814Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-06-30T09:18:30.459818Z :INFO: [/Root] [/Root] [c5fa5880-65d18e35-90f72fe-f47d924e] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1241 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:30.459849Z :NOTICE: [/Root] [/Root] [c5fa5880-65d18e35-90f72fe-f47d924e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:30.460219Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_3347625816249280348_v1 grpc read done: success# 1, data# { read { } } 2025-06-30T09:18:30.460236Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_3_1_3347625816249280348_v1 grpc closed 2025-06-30T09:18:30.460263Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_3_1_3347625816249280348_v1 is DEAD 2025-06-30T09:18:30.460419Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_3_1_3347625816249280348_v1 2025-06-30T09:18:30.460457Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7521669323513089569:2474] destroyed 2025-06-30T09:18:30.460473Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_3347625816249280348_v1 2025-06-30T09:18:30.460574Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669323513089566:2471] disconnected; active server actors: 1 2025-06-30T09:18:30.460589Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669323513089566:2471] client user disconnected session shared/user_3_1_3347625816249280348_v1 2025-06-30T09:18:30.745788Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710693, task: 1, CA Id [3:7521669327808057032:2502]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-30T09:18:30.777251Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710693, task: 1, CA Id [3:7521669327808057032:2502]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-30T09:18:30.832348Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710693, task: 1, CA Id [3:7521669327808057032:2502]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-30T09:18:31.106291Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.106301Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.106307Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:31.106399Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:31.106526Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:31.106587Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.106680Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: 13. 
Commit offset: 31 2025-06-30T09:18:31.107024Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.107029Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.107033Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:31.107096Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:31.107172Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:31.107211Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.107249Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:18:31.107461Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:18:31.107619Z :INFO: Error decompressing data: (TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check) 2025-06-30T09:18:31.107630Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-3) 2025-06-30T09:18:31.107666Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:18:31.107683Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-30T09:18:31.107688Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-30T09:18:31.107696Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 3, size 16 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { DataDecompressionError: "(TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check)" Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-30T09:18:31.108635Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.108639Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.108643Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:31.115282Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-30T09:18:31.115451Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:31.115511Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.115573Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:18:31.115750Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.115808Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:18:31.115831Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:18:31.115840Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-30T09:18:31.115851Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 2). Partition stream id: 1 2025-06-30T09:18:31.116391Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.116396Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.116400Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:31.116475Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:18:31.116583Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:31.116627Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:31.116673Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:18:31.116789Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:18:31.116830Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:18:31.116884Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-30T09:18:31.116904Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:18:31.116914Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:18:31.116921Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-30T09:18:31.116963Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-30T09:18:31.116972Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-30T09:18:33.123464Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:33.123472Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:33.123477Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:33.123588Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-30T09:18:33.143522Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:18:33.143609Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:33.143850Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:33.143921Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:18:33.143936Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:18:33.143951Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes >> TPersqueueControlPlaneTestSuite::SetupReadLockSessionWithDatabase [GOOD] >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase >> BasicUsage::SelectDatabaseByHash [GOOD] >> BasicUsage::SelectDatabase [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink >> BasicUsage::GetAllStartPartitionSessions >> BasicUsage::WriteSessionCloseWaitsForWrites >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query [GOOD] >> TPersqueueControlPlaneTestSuite::TestAddRemoveReadRule >> KqpPg::TypeCoercionInsert-useSink [GOOD] >> KqpPg::V1CreateTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/unittest >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-30T09:16:41.674913Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668856201733845:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.674938Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cf3/r3tmp/tmpnDDuPH/pdisk_1.dat 2025-06-30T09:16:41.707300Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:16:41.722479Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62847, node 1 2025-06-30T09:16:41.739547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002cf3/r3tmp/yandexX6gVIk.tmp 2025-06-30T09:16:41.739560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002cf3/r3tmp/yandexX6gVIk.tmp 2025-06-30T09:16:41.739633Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002cf3/r3tmp/yandexX6gVIk.tmp 2025-06-30T09:16:41.739684Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:41.745421Z INFO: TTestServer started on Port 20067 GrpcPort 62847 TClient is connected to server localhost:20067 PQClient connected to localhost:62847 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:16:41.777189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.777223Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.778354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.809544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:41.813276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:16:41.820846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-30T09:16:42.058296Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668860496701891:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.058320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668860496701903:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.058333Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.059424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:42.059503Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668860496701933:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.059512Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.061953Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668860496701905:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:16:42.103302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.112393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.128394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.131788Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668860496702172:2555] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521668860496702254:2612] 2025-06-30T09:16:42.677515Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:46.678363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668856201733845:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:46.678414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:16:47.473742Z :WriteToTopic_Demo_18_RestartNo_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:16:47.491565Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:16:47.511594Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:16:47.511757Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521668881971538951:2696] connected; active server actors: 1 2025-06-30T09:16:47.511831Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-30T09:16:47.512063Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:16:47.512130Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:16:47.512171Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-30T09:16:47.512176Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:16:47.512178Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-30T09:16:47.512190Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:16:47.512197Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:47.512208Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-30T09:16:47.512242Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:16:47.512270Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-30T09:16:47.512615Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [1:7521668881971538950:2695], now have 1 active actors on pipe 2025-06-30T09:16:47.516322Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72075186224037892] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 7521668856201734138 RawX2: 4294969439 } TxId: 281474976715674 Config { TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 ... e2461-9b93482d-a3759a2e-bf64ab82] [] Returning serverBytesSize = 0 to budget 2025-06-30T09:18:32.829456Z :DEBUG: [/Root] Decompression task done. Partition/PartitionSessionId: 1 (6-10) 2025-06-30T09:18:32.829469Z :DEBUG: [/Root] [/Root] [390e2461-9b93482d-a3759a2e-bf64ab82] [] Returning serverBytesSize = 0 to budget 2025-06-30T09:18:32.830884Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-30T09:18:32.830992Z :DEBUG: [/Root] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-06-30T09:18:32.831064Z :DEBUG: [/Root] Take Data. Partition 0. Read: {2, 0} (2-2) 2025-06-30T09:18:32.831103Z :DEBUG: [/Root] Take Data. Partition 0. Read: {3, 0} (3-3) 2025-06-30T09:18:32.831732Z :DEBUG: [/Root] Take Data. Partition 0. Read: {3, 1} (4-4) 2025-06-30T09:18:32.831749Z :DEBUG: [/Root] Take Data. Partition 0. Read: {3, 2} (5-5) 2025-06-30T09:18:32.831766Z :DEBUG: [/Root] Take Data. Partition 0. Read: {3, 3} (6-6) 2025-06-30T09:18:32.831784Z :DEBUG: [/Root] Take Data. Partition 0. Read: {3, 4} (7-7) 2025-06-30T09:18:32.833639Z :DEBUG: [/Root] Take Data. Partition 0. Read: {4, 0} (8-8) 2025-06-30T09:18:32.833662Z :DEBUG: [/Root] Take Data. Partition 0. Read: {4, 1} (9-9) 2025-06-30T09:18:32.836573Z :DEBUG: [/Root] Take Data. Partition 0. 
Read: {5, 0} (10-10) 2025-06-30T09:18:32.834883Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 grpc read done: success# 1, data# { read_request { bytes_size: 15001577 } } 2025-06-30T09:18:32.835001Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 got read request: guid# 7e5dc135-89306bbe-14b50b4a-53d70658 2025-06-30T09:18:33.031277Z :DEBUG: [/Root] Take Data. Partition 0. Read: {6, 0} (11-11) 2025-06-30T09:18:33.031317Z :DEBUG: [/Root] [/Root] [390e2461-9b93482d-a3759a2e-bf64ab82] [] The application data is transferred to the client. Number of messages 12, size 15000000 bytes 2025-06-30T09:18:33.031335Z :DEBUG: [/Root] [/Root] [390e2461-9b93482d-a3759a2e-bf64ab82] [] Returning serverBytesSize = 0 to budget 0 12 2025-06-30T09:18:33.033138Z :DEBUG: [/Root] [/Root] [390e2461-9b93482d-a3759a2e-bf64ab82] [] Commit offsets [0, 12). Partition stream id: 1 2025-06-30T09:18:33.035077Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 grpc read done: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 1 offsets { end: 12 } } } } 2025-06-30T09:18:33.035222Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 12 prev 0 end 12 by cookie 2 2025-06-30T09:18:33.035910Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-30T09:18:33.035930Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-30T09:18:33.035983Z node 11 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 12 (startOffset 12) session test-consumer_11_1_4449529521410432441_v1 2025-06-30T09:18:33.036025Z node 11 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-30T09:18:33.039437Z :DEBUG: [/Root] [/Root] [390e2461-9b93482d-a3759a2e-bf64ab82] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 12 } } 2025-06-30T09:18:33.038928Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 12 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:18:33.038985Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:18:33.039013Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:18:33.039037Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-30T09:18:33.039095Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-30T09:18:33.039113Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 12 endOffset 12 with cookie 2 2025-06-30T09:18:33.039129Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 12 2025-06-30T09:18:33.182642Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:11:12 2025-06-30T09:18:33.182666Z :INFO: [/Root] [/Root] [390e2461-9b93482d-a3759a2e-bf64ab82] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1001 BytesRead: 15000000 MessagesRead: 12 BytesReadCompressed: 15000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:33.193743Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 checking auth because of timeout 2025-06-30T09:18:33.193783Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 auth for : test-consumer 2025-06-30T09:18:33.194166Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 Handle describe topics response 2025-06-30T09:18:33.194192Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 auth is DEAD 2025-06-30T09:18:33.194221Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:18:34.185150Z :INFO: [/Root] [/Root] [390e2461-9b93482d-a3759a2e-bf64ab82] Closing read session. 
Close timeout: 0.000000s 2025-06-30T09:18:34.185169Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:11:12 2025-06-30T09:18:34.185181Z :INFO: [/Root] [/Root] [390e2461-9b93482d-a3759a2e-bf64ab82] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2003 BytesRead: 15000000 MessagesRead: 12 BytesReadCompressed: 15000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:34.185207Z :NOTICE: [/Root] [/Root] [390e2461-9b93482d-a3759a2e-bf64ab82] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:18:34.185218Z :DEBUG: [/Root] [/Root] [390e2461-9b93482d-a3759a2e-bf64ab82] [] Abort session to cluster 2025-06-30T09:18:34.185455Z :NOTICE: [/Root] [/Root] [390e2461-9b93482d-a3759a2e-bf64ab82] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:34.185861Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 grpc read done: success# 0, data# { } 2025-06-30T09:18:34.185875Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 grpc read failed 2025-06-30T09:18:34.185883Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 grpc closed 2025-06-30T09:18:34.185894Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_4449529521410432441_v1 is DEAD 2025-06-30T09:18:34.186158Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_4449529521410432441_v1 2025-06-30T09:18:34.186169Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669336074371487:2512] destroyed 2025-06-30T09:18:34.186183Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_4449529521410432441_v1 2025-06-30T09:18:34.186237Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521669336074371484:2509] disconnected; active server actors: 1 2025-06-30T09:18:34.186241Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521669336074371484:2509] client test-consumer disconnected session test-consumer_11_1_4449529521410432441_v1 2025-06-30T09:18:34.191217Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|af6d0365-d4395ca1-eb1c71e1-1054a0b5_0] PartitionId [0] Generation [2] Write session: close. 
Timeout 0.000000s 2025-06-30T09:18:34.191233Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|af6d0365-d4395ca1-eb1c71e1-1054a0b5_0] PartitionId [0] Generation [2] Write session will now close 2025-06-30T09:18:34.191242Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|af6d0365-d4395ca1-eb1c71e1-1054a0b5_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-30T09:18:34.191405Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|af6d0365-d4395ca1-eb1c71e1-1054a0b5_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:34.191410Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|af6d0365-d4395ca1-eb1c71e1-1054a0b5_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-30T09:18:34.194868Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|af6d0365-d4395ca1-eb1c71e1-1054a0b5_0 grpc read done: success: 0 data: 2025-06-30T09:18:34.194880Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|af6d0365-d4395ca1-eb1c71e1-1054a0b5_0 grpc read failed 2025-06-30T09:18:34.194890Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|af6d0365-d4395ca1-eb1c71e1-1054a0b5_0 grpc closed 2025-06-30T09:18:34.194893Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|af6d0365-d4395ca1-eb1c71e1-1054a0b5_0 is DEAD 2025-06-30T09:18:34.195194Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:34.195768Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669327484436811:2481] destroyed 2025-06-30T09:18:34.195784Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
|79.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::SelectDatabase [GOOD] >> THiveTest::TestHiveBalancerDifferentResources [GOOD] >> THiveTest::TestHiveBalancerDifferentResources2 >> KqpPg::TypeCoercionBulkUpsert [GOOD] >> KqpPg::TypeCoercionInsert+useSink >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError [GOOD] >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink >> BasicUsage::PropagateSessionClosed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/unittest >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query [GOOD] Test command err: 2025-06-30T09:16:41.660116Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668858658308258:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.660149Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cf9/r3tmp/tmpLkQxHj/pdisk_1.dat 2025-06-30T09:16:41.692396Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created TServer::EnableGrpc on GrpcPort 30201, node 1 2025-06-30T09:16:41.719609Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:41.719842Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521668858658308236:2080] 1751275001659957 != 1751275001659960 2025-06-30T09:16:41.724624Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002cf9/r3tmp/yandexjnLcAU.tmp 2025-06-30T09:16:41.724639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002cf9/r3tmp/yandexjnLcAU.tmp 2025-06-30T09:16:41.724711Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002cf9/r3tmp/yandexjnLcAU.tmp 2025-06-30T09:16:41.724756Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:41.730991Z INFO: TTestServer started on Port 2855 GrpcPort 30201 TClient is connected to server localhost:2855 PQClient connected to localhost:30201 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:16:41.762520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.762558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.763824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.792469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:41.796233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:16:41.807114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-30T09:16:42.064016Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668862953276308:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.064130Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.064234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668862953276321:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.065102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:42.065467Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668862953276353:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.065479Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.066977Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668862953276323:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:16:42.118444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.126684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.143219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.152700Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668862953276617:2577] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521668862953276685:2612] 2025-06-30T09:16:42.662888Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:46.660616Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668858658308258:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:46.660669Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:16:47.473489Z :Sinks_Oltp_WriteToTopic_1_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:16:47.500429Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:16:47.519831Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521668884428113402:2709] connected; active server actors: 1 2025-06-30T09:16:47.519934Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-30T09:16:47.520165Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:16:47.520217Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-30T09:16:47.520708Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-30T09:16:47.527121Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:16:47.527402Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:16:47.527469Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:16:47.527508Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-30T09:16:47.527512Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:16:47.527515Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-30T09:16:47.527520Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:16:47.527528Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:47.527539Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-30T09:16:47.531537Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493 ... ] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 5 } } 2025-06-30T09:18:34.444733Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 2 consumer test-consumer session test-consumer_11_2_6758681094269292862_v1 checking auth because of timeout 2025-06-30T09:18:34.444767Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_11_2_6758681094269292862_v1 auth for : test-consumer 2025-06-30T09:18:34.445557Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_11_2_6758681094269292862_v1 Handle describe topics response 2025-06-30T09:18:34.445585Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_11_2_6758681094269292862_v1 auth is DEAD 2025-06-30T09:18:34.445604Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 2 consumer test-consumer session test-consumer_11_2_6758681094269292862_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:18:35.458512Z :INFO: [/Root] [/Root] [31925d57-392dc214-8dad85de-270fb5d3] Closing read session. 
Close timeout: 0.000000s 2025-06-30T09:18:35.458531Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_B:0:1:4:5 2025-06-30T09:18:35.458538Z :INFO: [/Root] [/Root] [31925d57-392dc214-8dad85de-270fb5d3] Counters: { Errors: 0 CurrentSessionLifetimeMs: 6026 BytesRead: 50 MessagesRead: 5 BytesReadCompressed: 50 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:35.458562Z :NOTICE: [/Root] [/Root] [31925d57-392dc214-8dad85de-270fb5d3] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:18:35.458571Z :DEBUG: [/Root] [/Root] [31925d57-392dc214-8dad85de-270fb5d3] [] Abort session to cluster 2025-06-30T09:18:35.459057Z :NOTICE: [/Root] [/Root] [31925d57-392dc214-8dad85de-270fb5d3] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:35.462916Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer test-consumer session test-consumer_11_2_6758681094269292862_v1 grpc read done: success# 0, data# { } 2025-06-30T09:18:35.462932Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer test-consumer session test-consumer_11_2_6758681094269292862_v1 grpc read failed 2025-06-30T09:18:35.462941Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer test-consumer session test-consumer_11_2_6758681094269292862_v1 grpc closed 2025-06-30T09:18:35.462960Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer test-consumer session test-consumer_11_2_6758681094269292862_v1 is DEAD 2025-06-30T09:18:35.463646Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037896] Destroy direct read session test-consumer_11_2_6758681094269292862_v1 2025-06-30T09:18:35.463661Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521669320901527496:2544] destroyed 2025-06-30T09:18:35.463681Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_2_6758681094269292862_v1 2025-06-30T09:18:35.463697Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037897][topic_B] pipe [11:7521669320901527493:2541] disconnected; active server actors: 1 2025-06-30T09:18:35.463702Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037897][topic_B] pipe [11:7521669320901527493:2541] client test-consumer disconnected session test-consumer_11_2_6758681094269292862_v1 2025-06-30T09:18:35.463965Z :INFO: [/Root] [/Root] [daa511d0-d249673d-e45f5ca5-61624ecf] Closing read session. Close timeout: 0.000000s 2025-06-30T09:18:35.463990Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:3:4 2025-06-30T09:18:35.464001Z :INFO: [/Root] [/Root] [daa511d0-d249673d-e45f5ca5-61624ecf] Counters: { Errors: 0 CurrentSessionLifetimeMs: 8042 BytesRead: 40 MessagesRead: 4 BytesReadCompressed: 40 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:35.464026Z :NOTICE: [/Root] [/Root] [daa511d0-d249673d-e45f5ca5-61624ecf] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:18:35.464038Z :DEBUG: [/Root] [/Root] [daa511d0-d249673d-e45f5ca5-61624ecf] [] Abort session to cluster 2025-06-30T09:18:35.464192Z :NOTICE: [/Root] [/Root] [daa511d0-d249673d-e45f5ca5-61624ecf] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:35.470408Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|f1914d0e-1de754c7-d669ecf3-7f14175e_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2025-06-30T09:18:35.470426Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|f1914d0e-1de754c7-d669ecf3-7f14175e_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:18:35.470437Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|f1914d0e-1de754c7-d669ecf3-7f14175e_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:18:35.470083Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_15163963060647790593_v1 grpc read done: success# 0, data# { } 2025-06-30T09:18:35.470603Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|f1914d0e-1de754c7-d669ecf3-7f14175e_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:35.470098Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_15163963060647790593_v1 grpc read failed 2025-06-30T09:18:35.470610Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|f1914d0e-1de754c7-d669ecf3-7f14175e_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:18:35.470105Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_11_1_15163963060647790593_v1 grpc closed 2025-06-30T09:18:35.470117Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_15163963060647790593_v1 is DEAD 2025-06-30T09:18:35.470700Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521669312311592802:2506] disconnected; active server actors: 1 2025-06-30T09:18:35.470709Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521669312311592802:2506] client test-consumer disconnected session test-consumer_11_1_15163963060647790593_v1 2025-06-30T09:18:35.470758Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_15163963060647790593_v1 2025-06-30T09:18:35.470766Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669312311592815:2511] destroyed 2025-06-30T09:18:35.470785Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_15163963060647790593_v1 2025-06-30T09:18:35.474966Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message_group_id|f1914d0e-1de754c7-d669ecf3-7f14175e_0 grpc read done: success: 0 data: 2025-06-30T09:18:35.474977Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message_group_id|f1914d0e-1de754c7-d669ecf3-7f14175e_0 grpc read failed 2025-06-30T09:18:35.474987Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message_group_id|f1914d0e-1de754c7-d669ecf3-7f14175e_0 grpc closed 2025-06-30T09:18:35.474991Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message_group_id|f1914d0e-1de754c7-d669ecf3-7f14175e_0 is DEAD 2025-06-30T09:18:35.475298Z node 11 
:PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:35.475309Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:35.479129Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521669312311592791:2503] destroyed 2025-06-30T09:18:35.479152Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521669312311592794:2503] destroyed 2025-06-30T09:18:35.479175Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:18:35.483092Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|7f46652f-a9e1511d-a8f1f7de-71edfe79_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2025-06-30T09:18:35.483107Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|7f46652f-a9e1511d-a8f1f7de-71edfe79_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:18:35.483118Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|7f46652f-a9e1511d-a8f1f7de-71edfe79_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:18:35.483352Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|7f46652f-a9e1511d-a8f1f7de-71edfe79_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:35.483358Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|7f46652f-a9e1511d-a8f1f7de-71edfe79_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:18:35.487066Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|7f46652f-a9e1511d-a8f1f7de-71edfe79_0 grpc read done: success: 0 data: 2025-06-30T09:18:35.487080Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|7f46652f-a9e1511d-a8f1f7de-71edfe79_0 grpc read failed 2025-06-30T09:18:35.487090Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|7f46652f-a9e1511d-a8f1f7de-71edfe79_0 grpc closed 2025-06-30T09:18:35.487093Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|7f46652f-a9e1511d-a8f1f7de-71edfe79_0 is DEAD 2025-06-30T09:18:35.487331Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:35.487343Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:35.487738Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669312311592737:2492] destroyed 2025-06-30T09:18:35.487750Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669312311592740:2492] destroyed 2025-06-30T09:18:35.487763Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> BasicUsage::WaitEventBlocksBeforeDiscovery >> BasicUsage::BasicWriteSession >> BasicUsage::WriteSessionNoAvailableDatabase >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase [GOOD] >> BasicUsage::FallbackToSingleDb >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Table [GOOD] >> BasicUsage::RetryDiscoveryWithCancel >> THiveTest::TestCreateTabletAndReassignGroupsWithReboots [GOOD] >> THiveTest::TestCreateTabletChangeToExternal >> KqpPg::TableArrayInsert+useSink [GOOD] >> KqpPg::TableArrayInsert-useSink >> TSchemeShardSubDomainTest::RestartAtInFly >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query >> TxUsage::WriteToTopic_Demo_24_Table [GOOD] >> TSchemeShardSubDomainTest::SchemeQuotas >> BasicUsage::WriteSessionWriteInHandlers >> TSchemeShardSubDomainTest::CreateAndWait >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] >> THiveTest::TestCreateTabletChangeToExternal [GOOD] >> THiveTest::TestExternalBoot ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase [GOOD] Test command err: === Server->StartServer(false); 2025-06-30T09:18:31.336352Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669329297943282:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:31.337112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:31.407022Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669331695200195:2154];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:31.412082Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:31.673812Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00403d/r3tmp/tmpT8tGuf/pdisk_1.dat 2025-06-30T09:18:31.748581Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:31.895152Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:31.909843Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:31.909869Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:31.910188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:31.910201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:31.915830Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:31.915886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-30T09:18:31.919565Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:31.959415Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 13801, node 1 2025-06-30T09:18:32.018977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/00403d/r3tmp/yandexvdxESx.tmp 2025-06-30T09:18:32.018993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/00403d/r3tmp/yandexvdxESx.tmp 2025-06-30T09:18:32.019059Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/00403d/r3tmp/yandexvdxESx.tmp 2025-06-30T09:18:32.019114Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:32.026300Z INFO: TTestServer started on Port 27193 GrpcPort 13801 TClient is connected to server localhost:27193 PQClient connected to localhost:13801 === TenantModeEnabled() = 1 === Init PQ - start server on port 13801 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:32.179631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:18:32.179707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:32.179775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:18:32.179780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:18:32.180406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:18:32.181029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:32.183557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:18:32.183611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:18:32.183663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:32.183672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:18:32.183674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-30T09:18:32.183678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710657:0 2 -> 3 2025-06-30T09:18:32.185067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:32.185384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:18:32.185388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710657:0 3 -> 128 2025-06-30T09:18:32.185908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, 
txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:18:32.185913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-30T09:18:32.185917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:18:32.186091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:32.186095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:32.186098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-30T09:18:32.186116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-30T09:18:32.200082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:32.207384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-30T09:18:32.207451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:18:32.216269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 1751275112251, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-30T09:18:32.216328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1751275112251 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-30T09:18:32.216337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-30T09:18:32.216431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710657:0 128 -> 240 2025-06-30T09:18:32.216439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-30T09:18:32.216476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-30T09:18:32.216487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, 
pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-30T09:18:32.218079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at sc ... 0T09:18:36.270329Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715664, pathId: [OwnerId: 72057594046644480, LocalPathId: 11], version: 5 2025-06-30T09:18:36.270331Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 11] was 2 2025-06-30T09:18:36.270365Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 12 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715664 2025-06-30T09:18:36.270369Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 12 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715664 2025-06-30T09:18:36.270371Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715664 2025-06-30T09:18:36.270372Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715664, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], version: 2 2025-06-30T09:18:36.270373Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 4 2025-06-30T09:18:36.270376Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715664, subscribers: 1 2025-06-30T09:18:36.270378Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [3:7521669353473484044:2329] 2025-06-30T09:18:36.270756Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715664 2025-06-30T09:18:36.270956Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715664 Create topic result: 1 === EnablePQLogs === CreateChannel === NewStub === InitializeWritePQService === InitializeWritePQService start iteration 2025-06-30T09:18:36.360018Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; === InitializeWritePQService create streamingWriter === InitializeWritePQService Write 2025-06-30T09:18:36.379356Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-30T09:18:36.379371Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-30T09:18:36.379639Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request 
{ topic: "Root/acc/topic1" message_group_id: "12345678" } 2025-06-30T09:18:36.379671Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "Root/acc/topic1" message_group_id: "12345678" from ipv6:[::1]:37170 2025-06-30T09:18:36.379677Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:37170 proto=v1 topic=Root/acc/topic1 durationSec=0 2025-06-30T09:18:36.379680Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:18:36.381697Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:36.384002Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-30T09:18:36.384057Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-30T09:18:36.384060Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-30T09:18:36.384061Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-30T09:18:36.384079Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7521669353473484299:2341] (SourceId=12345678, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-30T09:18:36.384087Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-30T09:18:36.384354Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:821: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 3, Generation: 1 2025-06-30T09:18:36.384434Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 12345678|cbf9f294-ae3ae842-a37633e7-5c567072_0 generated for partition 0 topic 'acc/topic1' owner 12345678 2025-06-30T09:18:36.390148Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: 12345678|cbf9f294-ae3ae842-a37633e7-5c567072_0 2025-06-30T09:18:36.390893Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: 12345678|cbf9f294-ae3ae842-a37633e7-5c567072_0 grpc read done: success: 0 data: 2025-06-30T09:18:36.390905Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: 12345678|cbf9f294-ae3ae842-a37633e7-5c567072_0 grpc read failed 2025-06-30T09:18:36.390974Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 1 sessionId: 12345678|cbf9f294-ae3ae842-a37633e7-5c567072_0 2025-06-30T09:18:36.390979Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: 12345678|cbf9f294-ae3ae842-a37633e7-5c567072_0 is DEAD 2025-06-30T09:18:36.391127Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison Finish: 0 === InitializeWritePQService done === PersQueueClient === InitializePQ completed 2025-06-30T09:18:36.396220Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-30T09:18:36.396238Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-30T09:18:36.396393Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { topic: "topic1" message_group_id: "12345678" } 2025-06-30T09:18:36.396432Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 2 topic: "topic1" message_group_id: "12345678" from ipv6:[::1]:37170 2025-06-30T09:18:36.396443Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:37170 proto=v1 topic=topic1 durationSec=0 2025-06-30T09:18:36.396448Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:18:36.397046Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: describe result for acl check 2025-06-30T09:18:36.397080Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-30T09:18:36.397081Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES 
($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-30T09:18:36.397082Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-30T09:18:36.397095Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7521669353473484319:2350] (SourceId=12345678, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-30T09:18:36.397099Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-30T09:18:36.397202Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:821: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 3, Generation: 1 2025-06-30T09:18:36.397239Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 12345678|e43eb9b0-76f327ca-e323e718-45fad8ba_0 generated for partition 0 topic 'acc/topic1' owner 12345678 2025-06-30T09:18:36.397722Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: 12345678|e43eb9b0-76f327ca-e323e718-45fad8ba_0 2025-06-30T09:18:36.398477Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: 12345678|e43eb9b0-76f327ca-e323e718-45fad8ba_0 grpc read done: success: 0 data: 2025-06-30T09:18:36.398484Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: 12345678|e43eb9b0-76f327ca-e323e718-45fad8ba_0 grpc read failed 2025-06-30T09:18:36.398489Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: 12345678|e43eb9b0-76f327ca-e323e718-45fad8ba_0 grpc closed 2025-06-30T09:18:36.398493Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: 12345678|e43eb9b0-76f327ca-e323e718-45fad8ba_0 is DEAD 2025-06-30T09:18:36.398723Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison |79.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |79.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |79.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest >> TSchemeShardSubDomainTest::RestartAtInFly [GOOD] >> TxUsage::WriteToTopic_Demo_24_Query >> THiveTest::TestExternalBoot [GOOD] >> TPersqueueControlPlaneTestSuite::TestAddRemoveReadRule [GOOD] >> TSchemeShardSubDomainTest::CreateAndWait [GOOD] >> TPersqueueDataPlaneTestSuite::WriteSession >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RestartAtInFly 
[GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:37.386215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:37.386247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:37.386255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:37.386262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:37.386276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:37.386282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:37.386292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:37.386310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:37.386459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:37.386539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:37.403921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:37.403952Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:37.408003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:37.408075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:37.408113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:37.409641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:37.409728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:37.409868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:37.409962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: 
[OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:37.410800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:37.410850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:37.411169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:37.411183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:37.411210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:37.411220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:37.411227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:37.411249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.412602Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:37.432087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:37.432172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.432232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:37.432240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:37.432278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:37.432287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:37.433087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 
72057594046678944 2025-06-30T09:18:37.433138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:37.433197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.433207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:37.433211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:37.433216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:37.433664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.433678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:37.433683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:37.434410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.434421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.434426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:37.434432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:37.435013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:37.435374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:37.435409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:37.435566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:37.435593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 
4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:37.435602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:37.435651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:37.435656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:37.435700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:37.435711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:37.436096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:37.436108Z node 1 :FLAT_TX_SCHEMESHARD ... tPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-30T09:18:37.485432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-30T09:18:37.485485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:18:37.485528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:18:37.485532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:18:37.485545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at 
schemeshard: 72057594046678944 2025-06-30T09:18:37.485640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4706: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4765: KMeansTreeCluster records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4854: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4881: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.485891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4908: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.487199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:37.487762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:37.487777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:37.487881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:37.487893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:37.487899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: 
TTxServerlessStorageBilling.Complete 2025-06-30T09:18:37.488065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 100 2025-06-30T09:18:37.550927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-30T09:18:37.550948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 Leader for TabletID 72057594046678944 is [1:465:2413] sender: [1:529:2058] recipient: [1:15:2062] 2025-06-30T09:18:37.551125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-30T09:18:37.551163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-30T09:18:37.551168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:527:2461] TestWaitNotification: OK eventTxId 100 2025-06-30T09:18:37.551278Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:37.551332Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 76us result status StatusSuccess 2025-06-30T09:18:37.551463Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } 
PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:37.551540Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:37.551558Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 20us result status StatusSuccess 2025-06-30T09:18:37.551621Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 8772, MsgBus: 15464 2025-06-30T09:18:24.689265Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669301736567811:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:24.689385Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003340/r3tmp/tmpVAy37C/pdisk_1.dat 2025-06-30T09:18:24.748762Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8772, node 1 2025-06-30T09:18:24.771935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:24.771950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:24.771963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:24.772011Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15464 2025-06-30T09:18:24.792258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:24.792285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:24.793379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15464 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:24.862495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:25.055861Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669306031535696:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.055864Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669306031535708:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.055883Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.056551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:25.058243Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669306031535710:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:25.136411Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669306031535761:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:25.147373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 26116, MsgBus: 23219 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003340/r3tmp/tmpTZjCvF/pdisk_1.dat 2025-06-30T09:18:25.407901Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:18:25.427929Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26116, node 2 2025-06-30T09:18:25.454978Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:25.454995Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:25.454998Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:25.455056Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23219 2025-06-30T09:18:25.507021Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:25.507056Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:25.513341Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23219 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:25.559513Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:25.561340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:25.916681Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669305245433572:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.916715Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.916822Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669305245433607:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.917877Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:25.921077Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:18:25.921165Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669305245433609:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:25.996309Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669305245433660:2329] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:26.004315Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/sc ... 9:18:35.370832Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:35.372911Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:35.734054Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521669348272720237:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:35.734100Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:35.734271Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521669348272720264:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:35.735249Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:35.743010Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:18:35.743114Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7521669348272720266:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:35.807876Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7521669348272720317:2329] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:35.814053Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:35.890119Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:35.912984Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7521669348272720554:2319], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2025-06-30T09:18:35.913104Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=10&id=MWI2ZDE1MzAtMTdlNzRkNzUtMjkyYjlhZDctMzkwNWQ1NTk=, ActorId: [10:7521669348272720552:2318], ActorState: ExecuteState, TraceId: 01jz0235c37vrq86jfs8d11jcz, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-30T09:18:36.207556Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 20506, MsgBus: 5642 2025-06-30T09:18:36.418423Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7521669350473669228:2191];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:36.420782Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003340/r3tmp/tmpROgFL5/pdisk_1.dat 2025-06-30T09:18:36.450447Z node 11 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20506, node 11 2025-06-30T09:18:36.502716Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:36.502748Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:36.502751Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:36.502809Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5642 2025-06-30T09:18:36.525739Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:36.525765Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:36.526774Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5642 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:18:36.568447Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:36.570638Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:36.897039Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7521669350473669662:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.897068Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.897184Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7521669350473669697:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.898163Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:36.901303Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7521669350473669699:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:36.976001Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7521669350473669750:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:36.982211Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.005270Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.024232Z node 11 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [11:7521669354768637280:2319], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2025-06-30T09:18:37.024351Z node 11 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=11&id=YzA4OGU2ODctZjY1NmFhYjUtNDliMTg2YjktMjM1M2M5ZmQ=, ActorId: [11:7521669354768637278:2318], ActorState: ExecuteState, TraceId: 01jz0236ew03pfmx8fxpwztc29, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: |79.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_system_names/ydb-core-tx-schemeshard-ut_system_names |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_system_names/ydb-core-tx-schemeshard-ut_system_names >> TAsyncIndexTests::SplitBothWithReboots[PipeResets] |79.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_system_names/ydb-core-tx-schemeshard-ut_system_names >> TVectorIndexTests::CreateTableCoveredEmbedding ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateAndWait [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:37.521809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:37.521837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:37.521843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:37.521849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:37.521862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:37.521867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:37.521879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:37.521894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:37.522041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:37.522119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:37.541465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:37.541493Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:37.544628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:37.544691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:37.544717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:37.546454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:37.546548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:37.546671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:37.546784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:37.547671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:37.547724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:37.548060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:37.548074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:37.548105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:37.548115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:37.548123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:37.548146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.549738Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:37.571143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:37.571260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.571373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 
0 2025-06-30T09:18:37.571383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:37.571438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:37.571453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:37.572465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:37.572533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:37.572609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.572623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:37.572630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:37.572638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:37.573224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.573238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:37.573248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:37.573611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.573623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.573631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:37.573639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:37.574411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 
2025-06-30T09:18:37.574885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:37.574928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:37.575155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:37.575187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:37.575196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:37.575264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:37.575272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:37.575311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:37.575325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:37.575926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:37.575941Z node 1 :FLAT_TX_SCHEMESHARD ... 
, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.592978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-30T09:18:37.592992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:18:37.592997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:18:37.593003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:18:37.593008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:18:37.593013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-30T09:18:37.593020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:18:37.593026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-30T09:18:37.593032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 101:0 2025-06-30T09:18:37.593056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:18:37.593062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-30T09:18:37.593067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-30T09:18:37.593071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-30T09:18:37.593282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:18:37.593302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:18:37.593309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-30T09:18:37.593314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-30T09:18:37.593319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:18:37.593429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 
Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:18:37.593441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:18:37.593445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-30T09:18:37.593450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-30T09:18:37.593454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:18:37.593463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-30T09:18:37.594167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:18:37.594413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 100, wait until txId: 101 TestModificationResults wait txId: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-30T09:18:37.594493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-30T09:18:37.594502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-30T09:18:37.594519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-30T09:18:37.594523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-30T09:18:37.594629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-30T09:18:37.594651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-30T09:18:37.594659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:336:2325] 2025-06-30T09:18:37.594694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-30T09:18:37.594713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:18:37.594717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:336:2325] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-30T09:18:37.594811Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:37.594856Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/USER_0" took 55us result status StatusSuccess 2025-06-30T09:18:37.594988Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:37.595075Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:37.595093Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir" took 19us result status StatusSuccess 2025-06-30T09:18:37.595147Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir" PathDescription { Self { Name: "dir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } 
ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |79.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |79.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError [GOOD] Test command err: === Server->StartServer(false); 2025-06-30T09:18:32.819725Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669333815039092:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:32.820078Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:32.829756Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669335809023252:2159];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:32.830532Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004034/r3tmp/tmpqDRnsj/pdisk_1.dat 2025-06-30T09:18:33.044311Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:33.045801Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:33.147213Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:33.221920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:33.222237Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 12222, node 1 2025-06-30T09:18:33.231009Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:33.231245Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:33.259020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004034/r3tmp/yandexSL4GTm.tmp 2025-06-30T09:18:33.259034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004034/r3tmp/yandexSL4GTm.tmp 2025-06-30T09:18:33.259089Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004034/r3tmp/yandexSL4GTm.tmp 2025-06-30T09:18:33.259138Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:33.285635Z INFO: TTestServer started on Port 14081 GrpcPort 12222 2025-06-30T09:18:33.286981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:33.287012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:33.299841Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14081 PQClient connected to localhost:12222 === TenantModeEnabled() = 1 === Init PQ - start server on port 12222 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:33.438595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:18:33.438665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.438728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:18:33.438838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:18:33.438889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:18:33.438903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:33.443103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:18:33.443155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:18:33.443205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.443215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:18:33.443217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-30T09:18:33.443220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715657:0 2 -> 3 waiting... 
2025-06-30T09:18:33.447255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.447268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:18:33.447273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715657:0 3 -> 128 2025-06-30T09:18:33.455014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:33.455025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-30T09:18:33.455028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:33.457550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.457560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.457565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-30T09:18:33.457583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-30T09:18:33.465806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:33.467630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-30T09:18:33.467674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:18:33.472340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 1751275113511, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-30T09:18:33.472384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1751275113511 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-30T09:18:33.472391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-30T09:18:33.472447Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715657:0 128 -> 240 2025-06-30T09:18:33.472453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-30T09:18:33.472758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-30T09:18:33.472767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-30T09:18:33.477751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-30T09:18:33.477759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TT ... DIFY PERMISSIONS 2025-06-30T09:18:37.233733Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root/acc" OperationType: ESchemeOpModifyACL ModifyACL { Name: "topic1" DiffACL: "\n\027\010\001\022\023\032\021test_user@builtin\n\037\010\000\022\033\010\001\020\366\213\001\032\021test_user@builtin \003" } } TxId: 281474976715665 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:43720" , at schemeshard: 72057594046644480 2025-06-30T09:18:37.233802Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_modify_acl.cpp:33: TModifyACL Propose, path: /Root/acc/topic1, operationId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-30T09:18:37.233842Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5309: ExamineTreeVFS visit path id [OwnerId: 72057594046644480, LocalPathId: 10] name: topic1 type: EPathTypePersQueueGroup state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046644480, LocalPathId: 9] 2025-06-30T09:18:37.233845Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5325: ExamineTreeVFS run path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-30T09:18:37.233896Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715665:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046644480 2025-06-30T09:18:37.233906Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:18:37.233931Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715665:0 progress is 1/1 2025-06-30T09:18:37.233935Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 2025-06-30T09:18:37.233939Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715665:0 progress is 1/1 2025-06-30T09:18:37.233941Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 
2025-06-30T09:18:37.233977Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 3 2025-06-30T09:18:37.233995Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715665, ready parts: 1/1, is published: false 2025-06-30T09:18:37.234005Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 10], at schemeshard: 72057594046644480 2025-06-30T09:18:37.234008Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 2025-06-30T09:18:37.234013Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715665:0 2025-06-30T09:18:37.234019Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976715665, publications: 1, subscribers: 0 2025-06-30T09:18:37.234022Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976715665, [OwnerId: 72057594046644480, LocalPathId: 10], 3 2025-06-30T09:18:37.235175Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976715665, response: Status: StatusSuccess TxId: 281474976715665 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-30T09:18:37.235274Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715665, database: /Root, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/acc/topic1, add access: +W:test_user@builtin, remove access: -():test_user@builtin:- 2025-06-30T09:18:37.235332Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-30T09:18:37.235335Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715665, path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-30T09:18:37.235393Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-30T09:18:37.235398Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7521669349572565716:2372], at schemeshard: 72057594046644480, txId: 281474976715665, path id: 10 2025-06-30T09:18:37.235793Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715665 2025-06-30T09:18:37.235810Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715665 2025-06-30T09:18:37.235813Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 
281474976715665 2025-06-30T09:18:37.235820Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715665, pathId: [OwnerId: 72057594046644480, LocalPathId: 10], version: 3 2025-06-30T09:18:37.235825Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 4 2025-06-30T09:18:37.235857Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715665, subscribers: 0 2025-06-30T09:18:37.236861Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-30T09:18:37.236874Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-30T09:18:37.236995Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { topic: "/Root/acc/topic1" message_group_id: "test-message-group" } 2025-06-30T09:18:37.237023Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 2 topic: "/Root/acc/topic1" message_group_id: "test-message-group" from ipv6:[::1]:43714 2025-06-30T09:18:37.237028Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:43714 proto=v1 topic=/Root/acc/topic1 durationSec=0 2025-06-30T09:18:37.237045Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:18:37.237415Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: describe result for acl check 2025-06-30T09:18:37.237466Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-30T09:18:37.237475Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-30T09:18:37.237477Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-30T09:18:37.237491Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7521669353867534044:2351] (SourceId=test-message-group, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-30T09:18:37.237496Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: 
ProceedPartition. session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-30T09:18:37.237740Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:821: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 3, Generation: 1 2025-06-30T09:18:37.237865Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie test-message-group|d6633972-27be784b-c8b59068-76edcf3d_0 generated for partition 0 topic 'acc/topic1' owner test-message-group 2025-06-30T09:18:37.238085Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: test-message-group|d6633972-27be784b-c8b59068-76edcf3d_0 2025-06-30T09:18:37.238548Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-message-group|d6633972-27be784b-c8b59068-76edcf3d_0 grpc read done: success: 1 data: update_token_request [content omitted] 2025-06-30T09:18:37.238672Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1346: updating token 2025-06-30T09:18:37.238679Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:18:37.238963Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: test-message-group|d6633972-27be784b-c8b59068-76edcf3d_0 describe result for acl check 2025-06-30T09:18:37.239001Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 2 reason: access to topic 'Topic /Root/acc/topic1 in database: /Root' denied for 'test_user_2@builtin' due to 'no WriteTopic rights', Marker# PQ1125 sessionId: test-message-group|d6633972-27be784b-c8b59068-76edcf3d_0 2025-06-30T09:18:37.239138Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-message-group|d6633972-27be784b-c8b59068-76edcf3d_0 is DEAD 2025-06-30T09:18:37.239221Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:37.239646Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715665 2025-06-30T09:18:37.460804Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TAsyncIndexTests::SplitIndexWithReboots[TabletReboots] |79.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest >> TSchemeShardTestExtSubdomainReboots::AlterSchemeLimits [GOOD] >> KqpPg::PgCreateTable [GOOD] >> KqpPg::PgUpdate+useSink >> THiveTest::TestHiveBalancerDifferentResources2 [GOOD] >> THiveTest::TestHiveBalancerUselessNeighbourMoves >> TVectorIndexTests::CreateTableCoveredEmbedding [GOOD] |79.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] >> KqpPg::InsertFromSelect_Simple-useSink [GOOD] >> KqpPg::InsertFromSelect_NoReorder-useSink >> TSchemeShardSubDomainTest::SchemeQuotas [GOOD] |79.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |79.4%| [LD] {RESULT} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableCoveredEmbedding [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:38.361011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:38.361043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:38.361051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:38.361057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:38.361064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:38.361069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:38.361080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:38.361095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:38.361223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:38.361312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:38.379203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:38.379228Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:38.383359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:38.383459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:38.383525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:38.386101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:38.386221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:38.386374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: 
TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:38.386490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:38.387798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:38.387888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:38.388275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:38.388293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:38.388322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:38.388333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:38.388340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:38.388384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.390317Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:38.415075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:38.415173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.415249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:38.415256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:38.415310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:38.415327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
2025-06-30T09:18:38.416150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:38.416206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:38.416277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.416289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:38.416296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:38.416304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:38.416876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.416892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:38.416900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:38.417355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.417369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.417377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:38.417385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:38.418227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:38.418821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:38.418878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:38.419121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at 
schemeshard: 72057594046678944 2025-06-30T09:18:38.419154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:38.419168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:38.419256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:38.419265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:38.419306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:38.419321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:38.419825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:38.419836Z node 1 :FLAT_TX_SCHEMESHARD ... ts: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "idx_vector" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "embedding" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataColumnNames: "embedding" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 
ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:38.599939Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector/indexImplLevelTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:18:38.599963Z 
node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/idx_vector/indexImplLevelTable" took 25us result status StatusSuccess 2025-06-30T09:18:38.600042Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/idx_vector/indexImplLevelTable" PathDescription { Self { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplLevelTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_id" Type: "Uint64" TypeId: 4 Id: 2 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_centroid" Type: "String" TypeId: 4097 Id: 3 NotNull: true IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "__ydb_id" KeyColumnIds: 1 KeyColumnIds: 2 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:38.600103Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector/indexImplPostingTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:18:38.600116Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 
72057594046678944 describe path "/MyRoot/vectors/idx_vector/indexImplPostingTable" took 15us result status StatusSuccess 2025-06-30T09:18:38.600156Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/idx_vector/indexImplPostingTable" PathDescription { Self { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplPostingTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "id" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "embedding" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "id" KeyColumnIds: 1 KeyColumnIds: 2 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TPersqueueDataPlaneTestSuite::WriteSession [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> THiveTest::TestExternalBoot [GOOD] Test command err: 2025-06-30T09:17:51.439372Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:51.467167Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: 
"SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:51.467261Z node 3 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 3 PDiskId# 1 Path# "SectorMap:2:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:51.467527Z node 3 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:51.467589Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:51.468703Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:74:2076] ControllerId# 72057594037932033 2025-06-30T09:17:51.468710Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:51.468736Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:51.468998Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:51.493343Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:51.493368Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:51.502095Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:81:2080] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.502684Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:82:2081] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.502714Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:83:2082] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.502983Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:84:2083] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.503681Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:85:2084] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.503946Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:86:2085] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.504217Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:87:2086] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.504224Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-30T09:17:51.504247Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [3:74:2076] 2025-06-30T09:17:51.504253Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [3:74:2076] 2025-06-30T09:17:51.504265Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-30T09:17:51.504275Z node 3 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:17:51.505485Z node 3 :BS_NODE DEBUG: 
{NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:17:51.505516Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:51.517017Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:51.517073Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:51.517214Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:51.517534Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:17:51.519258Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-30T09:17:51.519272Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:51.521699Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:98:2078] ControllerId# 72057594037932033 2025-06-30T09:17:51.521707Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:51.521723Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:51.521954Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:51.531551Z node 3 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:51.539115Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [3:74:2076] 2025-06-30T09:17:51.539371Z node 3 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:51.539377Z node 3 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:51.539423Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:67:2065] 2025-06-30T09:17:51.539430Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:67:2065] 2025-06-30T09:17:51.539440Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:17:51.593633Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:51.593658Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 
fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:51.594161Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:105:2083] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.594204Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:106:2084] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.594237Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:107:2085] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.594270Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:108:2086] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.594297Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:109:2087] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.594325Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:110:2088] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.594353Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:111:2089] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.594363Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-30T09:17:51.594382Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:98:2078] 2025-06-30T09:17:51.594388Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:98:2078] 2025-06-30T09:17:51.594399Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-30T09:17:51.594408Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:17:51.594627Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:17:51.594646Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:51.606085Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:51.606146Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:51.606298Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:51.606352Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:51.608529Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:122:2077] ControllerId# 72057594037932033 2025-06-30T09:17:51.608536Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:51.608554Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} 
StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:51.608785Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:51.615112Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:17:51.687810Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:51.687843Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:51.692939Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:121:2076] Create Queue# [2:129:2081] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.693167Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:121:2076] Create Queue# [2:130:2082] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.694429Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:121:2076] C ... ge::TEvVPut# {ID# [72057594037927937:2:4:0:0:698:1] FDS# 698 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-30T09:18:37.629117Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [6173685a7ad4b3c4] received {EvVPutResult Status# OK ID# [72057594037927937:2:4:0:0:698:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 19 } Cost# 85496 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 20 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-30T09:18:37.629149Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [6173685a7ad4b3c4] Result# TEvPutResult {Id# [72057594037927937:2:4:0:0:698:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-30T09:18:37.629158Z node 29 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [6173685a7ad4b3c4] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:4:0:0:698:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-30T09:18:37.629188Z node 29 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.152 sample PartId# [72057594037927937:2:4:0:0:698:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 29 } TEvVPutResult{ TimestampMs# 0.717 VDiskId# [0:1:0:0:0] NodeId# 29 Status# OK } ] } 2025-06-30T09:18:37.629215Z node 29 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:4:0:0:698:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-30T09:18:37.629251Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} commited cookie 1 for step 4 2025-06-30T09:18:37.629269Z node 29 :HIVE DEBUG: tx__create_tablet.cpp:503: HIVE#72057594037927937 THive::TTxCreateTablet::Complete (72057594037927937,0) TabletId: 72075186224037888 SideEffects: {Notifications: 0x10040201 [29:269:2261] {EvCreateTabletReply Status: OK Owner: 72057594037927937 OwnerIdx: 0 TabletID: 72075186224037888 Origin: 72057594037927937}} 2025-06-30T09:18:37.629302Z node 29 :HIVE DEBUG: tx__update_tablet_groups.cpp:332: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{127324277099488}(72075186224037888)::Complete SideEffects: {Notifications: 0x10040207 [29:269:2261] {EvTabletCreationResult Status: OK TabletID: 72075186224037888} Callbacks: 1 Actions: 
NKikimr::TTabletKillRequest} 2025-06-30T09:18:37.629358Z node 29 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:18:37.629382Z node 29 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-30T09:18:37.629391Z node 29 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-30T09:18:37.629398Z node 29 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-30T09:18:37.629408Z node 29 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:18:37.629420Z node 29 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:18:37.629428Z node 29 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:18:37.629484Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [29:324:2299] 2025-06-30T09:18:37.629491Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [29:324:2299] 2025-06-30T09:18:37.629501Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [29:324:2299] 2025-06-30T09:18:37.629516Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:18:37.629527Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 29 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [29:278:2266] 2025-06-30T09:18:37.629538Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [29:324:2299] 2025-06-30T09:18:37.629547Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [29:324:2299] 2025-06-30T09:18:37.629553Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [29:324:2299] 2025-06-30T09:18:37.629569Z node 29 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [29:324:2299] 2025-06-30T09:18:37.629596Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [29:324:2299] 2025-06-30T09:18:37.629602Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [29:324:2299] 2025-06-30T09:18:37.629610Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [29:324:2299] 2025-06-30T09:18:37.629618Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [29:324:2299] 2025-06-30T09:18:37.629623Z node 29 :PIPE_CLIENT 
DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [29:324:2299] 2025-06-30T09:18:37.629633Z node 29 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [29:323:2298] EventType# 268697624 2025-06-30T09:18:37.629647Z node 29 :HIVE TRACE: hive_impl.cpp:118: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([29:324:2299]) [29:325:2300] 2025-06-30T09:18:37.629673Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{6, NKikimr::NHive::TTxStartTablet} queued, type NKikimr::NHive::TTxStartTablet 2025-06-30T09:18:37.629680Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{6, NKikimr::NHive::TTxStartTablet} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:18:37.629693Z node 29 :HIVE DEBUG: tx__start_tablet.cpp:31: HIVE#72057594037927937 THive::TTxStartTablet::Execute Tablet (72075186224037888,0) 2025-06-30T09:18:37.629753Z node 29 :HIVE DEBUG: tx__start_tablet.cpp:73: HIVE#72057594037927937 THive::TTxStartTablet::Execute, Sending TEvBootTablet(Dummy.72075186224037888.Leader.1) to node 29 storage {Version# 1 TabletID# 72075186224037888 TabletType# Dummy Channels# {0:{Channel# 0 Type# none StoragePool# def1 History# {0:{FromGeneration# 0 GroupID# 2147483648 Timestamp# 1970-01-01T00:00:00.059536Z}}, 1:{Channel# 1 Type# none StoragePool# def2 History# {0:{FromGeneration# 0 GroupID# 2147483649 Timestamp# 1970-01-01T00:00:00.059536Z}}, 2:{Channel# 2 Type# none StoragePool# def3 History# {0:{FromGeneration# 0 GroupID# 2147483650 Timestamp# 1970-01-01T00:00:00.059536Z}}} Tenant: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:37.629778Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{6, NKikimr::NHive::TTxStartTablet} hope 1 -> done Change{6, redo 144b alter 0b annex 0, ~{ 1, 16 } -{ }, 0 gb} 2025-06-30T09:18:37.629788Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{6, NKikimr::NHive::TTxStartTablet} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:18:37.640165Z node 29 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [c8d415ebd9884d79] bootstrap ActorId# [29:327:2302] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:5:0:0:126:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-30T09:18:37.640225Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [c8d415ebd9884d79] Id# [72057594037927937:2:5:0:0:126:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-30T09:18:37.640236Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [c8d415ebd9884d79] restore Id# [72057594037927937:2:5:0:0:126:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-30T09:18:37.640249Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [c8d415ebd9884d79] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:5:0:0:126:1] Marker# BPG33 2025-06-30T09:18:37.640257Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [c8d415ebd9884d79] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:5:0:0:126:1] Marker# BPG32 2025-06-30T09:18:37.640296Z node 29 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [29:36:2080] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:5:0:0:126:1] FDS# 126 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-30T09:18:37.640961Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [c8d415ebd9884d79] received {EvVPutResult 
Status# OK ID# [72057594037927937:2:5:0:0:126:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 20 } Cost# 80992 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 21 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-30T09:18:37.641006Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [c8d415ebd9884d79] Result# TEvPutResult {Id# [72057594037927937:2:5:0:0:126:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-30T09:18:37.641018Z node 29 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [c8d415ebd9884d79] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:5:0:0:126:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-30T09:18:37.641072Z node 29 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.197 sample PartId# [72057594037927937:2:5:0:0:126:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 29 } TEvVPutResult{ TimestampMs# 0.883 VDiskId# [0:1:0:0:0] NodeId# 29 Status# OK } ] } 2025-06-30T09:18:37.641122Z node 29 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:5:0:0:126:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-30T09:18:37.641164Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} commited cookie 1 for step 5 2025-06-30T09:18:37.641254Z node 29 :HIVE DEBUG: tx__start_tablet.cpp:122: HIVE#72057594037927937 THive::TTxStartTablet::Complete Tablet (72075186224037888,0) SideEffects: {Notifications: 0x10080002 [29:323:2298] NKikimrLocal.TEvBootTablet Info { TabletID: 72075186224037888 Channels { Channel: 0 ChannelType: 0 History { FromGeneration: 0 GroupID: 2147483648 } StoragePool: "def1" } Channels { Channel: 1 ChannelType: 0 History { FromGeneration: 0 GroupID: 2147483649 } StoragePool: "def2" } Channels { Channel: 2 ChannelType: 0 History { FromGeneration: 0 GroupID: 2147483650 } StoragePool: "def3" } TabletType: Dummy Version: 1 TenantIdOwner: 72057594046678944 TenantIdLocalId: 1 } SuggestedGeneration: 1 BootMode: BOOT_MODE_LEADER FollowerId: 0} |79.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest |79.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:37.451754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:37.451777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:37.451782Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:37.451786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:37.451796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:37.451800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:37.451808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:37.451821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:37.451931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:37.451986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:37.464883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:37.464905Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:37.468439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:37.468512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:37.468547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:37.470165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:37.470247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:37.470374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:37.470455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:37.471082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:37.471128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:37.471409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:37.471421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:37.471445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
2025-06-30T09:18:37.471452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:37.471456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:37.471472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.472751Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:37.491633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:37.491716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.491783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:37.491791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:37.491827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:37.491838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:37.492712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:37.492762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:37.492833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.492845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:37.492852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:37.492859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 
2025-06-30T09:18:37.493307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.493317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:37.493325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:37.493625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.493632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:37.493637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:37.493643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:37.494415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:37.494882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:37.494928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:37.495136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:37.495159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:37.495166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:37.495214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:37.495220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:37.495248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:37.495257Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:37.495634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:37.495642Z node 1 :FLAT_TX_SCHEMESHARD ... 137:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:38.840237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 11 2025-06-30T09:18:38.840248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 3 2025-06-30T09:18:38.840752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 137, response: Status: StatusAccepted TxId: 137 SchemeshardId: 72057594046678944 PathId: 10, at schemeshard: 72057594046678944 2025-06-30T09:18:38.840800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 137, database: /MyRoot/USER_0, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /MyRoot/USER_0/Table11 2025-06-30T09:18:38.840854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:38.840861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 137, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:18:38.840918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 137, path id: [OwnerId: 72057594046678944, LocalPathId: 10] 2025-06-30T09:18:38.840934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:38.840940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:1023:2883], at schemeshard: 72057594046678944, txId: 137, path id: 2 2025-06-30T09:18:38.840947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:1023:2883], at schemeshard: 72057594046678944, txId: 137, path id: 10 2025-06-30T09:18:38.841067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 137:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.841078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 137:0 ProgressState, operation type: TxCreateTable, at tablet# 72057594046678944 2025-06-30T09:18:38.841123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 137:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046678944 OwnerIdx: 10 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 10 
BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-30T09:18:38.841265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 18 PathOwnerId: 72057594046678944, cookie: 137 2025-06-30T09:18:38.841279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 18 PathOwnerId: 72057594046678944, cookie: 137 2025-06-30T09:18:38.841284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 137 2025-06-30T09:18:38.841290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 137, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18 2025-06-30T09:18:38.841296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 12 2025-06-30T09:18:38.841469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 10 Version: 1 PathOwnerId: 72057594046678944, cookie: 137 2025-06-30T09:18:38.841482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 10 Version: 1 PathOwnerId: 72057594046678944, cookie: 137 2025-06-30T09:18:38.841487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 137 2025-06-30T09:18:38.841492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 137, pathId: [OwnerId: 72057594046678944, LocalPathId: 10], version: 1 2025-06-30T09:18:38.841497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 4 2025-06-30T09:18:38.841509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 137, ready parts: 0/1, is published: true 2025-06-30T09:18:38.842169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 137:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:10 msg type: 268697601 2025-06-30T09:18:38.842205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 137, partId: 0, tablet: 72057594037968897 2025-06-30T09:18:38.842213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1790: TOperation RegisterRelationByShardIdx, TxId: 137, shardIdx: 72057594046678944:10, partId: 0 2025-06-30T09:18:38.842295Z node 1 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72057594046678944 OwnerIdx: 10 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } 
ObjectId: 10 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-30T09:18:38.842347Z node 1 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72057594046678944, OwnerIdx 10, type DataShard, boot OK, tablet id 72075186233409555 2025-06-30T09:18:38.842367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5958: Handle TEvCreateTabletReply at schemeshard: 72057594046678944 message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-30T09:18:38.842374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1804: TOperation FindRelatedPartByShardIdx, TxId: 137, shardIdx: 72057594046678944:10, partId: 0 2025-06-30T09:18:38.842390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 137:0, at schemeshard: 72057594046678944, message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-30T09:18:38.842397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 137:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046678944 2025-06-30T09:18:38.842405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:180: TCreateParts opId# 137:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-30T09:18:38.842427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 137:0 2 -> 3 2025-06-30T09:18:38.843192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 137 2025-06-30T09:18:38.843377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 137 2025-06-30T09:18:38.843894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 137:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.843946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 137:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.843955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_table.cpp:197: TCreateTable TConfigureParts operationId# 137:0 ProgressState at tabletId# 72057594046678944 2025-06-30T09:18:38.843969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:217: TCreateTable TConfigureParts operationId# 137:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409555 seqNo: 4:5 2025-06-30T09:18:38.844054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:233: TCreateTable TConfigureParts operationId# 137:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409555 message: TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 971 RawX2: 4294970138 } TxBody: "\n\236\004\n\007Table11\020\n\032\r\n\003key\030\002 \001(\000@\000\032\020\n\005Value\030\200$ \002(\000@\000(\001:\262\003\022\253\003\010\200\200\200\002\020\254\002\030\364\003 \200\200\200\010(\0000\200\200\200 8\200\200\200\010@\2008H\000RX\010\000\020\000\030\010 
\010(\200\200\200@0\377\377\377\377\0178\001B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen1P\nX\200\200\001`nh\000p\000Rb\010\001\020\200\200\200\024\030\005 \020(\200\200\200\200\0020\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen2P\nX\200\200\001`nh\200\200\200\004p\200\200\200\004Rc\010\002\020\200\200\200\310\001\030\005 \020(\200\200\200\200@0\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen3P\nX\200\200\001`nh\200\200\200(p\200\200\200(X\001`\005j$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionr\017compaction_gen0z\017compaction_gen0\202\001\004scan\210\001\200\200\200\010\220\001\364\003\230\0012\270\001\2008\300\001\006R\002\020\001J\026/MyRoot/USER_0/Table11\242\001\006\001\000\000\000\000\200\252\001\000\260\001\001\270\001\000\210\002\001\222\002\013\t\240\207\205\000\000\000\000\001\020\n:\004\010\004\020\005" TxId: 137 ExecLevel: 0 Flags: 0 SchemeShardId: 72057594046678944 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } SubDomainPathId: 2 2025-06-30T09:18:38.844968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 137:0 from tablet: 72057594046678944 to tablet: 72075186233409555 cookie: 72057594046678944:10 msg type: 269549568 2025-06-30T09:18:38.845011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 137, partId: 0, tablet: 72075186233409555 TestModificationResult got TxId: 137, wait until txId: 137 >> TAsyncIndexTests::MergeIndexWithReboots[PipeResets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] Test command err: === Server->StartServer(false); 2025-06-30T09:18:32.191841Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669336611920246:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:32.191873Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:32.299176Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669335386146412:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:32.308578Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00403a/r3tmp/tmpBruCLJ/pdisk_1.dat 2025-06-30T09:18:32.521867Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:32.523487Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:32.962629Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:32.976531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-30T09:18:32.976555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:32.977356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:32.977366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:32.991651Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:32.991695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:32.993116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20505, node 1 2025-06-30T09:18:33.227590Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:33.273934Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/00403a/r3tmp/yandexnQfs2e.tmp 2025-06-30T09:18:33.273945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/00403a/r3tmp/yandexnQfs2e.tmp 2025-06-30T09:18:33.274013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/00403a/r3tmp/yandexnQfs2e.tmp 2025-06-30T09:18:33.274060Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:33.282817Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:33.304957Z INFO: TTestServer started on Port 2957 GrpcPort 20505 TClient is connected to server localhost:2957 PQClient connected to localhost:20505 === TenantModeEnabled() = 1 === Init PQ - start server on port 20505 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:33.588324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:18:33.588400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.588467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:18:33.588472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:18:33.589642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:18:33.589658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:33.595149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:18:33.595211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:18:33.595280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.595289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:18:33.595291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-30T09:18:33.595294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-06-30T09:18:33.598308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.598320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:18:33.598323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710657:0 3 -> 128 2025-06-30T09:18:33.603392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.603408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.603414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-30T09:18:33.603439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-30T09:18:33.609766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:33.609932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:18:33.609937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-30T09:18:33.609942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:18:33.616678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-30T09:18:33.617013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:18:33.621746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 1751275113665, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-30T09:18:33.621820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1751275113665 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-30T09:18:33.621832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-30T09:18:33.621947Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710657:0 128 -> 240 2025-06-30T09:18:33.621967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-30T09:18:33.622012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-30T09:18:33.622025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, Local ... ECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-30T09:18:38.153297Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7521669361502127098:2363] (SourceId=123, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-30T09:18:38.153303Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 4 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-30T09:18:38.153867Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037893] server connected, pipe [3:7521669361502127101:2363], now have 1 active actors on pipe 2025-06-30T09:18:38.154240Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:821: TPartitionWriter 72075186224037893 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037893, NodeId 4, Generation: 1 2025-06-30T09:18:38.154440Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-30T09:18:38.154453Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-30T09:18:38.154490Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 123|81216b89-d7de3ca7-1d19a0b2-7f320866_0 generated for partition 0 topic 'PQ/account/topic' owner 123 2025-06-30T09:18:38.154539Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-30T09:18:38.154560Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:38.155048Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-30T09:18:38.155058Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-30T09:18:38.155086Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:38.155330Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 4 partition: 0 MaxSeqNo: 2 sessionId: 123|81216b89-d7de3ca7-1d19a0b2-7f320866_0 2025-06-30T09:18:38.155686Z :INFO: [] MessageGroupId [123] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1751275118155 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:38.155734Z :INFO: [] MessageGroupId [123] SessionId [] Write session established. Init response: last_sequence_number: 2 session_id: "123|81216b89-d7de3ca7-1d19a0b2-7f320866_0" topic: "PQ/account/topic" 2025-06-30T09:18:38.155846Z :DEBUG: [] MessageGroupId [123] SessionId [123|81216b89-d7de3ca7-1d19a0b2-7f320866_0] Write 1 messages with Id from 1 to 1 2025-06-30T09:18:38.155876Z :DEBUG: [] MessageGroupId [123] SessionId [123|81216b89-d7de3ca7-1d19a0b2-7f320866_0] Write session: try to update token 2025-06-30T09:18:38.155884Z :DEBUG: [] MessageGroupId [123] SessionId [123|81216b89-d7de3ca7-1d19a0b2-7f320866_0] Send 1 message(s) (0 left), first sequence number is 3 2025-06-30T09:18:38.156011Z :INFO: [] MessageGroupId [123] SessionId [123|81216b89-d7de3ca7-1d19a0b2-7f320866_0] Write session: close. Timeout = 10000 ms 2025-06-30T09:18:38.156164Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: 123|81216b89-d7de3ca7-1d19a0b2-7f320866_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-30T09:18:38.156331Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037893 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-30T09:18:38.156521Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-30T09:18:38.156558Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-30T09:18:38.156582Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 1 2025-06-30T09:18:38.156723Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037893 (partition=0) Received event: NActors::IEventHandle 2025-06-30T09:18:38.156970Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-30T09:18:38.156979Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-30T09:18:38.156993Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72075186224037893] got client message topic: PQ/account/topic partition: 0 SourceId: '\000123' SeqNo: 3 partNo : 0 messageNo: 1 size 372 offset: -1 2025-06-30T09:18:38.157023Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1843: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Send write quota request. 
Topic: "PQ/account/topic". Partition: 0. Amount: 376. Cookie: 3 2025-06-30T09:18:38.157038Z node 4 :PERSQUEUE DEBUG: partition.cpp:3720: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Got quota. Topic: "PQ/account/topic". Partition: 0: Cookie: 3 2025-06-30T09:18:38.157087Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'PQ/account/topic' partition 0 part blob processing sourceId '\000123' seqNo 3 partNo 0 2025-06-30T09:18:38.157125Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'PQ/account/topic' partition 0 part blob complete sourceId '\000123' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 443 count 1 nextOffset 3 batches 1 2025-06-30T09:18:38.157180Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Add new write blob: topic 'PQ/account/topic' partition 0 compactOffset 2,1 HeadOffset 2 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000? size 431 WTime 1751275118156 2025-06-30T09:18:38.157207Z node 4 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:18:38.157216Z node 4 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 431 2025-06-30T09:18:38.159450Z node 4 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 2 count 1 size 431 actorID [4:7521669353925951735:2290] 2025-06-30T09:18:38.159499Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 376 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:18:38.159510Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:18:38.159523Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Answering for message sourceid: '\000123', Topic: 'PQ/account/topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-30T09:18:38.159562Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037893, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=1293, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:38.159800Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-30T09:18:38.159874Z node 4 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. 
Tablet '72075186224037893' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' size 431 2025-06-30T09:18:38.160681Z :DEBUG: [] MessageGroupId [123] SessionId [123|81216b89-d7de3ca7-1d19a0b2-7f320866_0] Write session got write response: sequence_numbers: 3 offsets: 2 already_written: false write_statistics { persist_duration_ms: 2 } 2025-06-30T09:18:38.160694Z :DEBUG: [] MessageGroupId [123] SessionId [123|81216b89-d7de3ca7-1d19a0b2-7f320866_0] Write session: acknoledged message 1 2025-06-30T09:18:38.160033Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037893 (partition=0) Received event: NActors::IEventHandle 2025-06-30T09:18:38.258788Z :INFO: [] MessageGroupId [123] SessionId [123|81216b89-d7de3ca7-1d19a0b2-7f320866_0] Write session will now close 2025-06-30T09:18:38.258814Z :DEBUG: [] MessageGroupId [123] SessionId [123|81216b89-d7de3ca7-1d19a0b2-7f320866_0] Write session: aborting 2025-06-30T09:18:38.259033Z :INFO: [] MessageGroupId [123] SessionId [123|81216b89-d7de3ca7-1d19a0b2-7f320866_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:38.259045Z :DEBUG: [] MessageGroupId [123] SessionId [123|81216b89-d7de3ca7-1d19a0b2-7f320866_0] Write session: destroy 2025-06-30T09:18:38.259315Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: 123|81216b89-d7de3ca7-1d19a0b2-7f320866_0 grpc read done: success: 0 data: 2025-06-30T09:18:38.259331Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: 123|81216b89-d7de3ca7-1d19a0b2-7f320866_0 grpc read failed 2025-06-30T09:18:38.259340Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: 123|81216b89-d7de3ca7-1d19a0b2-7f320866_0 grpc closed 2025-06-30T09:18:38.259343Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: 123|81216b89-d7de3ca7-1d19a0b2-7f320866_0 is DEAD 2025-06-30T09:18:38.259567Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037893 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:38.260023Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037893] server disconnected, pipe [3:7521669361502127101:2363] destroyed 2025-06-30T09:18:38.260049Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:18:38.531604Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7521669361502127115:2370], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:38.531692Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=3&id=YjlhYTUyNzQtNjYyNzU4NGItNzE4MGM2MTEtZjc4YThkMTY=, ActorId: [3:7521669361502127108:2366], ActorState: ExecuteState, TraceId: 01jz0237xy8zf573qv5pc9z5q0, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:38.531865Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> KqpPg::PgUpdate+useSink [GOOD] >> KqpPg::PgUpdate-useSink |79.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersqueueDataPlaneTestSuite::WriteSession [GOOD] Test command err: === Server->StartServer(false); 2025-06-30T09:18:35.888983Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669349470436556:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:35.912285Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669346818673834:2247];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004024/r3tmp/tmpI8aR2A/pdisk_1.dat 2025-06-30T09:18:36.047237Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:36.047379Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:36.046795Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:36.046951Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:36.129697Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:36.166913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:36.166942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:36.173487Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:36.173837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:36.184103Z node 1 
:BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 17334, node 1 2025-06-30T09:18:36.243013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:36.243047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:36.259100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:36.267293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004024/r3tmp/yandex4g2M6u.tmp 2025-06-30T09:18:36.267308Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004024/r3tmp/yandex4g2M6u.tmp 2025-06-30T09:18:36.267388Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004024/r3tmp/yandex4g2M6u.tmp 2025-06-30T09:18:36.267466Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:36.273431Z INFO: TTestServer started on Port 24045 GrpcPort 17334 TClient is connected to server localhost:24045 PQClient connected to localhost:17334 === TenantModeEnabled() = 1 === Init PQ - start server on port 17334 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:36.352332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976720657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:18:36.352423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:36.352494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:18:36.352512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976720657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:18:36.352600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976720657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:18:36.352614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:36.359184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976720657, response: Status: StatusAccepted TxId: 281474976720657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:18:36.359250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976720657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:18:36.359310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:36.359322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976720657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:18:36.359326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976720657:0 ProgressState no shards to create, do next state 2025-06-30T09:18:36.359331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976720657:0 2 -> 3 waiting... 
2025-06-30T09:18:36.363897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:36.363918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976720657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:18:36.363925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976720657:0 3 -> 128 2025-06-30T09:18:36.364226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-30T09:18:36.364238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976720657, ready parts: 0/1, is published: true 2025-06-30T09:18:36.364243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-30T09:18:36.374350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:36.374368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:36.374375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-30T09:18:36.374402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 281474976720657 ready parts: 1/1 2025-06-30T09:18:36.375485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976720657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:36.379134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976720657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976720657 msg type: 269090816 2025-06-30T09:18:36.379195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976720657, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:18:36.384350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 1751275116423, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-30T09:18:36.384417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976720657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1751275116423 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-30T09:18:36.384426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-30T09:18:36.384517Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976720657:0 128 -> 240 2025-06-30T09:18:36.384525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-30T09:18:36.384568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-30T09:18:36.384580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-30T09:18:36.387250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at sc ... .831538Z :NOTICE: [/Root] [/Root] [e17b7195-8a70d94a-d9469dde-558850a1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:38.831580Z :INFO: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] Starting read session 2025-06-30T09:18:38.831591Z :DEBUG: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] Starting session to cluster null (localhost:8686) 2025-06-30T09:18:38.831617Z :DEBUG: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:38.831620Z :DEBUG: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:38.831623Z :DEBUG: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] [null] Reconnecting session to cluster null in 0.000000s 2025-06-30T09:18:38.831924Z :DEBUG: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] [null] Successfully connected. Initializing session 2025-06-30T09:18:38.834962Z node 3 :PQ_READ_PROXY DEBUG: grpc_pq_read.h:111: new grpc connection 2025-06-30T09:18:38.834977Z node 3 :PQ_READ_PROXY DEBUG: grpc_pq_read.h:133: new session created cookie 2 2025-06-30T09:18:38.838929Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer session grpc read done: success# 1, data# { init_request { topics_read_settings { topic: "/Root/account1/write_topic" } read_only_original: true consumer: "consumer_aba" read_params { max_read_size: 104857600 } } } 2025-06-30T09:18:38.839016Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:916: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 read init: from# ipv6:[::1]:44404, request# { init_request { topics_read_settings { topic: "/Root/account1/write_topic" } read_only_original: true consumer: "consumer_aba" read_params { max_read_size: 104857600 } } } 2025-06-30T09:18:38.839102Z node 3 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 auth for : consumer_aba 2025-06-30T09:18:38.842845Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:38.843724Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:38.844170Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037894][write_topic] pipe [3:7521669358261241710:2374] connected; active server actors: 1 2025-06-30T09:18:38.843095Z node 3 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 Handle describe topics response 2025-06-30T09:18:38.843142Z node 3 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 auth is DEAD 2025-06-30T09:18:38.843172Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 auth ok: topics# 1, initDone# 0 2025-06-30T09:18:38.843614Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1196: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 register session: topic# /Root/account1/write_topic 2025-06-30T09:18:38.847002Z :INFO: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] [null] Server session id: consumer_aba_3_2_5444726892403580442_v1 2025-06-30T09:18:38.847127Z :DEBUG: [/Root] 
[/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] [null] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:38.847298Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 grpc read done: success# 1, data# { read { } } 2025-06-30T09:18:38.847356Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 got read request: guid# e1683950-7b11cddb-baba94be-38f58e4f 2025-06-30T09:18:38.851027Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1699: [72075186224037894][write_topic] consumer "consumer_aba" register session for pipe [3:7521669358261241710:2374] session consumer_aba_3_2_5444726892403580442_v1 2025-06-30T09:18:38.851069Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:635: [72075186224037894][write_topic] consumer consumer_aba register readable partition 0 2025-06-30T09:18:38.851087Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:665: [72075186224037894][write_topic] consumer consumer_aba family created family=1 (Status=Free, Partitions=[0]) 2025-06-30T09:18:38.851099Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:867: [72075186224037894][write_topic] consumer consumer_aba register reading session ReadingSession "consumer_aba_3_2_5444726892403580442_v1" (Sender=[3:7521669358261241704:2374], Pipe=[3:7521669358261241710:2374], Partitions=[], ActiveFamilyCount=0) 2025-06-30T09:18:38.851105Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037894][write_topic] consumer consumer_aba rebalancing was scheduled 2025-06-30T09:18:38.851123Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037894][write_topic] consumer consumer_aba balancing. Sessions=1, Families=1, UnradableFamilies=1 [1 (0), ], RequireBalancing=0 [] 2025-06-30T09:18:38.851136Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1302: [72075186224037894][write_topic] consumer consumer_aba balancing family=1 (Status=Free, Partitions=[0]) for ReadingSession "consumer_aba_3_2_5444726892403580442_v1" (Sender=[3:7521669358261241704:2374], Pipe=[3:7521669358261241710:2374], Partitions=[], ActiveFamilyCount=0) 2025-06-30T09:18:38.851157Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037894][write_topic] consumer consumer_aba family 1 status Active partitions [0] session "consumer_aba_3_2_5444726892403580442_v1" sender [3:7521669358261241704:2374] lock partition 0 for ReadingSession "consumer_aba_3_2_5444726892403580442_v1" (Sender=[3:7521669358261241704:2374], Pipe=[3:7521669358261241710:2374], Partitions=[], ActiveFamilyCount=1) generation 1 step 1 2025-06-30T09:18:38.851174Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037894][write_topic] consumer consumer_aba start rebalancing. 
familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-30T09:18:38.851180Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037894][write_topic] consumer consumer_aba balancing duration: 0.000052s 2025-06-30T09:18:38.855271Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 assign: record# { Partition: 0 TabletId: 72075186224037893 Topic: "write_topic" Generation: 1 Step: 1 Session: "consumer_aba_3_2_5444726892403580442_v1" ClientId: "consumer_aba" PipeClient { RawX1: 7521669358261241710 RawX2: 4503612512274758 } Path: "/Root/account1/write_topic" } 2025-06-30T09:18:38.855307Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:1132: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 INITING TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) 2025-06-30T09:18:38.855560Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:972: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037893 Generation: 1, pipe: [3:7521669358261241712:2377] 2025-06-30T09:18:38.856312Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: consumer_aba_3_2_5444726892403580442_v1:1 with generation 1 2025-06-30T09:18:38.860690Z node 3 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 1 WriteTimestampMS: 1751275118714 CreateTimestampMS: 1751275118715 SizeLag: 165 WriteTimestampEstimateMS: 1751275118714 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-30T09:18:38.860723Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 INIT DONE TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) EndOffset 1 readOffset 0 committedOffset 0 2025-06-30T09:18:38.860742Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 sending to client partition status Got new read session event: CreatePartitionStream { PartitionStreamId: 1 TopicPath: account1/write_topic Cluster: PartitionId: 0 CommittedOffset: 0 EndOffset: 1 } 2025-06-30T09:18:38.862826Z :INFO: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] Closing read session. Close timeout: 0.000000s 2025-06-30T09:18:38.862852Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:account1/write_topic:0:1:0:0 2025-06-30T09:18:38.862864Z :INFO: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] Counters: { Errors: 0 CurrentSessionLifetimeMs: 31 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:38.862888Z :NOTICE: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:18:38.862898Z :DEBUG: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] [null] Abort session to cluster 2025-06-30T09:18:38.863112Z :NOTICE: [/Root] [/Root] [f0ff17aa-ae7f53e5-98e601ad-39e045c1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:38.865145Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037894][write_topic] pipe [3:7521669358261241710:2374] disconnected; active server actors: 1 2025-06-30T09:18:38.865163Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037894][write_topic] pipe [3:7521669358261241710:2374] client consumer_aba disconnected session consumer_aba_3_2_5444726892403580442_v1 2025-06-30T09:18:38.863429Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 grpc read done: success# 0, data# { } 2025-06-30T09:18:38.863445Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 grpc read failed 2025-06-30T09:18:38.863453Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 grpc closed 2025-06-30T09:18:38.863468Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer consumer_aba session consumer_aba_3_2_5444726892403580442_v1 is DEAD 2025-06-30T09:18:38.864847Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: consumer_aba_3_2_5444726892403580442_v1 |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest >> KqpPg::InsertFromSelect_NoReorder-useSink [GOOD] >> KqpPg::InsertFromSelect_Serial+useSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::AlterSchemeLimits [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:16:39.051524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:39.051542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:39.051546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:39.051549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: 
OperationsProcessing config: using default configuration 2025-06-30T09:16:39.051553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:39.051557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:39.051564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:39.051575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:39.051655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:39.051705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:39.061845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:16:39.061866Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:39.061947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:16:39.064237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:39.065183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:39.065210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:39.066648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:39.066690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:39.066814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:39.066871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:39.068288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:39.068334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:39.068560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:39.068570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:39.068579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:39.068586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:39.068592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:39.068613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:16:39.069878Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:16:39.089194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:39.089254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:39.089303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:39.089310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:39.089352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:39.089363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:39.089977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:39.090003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:39.090068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:39.090076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 
1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:39.090081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:39.090086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:39.090415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:39.090424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:39.090429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:39.090772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:39.090780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:39.090784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:39.090791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:39.091363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:39.093215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:39.093253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:39.093439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:39.093464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
Count reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 4] was 3 2025-06-30T09:18:38.198616Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72075186233409546, cookie: 1011 2025-06-30T09:18:38.198624Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72075186233409546, cookie: 1011 2025-06-30T09:18:38.198627Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1011 2025-06-30T09:18:38.198631Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1011, pathId: [OwnerId: 72075186233409546, LocalPathId: 5], version: 2 2025-06-30T09:18:38.198634Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 5] was 2 2025-06-30T09:18:38.198853Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72075186233409546, cookie: 1011 2025-06-30T09:18:38.198876Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72075186233409546, cookie: 1011 2025-06-30T09:18:38.198882Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1011 2025-06-30T09:18:38.198889Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1011, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 10 2025-06-30T09:18:38.198895Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 7 2025-06-30T09:18:38.198912Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1011, ready parts: 0/1, is published: true 2025-06-30T09:18:38.200414Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1011 2025-06-30T09:18:38.201398Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1011 2025-06-30T09:18:38.201436Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1011 TestModificationResult got TxId: 1011, wait until txId: 1011 TestModificationResults wait txId: 1012 2025-06-30T09:18:38.202414Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/Alice/A" OperationType: ESchemeOpMkDir MkDir { Name: "B" } } TxId: 1012 TabletId: 
72075186233409546 , at schemeshard: 72075186233409546 2025-06-30T09:18:38.202490Z node 264 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/Alice/A/B, operationId: 1012:0, at schemeshard: 72075186233409546 2025-06-30T09:18:38.202531Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72075186233409546, LocalPathId: 4], parent name: A, child name: B, child id: [OwnerId: 72075186233409546, LocalPathId: 6], at schemeshard: 72075186233409546 2025-06-30T09:18:38.202545Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72075186233409546, LocalPathId: 6] was 0 2025-06-30T09:18:38.202553Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1012:0 type: TxMkDir target path: [OwnerId: 72075186233409546, LocalPathId: 6] source path: 2025-06-30T09:18:38.202568Z node 264 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1012:1, propose status:StatusAccepted, reason: , at schemeshard: 72075186233409546 2025-06-30T09:18:38.202643Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 4] was 3 2025-06-30T09:18:38.202656Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 6] was 1 2025-06-30T09:18:38.203519Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1012, response: Status: StatusAccepted TxId: 1012 SchemeshardId: 72075186233409546 PathId: 6, at schemeshard: 72075186233409546 2025-06-30T09:18:38.203588Z node 264 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1012, database: /MyRoot/Alice, subject: , status: StatusAccepted, operation: CREATE DIRECTORY, path: /MyRoot/Alice/A/B 2025-06-30T09:18:38.203598Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:271: Delay activating, operation part: 1012:0, there is await operations num 1 2025-06-30T09:18:38.203641Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-30T09:18:38.203648Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 1012, path id: [OwnerId: 72075186233409546, LocalPathId: 4] 2025-06-30T09:18:38.203696Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 1012, path id: [OwnerId: 72075186233409546, LocalPathId: 6] 2025-06-30T09:18:38.203720Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-30T09:18:38.203731Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [264:482:2432], at schemeshard: 72075186233409546, txId: 1012, path id: 4 2025-06-30T09:18:38.203738Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [264:482:2432], at schemeshard: 72075186233409546, 
txId: 1012, path id: 6 2025-06-30T09:18:38.203915Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72075186233409546, cookie: 1012 2025-06-30T09:18:38.203932Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72075186233409546, cookie: 1012 2025-06-30T09:18:38.203937Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1012 2025-06-30T09:18:38.203945Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1012, pathId: [OwnerId: 72075186233409546, LocalPathId: 4], version: 4 2025-06-30T09:18:38.203951Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 4] was 4 2025-06-30T09:18:38.204119Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 6 Version: 2 PathOwnerId: 72075186233409546, cookie: 1012 2025-06-30T09:18:38.204130Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 6 Version: 2 PathOwnerId: 72075186233409546, cookie: 1012 2025-06-30T09:18:38.204136Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1012 2025-06-30T09:18:38.204140Z node 264 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1012, pathId: [OwnerId: 72075186233409546, LocalPathId: 6], version: 2 2025-06-30T09:18:38.204145Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 6] was 2 2025-06-30T09:18:38.204158Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1012, ready parts: 0/1, is published: true 2025-06-30T09:18:38.205489Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1012 2025-06-30T09:18:38.205536Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1012 TestModificationResult got TxId: 1012, wait until txId: 1012 TestModificationResults wait txId: 1013 2025-06-30T09:18:38.206608Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/Alice/A" OperationType: ESchemeOpMkDir MkDir { Name: "C" } } TxId: 1013 TabletId: 72075186233409546 , at schemeshard: 72075186233409546 2025-06-30T09:18:38.206693Z node 264 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/Alice/A/C, operationId: 1013:0, at schemeshard: 72075186233409546 
2025-06-30T09:18:38.206758Z node 264 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1013:1, propose status:StatusResourceExhausted, reason: Check failed: path: '/MyRoot/Alice/A/C', error: paths count limit exceeded, limit: 5, paths: 5, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155, at schemeshard: 72075186233409546 2025-06-30T09:18:38.207627Z node 264 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1013, response: Status: StatusResourceExhausted Reason: "Check failed: path: \'/MyRoot/Alice/A/C\', error: paths count limit exceeded, limit: 5, paths: 5, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" TxId: 1013 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:18:38.207704Z node 264 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1013, database: /MyRoot/Alice, subject: , status: StatusResourceExhausted, reason: Check failed: path: '/MyRoot/Alice/A/C', error: paths count limit exceeded, limit: 5, paths: 5, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155, operation: CREATE DIRECTORY, path: /MyRoot/Alice/A/C TestModificationResult got TxId: 1013, wait until txId: 1013 |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/unittest >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query [GOOD] Test command err: 2025-06-30T09:16:41.944615Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668857529860998:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.944661Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cab/r3tmp/tmppX7QCe/pdisk_1.dat 2025-06-30T09:16:41.973812Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:16:41.993355Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28148, node 1 2025-06-30T09:16:42.014482Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002cab/r3tmp/yandex3iVB0f.tmp 2025-06-30T09:16:42.014500Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002cab/r3tmp/yandex3iVB0f.tmp 2025-06-30T09:16:42.014598Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002cab/r3tmp/yandex3iVB0f.tmp 2025-06-30T09:16:42.014656Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:42.019144Z INFO: TTestServer started on Port 5016 GrpcPort 28148 TClient is connected to server localhost:5016 PQClient connected to localhost:28148 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:16:42.046520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:42.046553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:42.047526Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:42.074236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:42.075953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:16:42.078933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-30T09:16:42.317333Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668861824829045:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.317372Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668861824829057:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.317381Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.318201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:42.319121Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668861824829091:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.319201Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.320143Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668861824829060:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:16:42.357140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.365137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.379103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-30T09:16:42.395613Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668861824829369:2584] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === CheckClustersList. Subcribe to ClusterTracker from [1:7521668861824829409:2612] 2025-06-30T09:16:42.946795Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:46.945063Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668857529860998:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:46.945104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:16:47.707293Z :ReadRuleGeneration INFO: TTopicSdkTestSetup started 2025-06-30T09:16:47.738990Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:16:47.751586Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:16:47.751747Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521668883299666108:2697] connected; active server actors: 1 2025-06-30T09:16:47.751828Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-30T09:16:47.759122Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:16:47.759250Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:16:47.759339Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-30T09:16:47.759345Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:16:47.759349Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-30T09:16:47.759364Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:16:47.759374Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:47.759389Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-30T09:16:47.759438Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:16:47.759473Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-30T09:16:47.762792Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [1:7521668883299666107:2696], now have 1 active actors on pipe 2025-06-30T09:16:47.766806Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72075186224037892, NodeId 1, Generation 1 2025-06-30T09:16:47.766824Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] T ... consumer session test-consumer_11_2_608861728040434092_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:18:31.246807Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:18:31.298250Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=293, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:31.327575Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037896, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=1076, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:32.375546Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037894] server connected, pipe [11:7521669334329804608:6070], now have 1 active actors on pipe 2025-06-30T09:18:32.376010Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037896] server connected, pipe [11:7521669334329804613:6074], now have 1 active actors on pipe 2025-06-30T09:18:32.376309Z :INFO: [/Root] [/Root] [8e7d3c5a-d4f7d58d-628b8c3d-f6d0e5be] Closing read session. 
Close timeout: 0.000000s 2025-06-30T09:18:32.376326Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_B:0:1:9:10 2025-06-30T09:18:32.376333Z :INFO: [/Root] [/Root] [8e7d3c5a-d4f7d58d-628b8c3d-f6d0e5be] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2514 BytesRead: 101 MessagesRead: 10 BytesReadCompressed: 101 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:32.376352Z :NOTICE: [/Root] [/Root] [8e7d3c5a-d4f7d58d-628b8c3d-f6d0e5be] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:18:32.376363Z :DEBUG: [/Root] [/Root] [8e7d3c5a-d4f7d58d-628b8c3d-f6d0e5be] [] Abort session to cluster 2025-06-30T09:18:32.378911Z :NOTICE: [/Root] [/Root] [8e7d3c5a-d4f7d58d-628b8c3d-f6d0e5be] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:32.379507Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer test-consumer session test-consumer_11_2_608861728040434092_v1 grpc read done: success# 0, data# { } 2025-06-30T09:18:32.379521Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer test-consumer session test-consumer_11_2_608861728040434092_v1 grpc read failed 2025-06-30T09:18:32.379528Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer test-consumer session test-consumer_11_2_608861728040434092_v1 grpc closed 2025-06-30T09:18:32.379540Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer test-consumer session test-consumer_11_2_608861728040434092_v1 is DEAD 2025-06-30T09:18:32.380277Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037897][topic_B] pipe [11:7521669321444901885:3169] disconnected; active server actors: 1 2025-06-30T09:18:32.380287Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037897][topic_B] pipe [11:7521669321444901885:3169] client test-consumer disconnected session test-consumer_11_2_608861728040434092_v1 2025-06-30T09:18:32.380316Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037896] Destroy direct read session test-consumer_11_2_608861728040434092_v1 2025-06-30T09:18:32.380323Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521669321444901888:3172] destroyed 2025-06-30T09:18:32.380340Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_2_608861728040434092_v1 2025-06-30T09:18:32.382829Z :INFO: [/Root] [/Root] [ac8a640b-4a8e97bd-8ef0d7fb-31335ab9] Closing read session. Close timeout: 0.000000s 2025-06-30T09:18:32.382850Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:0:1 2025-06-30T09:18:32.382861Z :INFO: [/Root] [/Root] [ac8a640b-4a8e97bd-8ef0d7fb-31335ab9] Counters: { Errors: 0 CurrentSessionLifetimeMs: 4521 BytesRead: 144 MessagesRead: 1 BytesReadCompressed: 144 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:32.382880Z :NOTICE: [/Root] [/Root] [ac8a640b-4a8e97bd-8ef0d7fb-31335ab9] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:18:32.382890Z :DEBUG: [/Root] [/Root] [ac8a640b-4a8e97bd-8ef0d7fb-31335ab9] [] Abort session to cluster 2025-06-30T09:18:32.383035Z :NOTICE: [/Root] [/Root] [ac8a640b-4a8e97bd-8ef0d7fb-31335ab9] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:32.385704Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_15241194161437637581_v1 grpc read done: success# 0, data# { } 2025-06-30T09:18:32.385718Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_15241194161437637581_v1 grpc read failed 2025-06-30T09:18:32.385726Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_11_1_15241194161437637581_v1 grpc closed 2025-06-30T09:18:32.385741Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_15241194161437637581_v1 is DEAD 2025-06-30T09:18:32.385916Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_15241194161437637581_v1 2025-06-30T09:18:32.385926Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669312854967201:3134] destroyed 2025-06-30T09:18:32.385945Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_15241194161437637581_v1 2025-06-30T09:18:32.385997Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521669312854967198:3131] disconnected; active server actors: 1 2025-06-30T09:18:32.386000Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521669312854967198:3131] client test-consumer disconnected session test-consumer_11_1_15241194161437637581_v1 2025-06-30T09:18:32.387260Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|65dd0772-c800aaf4-641821c0-42f5a89b_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2025-06-30T09:18:32.387271Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|65dd0772-c800aaf4-641821c0-42f5a89b_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:18:32.387279Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|65dd0772-c800aaf4-641821c0-42f5a89b_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:18:32.387413Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|65dd0772-c800aaf4-641821c0-42f5a89b_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:32.387419Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|65dd0772-c800aaf4-641821c0-42f5a89b_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:18:32.387755Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|8afb9706-476a8629-d6af635b-14824a35_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-30T09:18:32.387760Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|8afb9706-476a8629-d6af635b-14824a35_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:18:32.387765Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|8afb9706-476a8629-d6af635b-14824a35_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:18:32.387854Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|8afb9706-476a8629-d6af635b-14824a35_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:32.387859Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|8afb9706-476a8629-d6af635b-14824a35_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:18:32.395100Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message_group_id|65dd0772-c800aaf4-641821c0-42f5a89b_0 grpc read done: success: 0 data: 2025-06-30T09:18:32.395118Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message_group_id|65dd0772-c800aaf4-641821c0-42f5a89b_0 grpc read failed 2025-06-30T09:18:32.395128Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message_group_id|65dd0772-c800aaf4-641821c0-42f5a89b_0 grpc closed 2025-06-30T09:18:32.395134Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message_group_id|65dd0772-c800aaf4-641821c0-42f5a89b_0 is DEAD 2025-06-30T09:18:32.395566Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|8afb9706-476a8629-d6af635b-14824a35_0 grpc read done: success: 0 data: 2025-06-30T09:18:32.395576Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|8afb9706-476a8629-d6af635b-14824a35_0 grpc read failed 2025-06-30T09:18:32.395582Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|8afb9706-476a8629-d6af635b-14824a35_0 grpc closed 2025-06-30T09:18:32.395586Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|8afb9706-476a8629-d6af635b-14824a35_0 is DEAD 2025-06-30T09:18:32.395886Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:32.395903Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:32.396092Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521669312854966982:3099] destroyed 2025-06-30T09:18:32.396100Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521669312854966985:3099] destroyed 2025-06-30T09:18:32.396378Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-06-30T09:18:32.400058Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:32.400075Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:32.400125Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669312854966930:3088] destroyed 2025-06-30T09:18:32.400131Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669312854966933:3088] destroyed 2025-06-30T09:18:32.400141Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. >> TVectorIndexTests::CreateTableWithError >> KqpPg::InsertFromSelect_Serial+useSink [GOOD] >> KqpPg::InsertFromSelect_Serial-useSink >> TVectorIndexTests::CreateTableWithError [GOOD] |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> THiveTest::TestHiveBalancerUselessNeighbourMoves [GOOD] >> THiveTest::TestHiveBalancerWithImmovableTablets >> KqpPg::PgUpdate-useSink [GOOD] >> KqpPg::JoinWithQueryService-StreamLookup |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberSeconds >> TSchemeShardSysNames::ESchemeOpMkDir-NoProtect-NoDbAdmin-anonymous >> EraseRowsTests::ConditionalEraseRowsShouldNotEraseModifiedRows ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableWithError [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:40.718212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:40.718247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:40.718255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:40.718262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:40.718269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:40.718273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 
10000 2025-06-30T09:18:40.718285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:40.718306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:40.718437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:40.718528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:40.740983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:40.741018Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:40.745821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:40.745910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:40.745978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:40.751734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:40.751832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:40.752011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:40.752149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:40.753199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:40.753280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:40.753703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:40.753727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:40.753765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:40.753778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:40.753786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:40.753818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:40.756569Z node 1 
:HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:40.784210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:40.784310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:40.784385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:40.784395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:40.784458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:40.784478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:40.785263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:40.785316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:40.785382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:40.785394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:40.785400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:40.785407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:40.785972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:40.785988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:40.785995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:40.787081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress 
Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:40.787101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:40.787109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:40.787119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:40.788006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:40.788575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:40.788623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:40.788864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:40.788900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:40.788909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:40.789006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:40.789016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:40.789066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:40.789084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:40.790869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:40.790887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: 
[OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:40.790958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:40.790966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-30T09:18:40.791084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:40.791094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-30T09:18:40.791115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:18:40.791121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:18:40.791130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:18:40.791135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:18:40.791141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-30T09:18:40.791149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:18:40.791155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-30T09:18:40.791160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1:0 2025-06-30T09:18:40.791181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:18:40.791193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-30T09:18:40.791199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-30T09:18:40.791689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:18:40.791713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:18:40.791719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-30T09:18:40.791726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-30T09:18:40.791733Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:40.791754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-30T09:18:40.793479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-30T09:18:40.793659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-30T09:18:40.794292Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-30T09:18:40.796662Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-30T09:18:40.797970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "vectors" Columns { Name: "id" Type: "Uint64" } Columns { Name: "__ydb_parent" Type: "String" } KeyColumnNames: "id" } IndexDescription { Name: "idx_vector" KeyColumnNames: "__ydb_parent" Type: EIndexTypeGlobalVectorKmeansTree VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:40.798088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:100: TCreateTableIndex construct operation table path: /MyRoot/vectors domain path id: [OwnerId: 72057594046678944, LocalPathId: 1] domain path: /MyRoot shardsToCreate: 2 GetShardsInside: 0 MaxShards: 200000 2025-06-30T09:18:40.798123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: index key column shouldn't have a reserved name, at schemeshard: 72057594046678944 2025-06-30T09:18:40.798134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 101:1, propose status:StatusInvalidParameter, reason: index key column shouldn't have a reserved name, at schemeshard: 72057594046678944 2025-06-30T09:18:40.798604Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:18:40.799439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 101, response: Status: StatusInvalidParameter Reason: "index key column shouldn\'t have a reserved name" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:40.799516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: index key column shouldn't have a reserved name, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/vectors 2025-06-30T09:18:40.799687Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait 
txId: 102 2025-06-30T09:18:40.800702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "vectors" Columns { Name: "id" Type: "Uint64" } Columns { Name: "embedding" Type: "String" } KeyColumnNames: "id" } IndexDescription { Name: "idx_vector" KeyColumnNames: "embedding" Type: EIndexTypeGlobalVectorKmeansTree DataColumnNames: "id" VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:40.800795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:100: TCreateTableIndex construct operation table path: /MyRoot/vectors domain path id: [OwnerId: 72057594046678944, LocalPathId: 1] domain path: /MyRoot shardsToCreate: 2 GetShardsInside: 0 MaxShards: 200000 2025-06-30T09:18:40.800830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: the same column can't be used as key and data column for one index, for example id, at schemeshard: 72057594046678944 2025-06-30T09:18:40.800837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: the same column can't be used as key and data column for one index, for example id, at schemeshard: 72057594046678944 2025-06-30T09:18:40.801481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "the same column can\'t be used as key and data column for one index, for example id" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:40.801534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: the same column can't be used as key and data column for one index, for example id, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/vectors TestModificationResult got TxId: 102, wait until txId: 102 |79.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl >> TNetClassifierTest::TestInitFromBadlyFormattedFile |79.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl >> DistributedEraseTests::ConditionalEraseRowsShouldNotErase >> TNetClassifierTest::TestInitFromRemoteSource |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest >> KqpPg::InsertFromSelect_Serial-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink >> TNetClassifierTest::TestInitFromBadlyFormattedFile [GOOD] >> KqpPg::TypeCoercionInsert+useSink [GOOD] >> KqpPg::TableSelect+useSink >> KqpPg::JoinWithQueryService-StreamLookup [GOOD] >> KqpPg::PgAggregate+useSink |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest |79.5%| [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest >> TSchemeShardSysNames::ESchemeOpMkDir-NoProtect-NoDbAdmin-anonymous [GOOD] >> TSchemeShardSysNames::ESchemeOpMkDir-NoProtect-NoDbAdmin-ordinaryuser >> TNetClassifierTest::TestInitFromFile ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromBadlyFormattedFile [GOOD] Test command err: 2025-06-30T09:18:41.499569Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669374359895623:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:41.500244Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003fe7/r3tmp/tmp6VBnLs/pdisk_1.dat 2025-06-30T09:18:41.580654Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:41.581791Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669374359895437:2080] 1751275121494639 != 1751275121494642 2025-06-30T09:18:41.593052Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/003fe7/r3tmp/yandexaiOW3D.tmp 2025-06-30T09:18:41.593064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/003fe7/r3tmp/yandexaiOW3D.tmp 2025-06-30T09:18:41.593128Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:344: invalid NetData format 2025-06-30T09:18:41.593137Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: /home/runner/.ya/build/build_root/cl3w/003fe7/r3tmp/yandexaiOW3D.tmp 2025-06-30T09:18:41.593188Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:41.648582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:41.648618Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:41.649232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/unittest >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-30T09:16:41.698586Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668859169436769:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.698917Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d12/r3tmp/tmpfPkEv2/pdisk_1.dat 2025-06-30T09:16:41.743246Z node 1 
:PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created TServer::EnableGrpc on GrpcPort 25469, node 1 2025-06-30T09:16:41.768175Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:41.768445Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521668859169436745:2080] 1751275001698385 != 1751275001698388 2025-06-30T09:16:41.775178Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002d12/r3tmp/yandexmDBCUG.tmp 2025-06-30T09:16:41.775195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002d12/r3tmp/yandexmDBCUG.tmp 2025-06-30T09:16:41.775282Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002d12/r3tmp/yandexmDBCUG.tmp 2025-06-30T09:16:41.775331Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:41.782011Z INFO: TTestServer started on Port 13750 GrpcPort 25469 TClient is connected to server localhost:13750 2025-06-30T09:16:41.801262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.801306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.802286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected PQClient connected to localhost:25469 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.838555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:41.840973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-30T09:16:41.848497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-30T09:16:42.213287Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668863464404817:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.213326Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.213578Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668863464404829:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.214679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:42.217899Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668863464404831:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:16:42.275331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.286170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.296441Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668863464405026:2513] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:16:42.307970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.311270Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521668863464405050:2322], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:16:42.311912Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NzRiMmJhZGMtZGUyOWQxMGYtOWZmZGU0N2QtMjE3NmQwNjk=, ActorId: [1:7521668863464404799:2296], ActorState: ExecuteState, TraceId: 01jz01zpb2f9wk6zba7vnk8v17, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:16:42.312329Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521668863464405174:2608] 2025-06-30T09:16:42.700606Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:46.698853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668859169436769:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:46.698894Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:16:47.575688Z :WriteToTopic_Demo_13_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:16:47.587571Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:16:47.603356Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:16:47.603645Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:16:47.603710Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:16:47.603745Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-30T09:16:47.603750Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:16:47.603753Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-30T09:16:47.603767Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:16:47.603775Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:47.603788Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-30T09:16:47.645105Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521668884939241852:2689] connected; active server actors: 1 2025-06-30T ... 0. Read: {3, 2} (6-6) 2025-06-30T09:18:32.466914Z :DEBUG: [/Root] Take Data. Partition 0. Read: {3, 3} (7-7) 2025-06-30T09:18:32.467202Z :DEBUG: [/Root] Take Data. Partition 0. Read: {3, 4} (8-8) 2025-06-30T09:18:32.467220Z :DEBUG: [/Root] Take Data. Partition 0. Read: {4, 0} (9-9) 2025-06-30T09:18:32.472155Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 grpc read done: success# 1, data# { read_request { bytes_size: 16002528 } } 2025-06-30T09:18:32.472254Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 got read request: guid# 139d89c3-955a791c-4c27a18a-9b8a9ba8 2025-06-30T09:18:32.878939Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:21:0 2025-06-30T09:18:32.878972Z :INFO: [/Root] [/Root] [2e1b2e9f-d95a78d6-89f7d4e7-16c8ec75] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1004 BytesRead: 16000000 MessagesRead: 22 BytesReadCompressed: 16000000 BytesInflightUncompressed: 16000000 BytesInflightCompressed: 0 BytesInflightTotal: 16000000 MessagesInflight: 22 } 2025-06-30T09:18:32.884748Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 checking auth because of timeout 2025-06-30T09:18:32.891327Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 auth for : test-consumer 2025-06-30T09:18:32.891898Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 Handle describe topics response 2025-06-30T09:18:32.891928Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 auth is DEAD 2025-06-30T09:18:32.891951Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:18:32.898803Z :DEBUG: [/Root] Take Data. Partition 0. Read: {5, 0} (10-10) 2025-06-30T09:18:33.089213Z :DEBUG: [/Root] Take Data. Partition 0. 
Read: {6, 0} (11-11) 2025-06-30T09:18:33.089260Z :DEBUG: [/Root] Take Data. Partition 0. Read: {7, 0} (12-12) 2025-06-30T09:18:33.089333Z :DEBUG: [/Root] Take Data. Partition 0. Read: {7, 1} (13-13) 2025-06-30T09:18:33.089438Z :DEBUG: [/Root] Take Data. Partition 0. Read: {7, 2} (14-14) 2025-06-30T09:18:33.089485Z :DEBUG: [/Root] Take Data. Partition 0. Read: {7, 3} (15-15) 2025-06-30T09:18:33.230507Z :DEBUG: [/Root] Take Data. Partition 0. Read: {8, 0} (16-16) 2025-06-30T09:18:33.230545Z :DEBUG: [/Root] Take Data. Partition 0. Read: {9, 0} (17-17) 2025-06-30T09:18:33.230570Z :DEBUG: [/Root] Take Data. Partition 0. Read: {9, 1} (18-18) 2025-06-30T09:18:33.230619Z :DEBUG: [/Root] Take Data. Partition 0. Read: {9, 2} (19-19) 2025-06-30T09:18:33.230644Z :DEBUG: [/Root] Take Data. Partition 0. Read: {9, 3} (20-20) 2025-06-30T09:18:33.230673Z :DEBUG: [/Root] Take Data. Partition 0. Read: {9, 4} (21-21) 2025-06-30T09:18:33.230696Z :DEBUG: [/Root] [/Root] [2e1b2e9f-d95a78d6-89f7d4e7-16c8ec75] [] The application data is transferred to the client. Number of messages 22, size 16000000 bytes 2025-06-30T09:18:33.230714Z :DEBUG: [/Root] [/Root] [2e1b2e9f-d95a78d6-89f7d4e7-16c8ec75] [] Returning serverBytesSize = 0 to budget 0 22 2025-06-30T09:18:33.237624Z :DEBUG: [/Root] [/Root] [2e1b2e9f-d95a78d6-89f7d4e7-16c8ec75] [] Commit offsets [0, 22). Partition stream id: 1 2025-06-30T09:18:33.244476Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 grpc read done: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 1 offsets { end: 22 } } } } 2025-06-30T09:18:33.244645Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 22 prev 0 end 22 by cookie 2 2025-06-30T09:18:33.251143Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-30T09:18:33.251173Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-30T09:18:33.251226Z node 11 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 22 (startOffset 22) session test-consumer_11_1_3662661878611650553_v1 2025-06-30T09:18:33.251270Z node 11 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-30T09:18:33.254927Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 22 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:18:33.254979Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:18:33.254995Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:18:33.255017Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-30T09:18:33.255929Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-30T09:18:33.255943Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 22 endOffset 22 with cookie 2 2025-06-30T09:18:33.255963Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 22 2025-06-30T09:18:33.262328Z :DEBUG: [/Root] [/Root] [2e1b2e9f-d95a78d6-89f7d4e7-16c8ec75] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 22 } } 2025-06-30T09:18:33.878511Z :INFO: [/Root] [/Root] [2e1b2e9f-d95a78d6-89f7d4e7-16c8ec75] Closing read session. Close timeout: 0.000000s 2025-06-30T09:18:33.878532Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:21:22 2025-06-30T09:18:33.878545Z :INFO: [/Root] [/Root] [2e1b2e9f-d95a78d6-89f7d4e7-16c8ec75] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2003 BytesRead: 16000000 MessagesRead: 22 BytesReadCompressed: 16000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:33.878563Z :NOTICE: [/Root] [/Root] [2e1b2e9f-d95a78d6-89f7d4e7-16c8ec75] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:18:33.878573Z :DEBUG: [/Root] [/Root] [2e1b2e9f-d95a78d6-89f7d4e7-16c8ec75] [] Abort session to cluster 2025-06-30T09:18:33.879089Z :NOTICE: [/Root] [/Root] [2e1b2e9f-d95a78d6-89f7d4e7-16c8ec75] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:33.879475Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|bd9819a5-c5f43a81-d7e6a243-a36bdb2a_0] PartitionId [0] Generation [2] Write session: close. Timeout 0.000000s 2025-06-30T09:18:33.879484Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|bd9819a5-c5f43a81-d7e6a243-a36bdb2a_0] PartitionId [0] Generation [2] Write session will now close 2025-06-30T09:18:33.879491Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|bd9819a5-c5f43a81-d7e6a243-a36bdb2a_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-30T09:18:33.879620Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|bd9819a5-c5f43a81-d7e6a243-a36bdb2a_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:33.879625Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|bd9819a5-c5f43a81-d7e6a243-a36bdb2a_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-30T09:18:33.891002Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 grpc read done: success# 0, data# { } 2025-06-30T09:18:33.891023Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 grpc read failed 2025-06-30T09:18:33.891032Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 grpc closed 2025-06-30T09:18:33.891044Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_3662661878611650553_v1 is DEAD 2025-06-30T09:18:33.891077Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|bd9819a5-c5f43a81-d7e6a243-a36bdb2a_0 grpc read done: success: 0 data: 2025-06-30T09:18:33.891085Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|bd9819a5-c5f43a81-d7e6a243-a36bdb2a_0 grpc read failed 2025-06-30T09:18:33.891093Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|bd9819a5-c5f43a81-d7e6a243-a36bdb2a_0 grpc closed 2025-06-30T09:18:33.891096Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|bd9819a5-c5f43a81-d7e6a243-a36bdb2a_0 is DEAD 2025-06-30T09:18:33.891406Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:33.895145Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521669330615089725:2503] disconnected; active server actors: 1 2025-06-30T09:18:33.895162Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521669330615089725:2503] client test-consumer disconnected session test-consumer_11_1_3662661878611650553_v1 2025-06-30T09:18:33.895202Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669322025155052:2475] destroyed 2025-06-30T09:18:33.895208Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_3662661878611650553_v1 2025-06-30T09:18:33.895213Z node 11 :PERSQUEUE DEBUG: 
pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669330615089728:2506] destroyed 2025-06-30T09:18:33.895229Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_3662661878611650553_v1 2025-06-30T09:18:33.895240Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. >> TPersQueueNewSchemeCacheTest::CheckGrpcWriteNoDC [GOOD] >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression >> EraseRowsTests::ConditionalEraseRowsShouldNotEraseModifiedRows [GOOD] >> EraseRowsTests::EraseRowsFromReplicatedTable |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMilliSeconds >> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors >> DistributedEraseTests::ConditionalEraseRowsShouldErase >> BasicUsage::BrokenCredentialsProvider [GOOD] |79.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |79.5%| [LD] {RESULT} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut >> DistributedEraseTests::ConditionalEraseRowsShouldNotErase [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnVariousErrors >> KqpPg::PgAggregate+useSink [GOOD] >> KqpPg::PgAggregate-useSink >> TSchemeShardSysNames::ESchemeOpMkDir-NoProtect-NoDbAdmin-ordinaryuser [GOOD] >> TSchemeShardSysNames::ESchemeOpMkDir-NoProtect-NoDbAdmin-dbadmin >> TNetClassifierTest::TestInitFromFile [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_NotOneSize+useSink >> BasicUsage::WriteSessionCloseWaitsForWrites [GOOD] >> BasicUsage::WriteSessionCloseIgnoresWrites >> EraseRowsTests::ConditionalEraseRowsShouldNotErase >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MilliSeconds ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> BasicUsage::BrokenCredentialsProvider [GOOD] Test command err: 2025-06-30T09:18:09.767967Z :MaxByteSizeEqualZero INFO: Random seed for debugging is 1751275089767960 2025-06-30T09:18:10.022576Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669240811149144:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:10.022634Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:10.042112Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669241765759216:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:10.322901Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:10.345672Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000f52/r3tmp/tmpY779rt/pdisk_1.dat 
2025-06-30T09:18:10.425144Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:11.019076Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:11.042246Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:11.045395Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:11.111884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:18:11.131138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:11.131168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:11.143189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:11.179443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:11.179468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:11.189099Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:11.193999Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25034, node 1 2025-06-30T09:18:11.559038Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/000f52/r3tmp/yandexWUxkWY.tmp 2025-06-30T09:18:11.559053Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/000f52/r3tmp/yandexWUxkWY.tmp 2025-06-30T09:18:11.559116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/000f52/r3tmp/yandexWUxkWY.tmp 2025-06-30T09:18:11.559167Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:11.705757Z INFO: TTestServer started on Port 8731 GrpcPort 25034 TClient is connected to server localhost:8731 PQClient connected to localhost:25034 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:12.453156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-30T09:18:12.532988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-30T09:18:14.044185Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669258945628649:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:14.044218Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:14.048665Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669258945628661:2274], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:14.062262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:14.107109Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669258945628663:2275], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-30T09:18:14.236188Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669258945628691:2133] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:14.598523Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669257991019197:2306], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:14.599658Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669258945628698:2279], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:14.600116Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=Nzc2MjI3NWMtNGI1NzY2MzQtNmUwMjQ1YjEtNDM2NzYwMjA=, ActorId: [2:7521669258945628647:2270], ActorState: ExecuteState, TraceId: 01jz022g0h6h3tfqnw9s68325f, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:14.600212Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:14.599295Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YmExN2ZlMGMtOTNhYmEyMjItZDA0MjRkNzYtNWY4ZGQwOTQ=, ActorId: [1:7521669257991019147:2298], ActorState: ExecuteState, TraceId: 01jz022g4n0absxvghwksv6jbb, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:14.599781Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:14.600978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:14.843987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:14.996167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called a ... 
userAgent="pqv1 server" ip=ipv6:[::1]:41668 proto=v1 topic=test-topic durationSec=0 2025-06-30T09:18:42.603589Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:18:42.604084Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-30T09:18:42.604119Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-30T09:18:42.604121Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-30T09:18:42.604124Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-30T09:18:42.604132Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [5:7521669376577046253:2467] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-30T09:18:42.604768Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [5:7521669376577046253:2467] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-30T09:18:42.638645Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [5:7521669376577046253:2467] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-30T09:18:42.639040Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [5:7521669376577046289:2467] connected; active server actors: 1 2025-06-30T09:18:42.639118Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [5:7521669376577046253:2467] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-30T09:18:42.639124Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [5:7521669376577046253:2467] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-30T09:18:42.644438Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [5:7521669376577046289:2467] disconnected; active server actors: 1 2025-06-30T09:18:42.644445Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [5:7521669376577046289:2467] disconnected no session 2025-06-30T09:18:42.659263Z node 5 :PQ_PARTITION_CHOOSER DEBUG: 
partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [5:7521669376577046253:2467] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-30T09:18:42.659288Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [5:7521669376577046253:2467] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-30T09:18:42.659292Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [5:7521669376577046253:2467] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-30T09:18:42.659302Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-30T09:18:42.659735Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [5:7521669376577046312:2467], now have 1 active actors on pipe 2025-06-30T09:18:42.659920Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:821: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 6, Generation: 1 2025-06-30T09:18:42.660019Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:42.660031Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:42.660068Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|66155174-a066c9da-c7ed7648-3b89e166_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-30T09:18:42.660102Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-30T09:18:42.660120Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:42.660535Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:42.660545Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:42.660569Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:42.660809Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|66155174-a066c9da-c7ed7648-3b89e166_0 2025-06-30T09:18:42.662857Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1751275122662 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:42.662923Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|66155174-a066c9da-c7ed7648-3b89e166_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-30T09:18:42.663115Z :INFO: [] MessageGroupId [src] SessionId [src|66155174-a066c9da-c7ed7648-3b89e166_0] Write session: close. 
Timeout = 0 ms 2025-06-30T09:18:42.663125Z :INFO: [] MessageGroupId [src] SessionId [src|66155174-a066c9da-c7ed7648-3b89e166_0] Write session will now close 2025-06-30T09:18:42.663131Z :DEBUG: [] MessageGroupId [src] SessionId [src|66155174-a066c9da-c7ed7648-3b89e166_0] Write session: aborting 2025-06-30T09:18:42.663300Z :INFO: [] MessageGroupId [src] SessionId [src|66155174-a066c9da-c7ed7648-3b89e166_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:42.663306Z :DEBUG: [] MessageGroupId [src] SessionId [src|66155174-a066c9da-c7ed7648-3b89e166_0] Write session: destroy 2025-06-30T09:18:42.667131Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|66155174-a066c9da-c7ed7648-3b89e166_0 grpc read done: success: 0 data: 2025-06-30T09:18:42.667144Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|66155174-a066c9da-c7ed7648-3b89e166_0 grpc read failed 2025-06-30T09:18:42.667154Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|66155174-a066c9da-c7ed7648-3b89e166_0 grpc closed 2025-06-30T09:18:42.667160Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|66155174-a066c9da-c7ed7648-3b89e166_0 is DEAD 2025-06-30T09:18:42.667447Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:42.667796Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [5:7521669376577046312:2467] destroyed 2025-06-30T09:18:42.667819Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:18:42.668575Z :INFO: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] Starting read session 2025-06-30T09:18:42.668593Z :DEBUG: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] Starting session to cluster null (localhost:25635) 2025-06-30T09:18:42.668886Z :DEBUG: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:42.668892Z :DEBUG: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:42.668896Z :DEBUG: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] [null] Reconnecting session to cluster null in 0.000000s 2025-06-30T09:18:42.668963Z :ERROR: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] [null] Got error. Status: CLIENT_UNAUTHENTICATED. Description:
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation 2025-06-30T09:18:42.668969Z :DEBUG: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:42.668971Z :DEBUG: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:42.668986Z :INFO: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] [null] Closing session to cluster: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " } Get event on client 2025-06-30T09:18:42.669024Z :NOTICE: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:42.669029Z :DEBUG: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] [null] Abort session to cluster Got close event: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " }2025-06-30T09:18:42.669040Z :INFO: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] Closing read session. Close timeout: 0.000000s 2025-06-30T09:18:42.669046Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:18:42.669053Z :INFO: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] Counters: { Errors: 1 CurrentSessionLifetimeMs: 0 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:42.669061Z :NOTICE: [/Root] [/Root] [de571ddf-ffb3f5c0-6c2aa5bf-a3bcda94] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } >> EraseRowsTests::EraseRowsShouldSuccess >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint32 >> EraseRowsTests::EraseRowsFromReplicatedTable [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldEraseOnUint32 >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64Seconds >> TNetClassifierTest::TestInitFromRemoteSource [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMicroSeconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMilliSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromFile [GOOD] Test command err: 2025-06-30T09:18:42.517486Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669375917600038:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:42.517624Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003fa4/r3tmp/tmplmC56U/pdisk_1.dat 2025-06-30T09:18:42.579251Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:42.580400Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669375917599816:2080] 1751275122511787 != 1751275122511790 2025-06-30T09:18:42.590952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/003fa4/r3tmp/yandexhp5FZ6.tmp 2025-06-30T09:18:42.590970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/003fa4/r3tmp/yandexhp5FZ6.tmp 2025-06-30T09:18:42.591075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/003fa4/r3tmp/yandexhp5FZ6.tmp 2025-06-30T09:18:42.591145Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:42.615200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:42.615268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:42.619331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:43.516882Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Seconds |79.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpPg::PgAggregate-useSink [GOOD] >> KqpPg::MkqlTerminate >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] >> BasicUsage::WriteSessionNoAvailableDatabase [GOOD] >> BasicUsage::WriteSessionSwitchDatabases >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit >> THiveTest::TestHiveBalancerWithImmovableTablets [GOOD] >> THiveTest::TestHiveBalancerHighUsage >> BasicUsage::GetAllStartPartitionSessions [GOOD] >> BasicUsage::PreferredDatabaseNoFallback >> BasicUsage::BasicWriteSession [GOOD] >> BasicUsage::CloseWriteSessionImmediately ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::EraseRowsFromReplicatedTable [GOOD] >> TSchemeShardSysNames::ESchemeOpMkDir-NoProtect-NoDbAdmin-dbadmin [GOOD] >> BasicUsage::FallbackToSingleDb [GOOD] Test command err: 2025-06-30T09:18:41.768935Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:41.769138Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:41.769159Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004906/r3tmp/tmpnCdRGr/pdisk_1.dat 2025-06-30T09:18:41.877145Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:41.878085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:41.894676Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:41.896005Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275121319037 != 1751275121319041 2025-06-30T09:18:41.942715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:41.942787Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:41.953446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:42.028587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:42.046875Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:18:42.046962Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:42.057444Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:42.057489Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:42.057696Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:42.057705Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:42.057714Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:42.057787Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:42.057808Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:42.057821Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:18:42.068225Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:42.072746Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:42.072825Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:42.072851Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:18:42.072856Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:42.072860Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:42.072864Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:42.072998Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:42.073019Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:42.073028Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:42.073033Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:42.073042Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:42.073046Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:42.073061Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:18:42.073168Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:42.073218Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:42.073239Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:42.073518Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:42.083779Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:42.083814Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:42.231643Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-30T09:18:42.232918Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:18:42.232956Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:42.233334Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:42.233353Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:42.233369Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:42.233458Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:42.233504Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:42.233705Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:42.233724Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:42.234266Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:42.234400Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:42.234901Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:42.234916Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:42.235066Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:42.235082Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:42.235358Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:42.235367Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:42.235373Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:42.235391Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:42.235401Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:42.235411Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:42.236073Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:42.236390Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:42.236402Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:42.236541Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:42.240707Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:42.240743Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: ... TablesActor] ActorId: [2:272:2315], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:43.406364Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:18:43.406375Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004906/r3tmp/tmpGUkSEx/pdisk_1.dat 2025-06-30T09:18:43.527064Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-30T09:18:43.527549Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:43.543506Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:43.544117Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1751275122902675 != 1751275122902678 2025-06-30T09:18:43.595403Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:43.595452Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:43.607314Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:43.692879Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:43.708492Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:629:2533] 2025-06-30T09:18:43.708566Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:43.739107Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:43.739162Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:43.739365Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:43.739378Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:43.739386Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:43.739446Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:43.739479Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:43.739492Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [2:645:2533] in generation 1 2025-06-30T09:18:43.749842Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:43.749874Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:43.749907Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:43.749922Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:647:2543] 2025-06-30T09:18:43.749928Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:43.749932Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:43.749938Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:43.750084Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:43.750111Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:43.750124Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:43.750131Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:43.750141Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:43.750147Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:43.750168Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:625:2530], serverId# [2:636:2537], sessionId# [0:0:0] 2025-06-30T09:18:43.750283Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:43.750342Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:43.750368Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:43.750724Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:43.762998Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:43.763049Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:43.931493Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:662:2552], serverId# [2:664:2554], sessionId# [0:0:0] 2025-06-30T09:18:43.931684Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:18:43.931698Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:43.931736Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:43.931746Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:43.931758Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:43.931837Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:43.931875Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:43.931915Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:43.931929Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:43.932053Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:43.932153Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:43.932683Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:43.932697Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:43.932887Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:43.932899Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:43.933131Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:43.933142Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:43.933149Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:43.933168Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:43.933178Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:43.933190Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:43.933536Z node 2 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:43.933862Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:43.933915Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:43.933926Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:43.951852Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:698:2580], serverId# [2:699:2581], sessionId# [0:0:0] 2025-06-30T09:18:43.951913Z node 2 :TX_DATASHARD NOTICE: datashard__op_rows.cpp:168: Rejecting erase request on datashard: tablet# 72075186224037888, error# Can't execute erase at replicated table 2025-06-30T09:18:43.951944Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:698:2580], serverId# [2:699:2581], sessionId# [0:0:0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromRemoteSource [GOOD] Test command err: 2025-06-30T09:18:41.696927Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669374995402396:2166];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:41.697007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003fbc/r3tmp/tmpWZBwvS/pdisk_1.dat 2025-06-30T09:18:41.760783Z node 1 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:18366) connection closed with error: Connection refused 2025-06-30T09:18:41.760993Z node 1 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:18:41.767800Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:41.771069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:41.771083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:41.771086Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:41.771133Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:41.795462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:41.795503Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:41.796626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:42.698689Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 
>> TSchemeShardSysNames::ESchemeOpMkDir-NoProtect-NoDbAdmin-clusteradmin >> BasicUsage::FallbackToSingleDbAfterBadRequest >> KqpPg::InsertNoTargetColumns_NotOneSize+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_NotOneSize-useSink >> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldErase >> BasicUsage::WriteSessionWriteInHandlers [GOOD] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldErase [GOOD] >> DistributedEraseTests::ConditionalEraseRowsCheckLimits |79.5%| [TA] $(B)/ydb/core/mind/address_classification/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable >> EraseRowsTests::ConditionalEraseRowsShouldNotErase [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MilliSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds >> KqpPg::MkqlTerminate [GOOD] >> KqpPg::NoSelectFullScan ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionWriteInHandlers [GOOD] Test command err: 2025-06-30T09:18:37.295061Z :WriteSessionWriteInHandlers INFO: Random seed for debugging is 1751275117295051 2025-06-30T09:18:37.445758Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669355682341488:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:37.448051Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:37.461085Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669357372474091:2198];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d29/r3tmp/tmpbJaU5l/pdisk_1.dat 2025-06-30T09:18:37.512318Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:37.512919Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:37.515863Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:37.559776Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:37.570935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:37.570969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:37.572517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2111, node 1 2025-06-30T09:18:37.596877Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002d29/r3tmp/yandexPbLGnP.tmp 2025-06-30T09:18:37.596895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: /home/runner/.ya/build/build_root/cl3w/002d29/r3tmp/yandexPbLGnP.tmp 2025-06-30T09:18:37.597001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002d29/r3tmp/yandexPbLGnP.tmp 2025-06-30T09:18:37.597059Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:37.604628Z INFO: TTestServer started on Port 13171 GrpcPort 2111 2025-06-30T09:18:37.612578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:37.612612Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:37.616870Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:37.617763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13171 PQClient connected to localhost:2111 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:37.644143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-30T09:18:37.940630Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669355682342394:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.940696Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.941095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669355682342406:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.942295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:37.944363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669355682342440:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.944462Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.949353Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669355682342408:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:18:37.995075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:38.026120Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669359977309848:2701] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:38.046566Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669359977309859:2312], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:38.047448Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZGI1ZWRjOWEtYmFiMWQ0NjUtZDE4M2JkN2QtYzIxMmFhNjU=, ActorId: [1:7521669355682342391:2295], ActorState: ExecuteState, TraceId: 01jz0237bhcfaj1a52dfmarpjm, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:38.048032Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:38.051482Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669361667441568:2275], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:38.051607Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=MmIyNDg3ZDctYjdkNWMyOTgtY2E0YTg2MDQtZjFhYTQ3OTU=, ActorId: [2:7521669361667441516:2269], ActorState: ExecuteState, TraceId: 01jz0237ed3r8gaxx077ngfw53, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:38.051873Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:38.085847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:38.187220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:66 ... tion: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:18:44.615429Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-30T09:18:44.615442Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=177, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:44.615477Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 Topic 'rt3.dc1--test-topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-06-30T09:18:44.615493Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 177 count 1 last offset 0, current partition end offset: 1 2025-06-30T09:18:44.615498Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-30T09:18:44.615508Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. 
Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 177 accessed 0 times before, last time 2025-06-30T09:18:44.000000Z 2025-06-30T09:18:44.615511Z node 2 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-30T09:18:44.615526Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-30T09:18:44.615539Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 157 from pos 0 cbcount 1 2025-06-30T09:18:44.615554Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp done, result 1751275124612 queuesize 0 startOffset 0 2025-06-30T09:18:44.615571Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-30T09:18:44.615801Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' size 177 2025-06-30T09:18:44.615816Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-30T09:18:44.615936Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-06-30T09:18:44.616559Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-30T09:18:44.616714Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 1 written { } } write_statistics { persisting_time { nanos: 1000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-30T09:18:44.616722Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] OnAck: seqNo=1, txId=? 
2025-06-30T09:18:44.616726Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session: acknoledged message 1 === Inside AcksHandler 2025-06-30T09:18:44.616921Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write 1 messages with Id from 2 to 2 === Inside ReadyToAcceptHandler === AcksHandler has written a message, closing the session 2025-06-30T09:18:44.632098Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session: try to update token 2025-06-30T09:18:44.632118Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Send 1 message(s) (0 left), first sequence number is 2 2025-06-30T09:18:44.635190Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-30T09:18:44.635321Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-30T09:18:44.635763Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:44.635784Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:44.635828Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 2 requestId: cookie: 2 2025-06-30T09:18:44.636043Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-06-30T09:18:44.636247Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:44.636254Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:44.636276Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72075186224037892] got client message topic: rt3.dc1--test-topic partition: 0 SourceId: '\0src_id' SeqNo: 2 partNo : 0 messageNo: 3 size 107 offset: -1 2025-06-30T09:18:44.636336Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob processing sourceId '\0src_id' seqNo 2 partNo 0 2025-06-30T09:18:44.636381Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob complete sourceId '\0src_id' seqNo 2 partNo 0 FormedBlobsCount 0 NewHead: Offset 1 PartNo 0 PackedSize 181 count 1 nextOffset 2 batches 1 2025-06-30T09:18:44.636437Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Add new write blob: topic 'rt3.dc1--test-topic' partition 0 compactOffset 1,1 HeadOffset 1 endOffset 1 curOffset 2 d0000000000_00000000000000000001_00000_0000000001_00000? size 169 WTime 1751275124635 2025-06-30T09:18:44.636471Z node 2 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:18:44.636479Z node 2 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. 
Partition 0 offset 1 partNo 0 count 1 size 169 2025-06-30T09:18:44.639888Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 1 count 1 size 169 actorID [2:7521669383142278536:2362] 2025-06-30T09:18:44.639937Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 114 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:18:44.639950Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:18:44.639962Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0src_id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-06-30T09:18:44.640008Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=346, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:44.640021Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 3 requestId: cookie: 2 2025-06-30T09:18:44.640092Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 1 partno 0 count 1 parts 0 suffix '63' size 169 2025-06-30T09:18:44.643194Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-06-30T09:18:44.643693Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-30T09:18:44.643760Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 2 written { offset: 1 } } write_statistics { persisting_time { nanos: 3000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-30T09:18:44.643766Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] OnAck: seqNo=2, txId=? 2025-06-30T09:18:44.643772Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session: acknoledged message 2 === Inside AcksHandler === Inside SessionClosedHandler 2025-06-30T09:18:44.643917Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write 1 messages with Id from 3 to 3 === SessionClosedHandler has 'written' a message 2025-06-30T09:18:44.643949Z :INFO: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session: close. 
Timeout 0.000000s 2025-06-30T09:18:44.643952Z :INFO: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session will now close 2025-06-30T09:18:44.643957Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session: aborting 2025-06-30T09:18:44.644041Z :WARNING: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-06-30T09:18:44.644046Z :DEBUG: [/Root] TraceId [] SessionId [src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0] MessageGroupId [src_id] Write session: destroy 2025-06-30T09:18:44.645530Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0 grpc read done: success: 0 data: 2025-06-30T09:18:44.645544Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0 grpc read failed 2025-06-30T09:18:44.645551Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0 grpc closed 2025-06-30T09:18:44.645560Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: src_id|864bb39-77d444ed-6dabd8bc-7e8e6e6c_0 is DEAD 2025-06-30T09:18:44.645840Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:44.646150Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [1:7521669385747114774:2467] destroyed 2025-06-30T09:18:44.646184Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> EraseRowsTests::EraseRowsShouldSuccess [GOOD] >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors >> TSchemeShardSysNames::ESchemeOpMkDir-NoProtect-NoDbAdmin-clusteradmin [GOOD] >> TSchemeShardSysNames::ESchemeOpMkDir-NoProtect-NoDbAdmin-system >> TxUsage::WriteToTopic_Demo_24_Query [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64Seconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint32 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit [GOOD] Test command err: === Server->StartServer(false); 2025-06-30T09:18:32.080226Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669336153491256:2155];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:32.076296Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669334832514011:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:32.076317Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:32.424809Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00402a/r3tmp/tmpGpX33L/pdisk_1.dat 2025-06-30T09:18:32.433311Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:32.434374Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:32.652668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:32.652693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:32.652755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:32.652762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:32.657055Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:32.671528Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:32.671597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:32.673900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11759, node 1 2025-06-30T09:18:33.079101Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:33.082788Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 
2025-06-30T09:18:33.096183Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/00402a/r3tmp/yandexu0fj1E.tmp 2025-06-30T09:18:33.096201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/00402a/r3tmp/yandexu0fj1E.tmp 2025-06-30T09:18:33.096271Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/00402a/r3tmp/yandexu0fj1E.tmp 2025-06-30T09:18:33.096328Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:33.110995Z INFO: TTestServer started on Port 12432 GrpcPort 11759 TClient is connected to server localhost:12432 PQClient connected to localhost:11759 === TenantModeEnabled() = 1 === Init PQ - start server on port 11759 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:33.264967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:18:33.265581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.265650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:18:33.265655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:18:33.266308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:18:33.266324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:33.268070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:18:33.268127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:18:33.268182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.268192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:18:33.268195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-30T09:18:33.268199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710657:0 2 -> 3 2025-06-30T09:18:33.268958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.268981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:18:33.268985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710657:0 3 -> 128 2025-06-30T09:18:33.269453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 
281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.269463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-30T09:18:33.269466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-30T09:18:33.269486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-30T09:18:33.270500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:33.270981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-30T09:18:33.271050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:18:33.271768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 1751275113315, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-30T09:18:33.271804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1751275113315 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-30T09:18:33.271811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-30T09:18:33.271877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710657:0 128 -> 240 2025-06-30T09:18:33.271883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-30T09:18:33.271918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-30T09:18:33.271928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-30T09:18:33.272427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-30T09:18:33.272436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-30T09:18:33.272486Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_bo ... opic' partition 0 part blob processing sourceId '\0001236' seqNo 1 partNo 2 2025-06-30T09:18:44.905009Z :INFO: [] MessageGroupId [1236] SessionId [1236|e5db0e04-f05d526d-e9bf402a-8b8c06d2_0] Write session will now close 2025-06-30T09:18:44.905032Z :DEBUG: [] MessageGroupId [1236] SessionId [1236|e5db0e04-f05d526d-e9bf402a-8b8c06d2_0] Write session: aborting 2025-06-30T09:18:44.905268Z :INFO: [] MessageGroupId [1236] SessionId [1236|e5db0e04-f05d526d-e9bf402a-8b8c06d2_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:44.905279Z :DEBUG: [] MessageGroupId [1236] SessionId [1236|e5db0e04-f05d526d-e9bf402a-8b8c06d2_0] Write session: destroy DURATION 2.920050s 2025-06-30T09:18:44.907412Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:96: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob sourceId '\0001236' seqNo 1 partNo 2 result is x0000000000_00000000000000000000_00000_0000000006_00014 size 8225586 2025-06-30T09:18:44.907440Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1117: [PQ: 72075186224037899, Partition: 0, State: StateIdle] writing blob: topic 'PQ/account3/folder1/folder2/topic' partition 0 old key x0000000000_00000000000000000000_00000_0000000006_00014 new key d0000000000_00000000000000000000_00000_0000000006_00014 size 8225586 WTime 1751275124907 2025-06-30T09:18:44.907487Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob complete sourceId '\0001236' seqNo 1 partNo 2 FormedBlobsCount 1 NewHead: Offset 6 PartNo 2 PackedSize 176227 count 1 nextOffset 7 batches 1 2025-06-30T09:18:44.907614Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:401: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Add new write blob: topic 'PQ/account3/folder1/folder2/topic' partition 0 compactOffset 6,1 HeadOffset 0 endOffset 0 curOffset 7 d0000000000_00000000000000000006_00002_0000000001_00000| size 176217 WTime 1751275124907 2025-06-30T09:18:44.907669Z node 2 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:18:44.907679Z node 2 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 0 partNo 0 count 6 size 8225586 2025-06-30T09:18:44.907684Z node 2 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. 
Partition 0 offset 6 partNo 2 count 1 size 176217 2025-06-30T09:18:44.908496Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 22 sessionId: 1236|e5db0e04-f05d526d-e9bf402a-8b8c06d2_0 grpc read done: success: 0 data: 2025-06-30T09:18:44.908510Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 22 sessionId: 1236|e5db0e04-f05d526d-e9bf402a-8b8c06d2_0 grpc read failed 2025-06-30T09:18:44.908520Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 22 sessionId: 1236|e5db0e04-f05d526d-e9bf402a-8b8c06d2_0 grpc closed 2025-06-30T09:18:44.908525Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 22 sessionId: 1236|e5db0e04-f05d526d-e9bf402a-8b8c06d2_0 is DEAD 2025-06-30T09:18:44.908825Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037899 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:44.909386Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037899] server disconnected, pipe [1:7521669387693101722:2558] destroyed 2025-06-30T09:18:44.909406Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037899, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:18:44.948843Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669387693101737:2564], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:44.949976Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NTNjNGRjZWQtZGY3MzE3M2ItZTRiYWVhMTgtNjJkYTExMWY=, ActorId: [1:7521669387693101735:2563], ActorState: ExecuteState, TraceId: 01jz023e6fchgjwjzeb1p715w9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:44.950122Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:44.963563Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 0 count 6 size 8225586 actorID [2:7521669373487220513:2323] 2025-06-30T09:18:44.963580Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 6 count 1 size 176217 actorID [2:7521669373487220513:2323] 2025-06-30T09:18:44.963605Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:323: [PQ: 72075186224037899, Partition: 0, State: StateIdle] compaction completed 2025-06-30T09:18:44.963735Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037899, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:18:44.963739Z node 2 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:18:44.963746Z node 2 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000006_00000_0000000001_00002?(+) to d0000000000_00000000000000000006_00000_0000000001_00002?(+) 2025-06-30T09:18:44.963750Z node 2 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000005_00000_0000000001_00002?(+) to d0000000000_00000000000000000005_00000_0000000001_00002?(+) 2025-06-30T09:18:44.963751Z node 2 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000004_00000_0000000001_00002?(+) to d0000000000_00000000000000000004_00000_0000000001_00002?(+) 2025-06-30T09:18:44.963753Z node 2 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000003_00000_0000000001_00002?(+) to d0000000000_00000000000000000003_00000_0000000001_00002?(+) 2025-06-30T09:18:44.963756Z node 2 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000002_00000_0000000001_00002?(+) to d0000000000_00000000000000000002_00000_0000000001_00002?(+) 2025-06-30T09:18:44.963757Z node 2 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000001_00000_0000000001_00002?(+) to d0000000000_00000000000000000001_00000_0000000001_00002?(+) 2025-06-30T09:18:44.963761Z node 2 :PERSQUEUE DEBUG: read.h:348: CacheProxy. 
Delete blobs from d0000000000_00000000000000000000_00000_0000000001_00002?(+) to d0000000000_00000000000000000000_00000_0000000001_00002?(+) 2025-06-30T09:18:44.964257Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037899' partition 0 offset 0 partno 0 count 6 parts 14 suffix '0' size 8225586 2025-06-30T09:18:44.964268Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037899' partition 0 offset 6 partno 2 count 1 parts 0 suffix '124' size 176217 2025-06-30T09:18:44.965280Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 6 count 1 actorID [2:7521669373487220513:2323] 2025-06-30T09:18:44.965289Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 5 count 1 actorID [2:7521669373487220513:2323] 2025-06-30T09:18:44.965295Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 4 count 1 actorID [2:7521669373487220513:2323] 2025-06-30T09:18:44.965299Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 3 count 1 actorID [2:7521669373487220513:2323] 2025-06-30T09:18:44.965303Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 2 count 1 actorID [2:7521669373487220513:2323] 2025-06-30T09:18:44.965308Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 1 count 1 actorID [2:7521669373487220513:2323] 2025-06-30T09:18:44.965311Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 0 count 1 actorID [2:7521669373487220513:2323] 2025-06-30T09:18:44.965345Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037899, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:18:44.965357Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037899, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:18:44.965557Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 6 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-30T09:18:44.965585Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 5 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-30T09:18:44.965591Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 4 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-30T09:18:44.965596Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 3 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-30T09:18:44.965603Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 2 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-30T09:18:44.965608Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 1 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-30T09:18:44.965613Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. 
Tablet '72075186224037899' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-30T09:18:44.978893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-30T09:18:44.978978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 8 shard idx 72057594046644480:1 data size 0 row count 0 2025-06-30T09:18:44.979018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037888 maps to shardIdx: 72057594046644480:1 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 8], pathId map=user, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-30T09:18:44.979068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037888 2025-06-30T09:18:44.979308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-30T09:18:45.011229Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037889, Partition: 0, State: StateIdle] no data for compaction >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMicroSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDate32 >> TSchemeShardTTLTestsWithReboots::MoveTable >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable [GOOD] >> KqpPg::InsertNoTargetColumns_NotOneSize-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter+useSink >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Seconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Milliseconds >> DistributedEraseTests::ConditionalEraseRowsShouldEraseOnUint32 [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSchemeTx >> TSchemeShardTTLTests::ShouldSkipDroppedColumn >> THiveTest::TestHiveBalancerHighUsage [GOOD] >> THiveTest::TestHiveBalancerHighUsageAndColumnShards ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds [GOOD] Test command err: 2025-06-30T09:18:41.804281Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:41.804398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:41.804420Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004916/r3tmp/tmpUqrImX/pdisk_1.dat 2025-06-30T09:18:41.927290Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:41.928173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:41.947037Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:41.948247Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275121202933 != 1751275121202937 2025-06-30T09:18:41.991080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:41.991134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:42.001977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:42.085257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:42.107483Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:18:42.107609Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:42.118982Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:42.119032Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:42.119287Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:42.119300Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:42.119310Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:42.119384Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:42.119413Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:42.119430Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:18:42.131032Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:42.141757Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:42.141858Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:42.141894Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:18:42.141901Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:42.141907Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:42.141913Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:42.142109Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:42.142136Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:42.142150Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:42.142158Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:42.142168Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:42.142174Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:42.142198Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:18:42.142324Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:42.142384Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:42.142405Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:42.146204Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:42.156875Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:42.156923Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:42.307825Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-30T09:18:42.308957Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:18:42.308985Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:42.309260Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:42.309271Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:42.309284Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:42.309342Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:42.309381Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:42.309531Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:42.309549Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:42.309931Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:42.310035Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:42.310368Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:42.310378Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:42.310518Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:42.310528Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:42.310994Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:42.311015Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:42.311023Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:42.311044Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:42.311056Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:42.311070Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:42.314647Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:42.315229Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:42.315253Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:42.315485Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:42.322087Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:42.322135Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: ... 24037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:45.609496Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:45.609590Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:45.609633Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:45.609658Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.609674Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:45.609822Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:45.609930Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.610427Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:45.610440Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.610786Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:45.610803Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.611026Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.611037Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.611046Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:45.611939Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:45.611969Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:45.611993Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.612453Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.612753Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 
2025-06-30T09:18:45.612766Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:45.612911Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:45.617100Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:45.617129Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:706:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:45.617142Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:45.618441Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:45.620013Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.661414Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:45.774998Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.775663Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:710:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:45.807924Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:780:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:45.826103Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz023evg86tfw0tbykzaqwfj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZGVmNjI4MjctYWExYzJmYzItZmU5Yzk5OGQtNGRjMjMxMmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:18:45.826986Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:811:2642], serverId# [3:812:2643], sessionId# [0:0:0] 2025-06-30T09:18:45.827149Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037888 2025-06-30T09:18:45.827207Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-06-30T09:18:45.837679Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.841310Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:819:2649], serverId# [3:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:45.841639Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:45.852048Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:45.852071Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.852140Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:45.852149Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-30T09:18:45.852225Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.852233Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.852242Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:45.852252Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.852266Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:819:2649], serverId# [3:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:45.852496Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:45.852567Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:45.852601Z node 3 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.852606Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:45.852617Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-30T09:18:45.852658Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:45.852665Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.852783Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-30T09:18:45.852848Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 37, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-30T09:18:45.852874Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-30T09:18:45.852878Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-30T09:18:45.852953Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:45.852958Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-30T09:18:45.852976Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.852980Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:45.852984Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-30T09:18:45.853009Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.853015Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.853021Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:45.789453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, 
MaxRate# 1 2025-06-30T09:18:45.789484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:45.789490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:45.789496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:45.789511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:45.789517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:45.789529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:45.789546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:45.789659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:45.789743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:45.807571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:45.807602Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:45.811972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:45.812048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:45.812085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:45.817447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:45.817556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:45.817725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:45.817826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:45.818777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:45.818858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:45.819209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:45.819222Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:45.819257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:45.819266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:45.819274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:45.819299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:45.822014Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:45.846548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:45.846671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:45.847208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:45.847228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:45.847296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:45.847317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:45.851302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:45.851365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:45.851456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:45.851470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:45.851477Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:45.851484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:45.852982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:45.853001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:45.853011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:45.853557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:45.853569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:45.853577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:45.853586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:45.854456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:45.855026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:45.855075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:45.855320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:45.855352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:45.855361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:45.855436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:45.855447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, 
operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:45.855488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:45.855502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:45.855999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:45.856009Z node 1 :FLAT_TX_SCHEMESHARD ... 0743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 101:2 129 -> 240 2025-06-30T09:18:46.000980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 330 RawX2: 4294969607 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-30T09:18:46.000986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409547, partId: 0 2025-06-30T09:18:46.000999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 330 RawX2: 4294969607 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-30T09:18:46.001005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-30T09:18:46.001015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 330 RawX2: 4294969607 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-30T09:18:46.001022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:46.001027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.001032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-30T09:18:46.001037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 101:0 129 -> 240 2025-06-30T09:18:46.006308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:18:46.006350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:18:46.007334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:18:46.007370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:18:46.007391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-30T09:18:46.007418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.007438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-30T09:18:46.007593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-30T09:18:46.007607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:2 ProgressState 2025-06-30T09:18:46.007629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-30T09:18:46.007636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-30T09:18:46.007643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-30T09:18:46.007647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-30T09:18:46.007654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-06-30T09:18:46.007743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.007794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.007800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-30T09:18:46.007811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-30T09:18:46.007816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-30T09:18:46.007823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-30T09:18:46.007827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-30T09:18:46.007833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-06-30T09:18:46.007860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:384:2350] message: TxId: 101 2025-06-30T09:18:46.007868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation 
IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-30T09:18:46.007879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-30T09:18:46.007885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 101:0 2025-06-30T09:18:46.007919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:18:46.007927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:1 2025-06-30T09:18:46.007931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 101:1 2025-06-30T09:18:46.007938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:18:46.007943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:2 2025-06-30T09:18:46.007947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 101:2 2025-06-30T09:18:46.007957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:18:46.008686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:18:46.008702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:385:2351] TestWaitNotification: OK eventTxId 101 2025-06-30T09:18:46.008885Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:46.008967Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 109us result status StatusSuccess 2025-06-30T09:18:46.009159Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 
72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldErase [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query [GOOD] >> KqpPg::TableArrayInsert-useSink [GOOD] >> KqpPg::Returning+useSink |79.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats >> KqpPg::NoSelectFullScan [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds [GOOD] >> KqpPg::LongDomainName >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors [GOOD] >> TSchemeShardSysNames::ESchemeOpMkDir-NoProtect-NoDbAdmin-system [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-NoDbAdmin-anonymous >> KqpPg::InsertNoTargetColumns_Alter+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter-useSink >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds [GOOD] |79.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query [GOOD] >> TSchemeShardTTLTests::ShouldCheckQuotas |79.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |79.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDate32 [GOOD] >> KqpPg::V1CreateTable [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 >> BasicUsage::ReadMirrored >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Milliseconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType >> KqpPg::LongDomainName [GOOD] >> TSchemeShardTTLTestsWithReboots::AlterTable >> TSchemeShardColumnTableTTL::AlterColumnTable_Negative >> TSchemeShardTTLTests::ShouldCheckQuotas [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSchemeTx [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard >> KqpPg::Returning+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter-useSink [GOOD] >> KqpPg::ValuesInsert+useSink |79.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit [GOOD] Test command err: 2025-06-30T09:18:42.085123Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:42.085212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:42.085230Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048f7/r3tmp/tmpnNhYqF/pdisk_1.dat 2025-06-30T09:18:42.199386Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:42.200301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:42.219504Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:42.220809Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275121570522 != 1751275121570526 2025-06-30T09:18:42.265629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:42.265675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:42.276269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:42.352977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:42.374241Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:652:2550] 2025-06-30T09:18:42.374347Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:42.384744Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:42.384824Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:42.385026Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:42.385037Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:42.385044Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:42.385112Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:42.385253Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:654:2552] 2025-06-30T09:18:42.385293Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:42.387015Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:42.387043Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:691:2550] in generation 1 2025-06-30T09:18:42.387221Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:42.387246Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:42.387401Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:18:42.387410Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:18:42.387417Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:18:42.387463Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:42.387503Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:42.387512Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:695:2552] in generation 1 2025-06-30T09:18:42.387851Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:660:2554] 2025-06-30T09:18:42.387894Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:42.389413Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:42.389445Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:42.389581Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-30T09:18:42.389589Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-30T09:18:42.389597Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-30T09:18:42.389640Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:42.389661Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:42.389673Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:703:2554] in generation 1 2025-06-30T09:18:42.400559Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:42.405681Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:42.405782Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:42.405845Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:707:2581] 2025-06-30T09:18:42.405850Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:42.405856Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:42.405863Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:42.406019Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:42.406028Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:18:42.406040Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:42.406049Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:708:2582] 2025-06-30T09:18:42.406053Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:18:42.406056Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:18:42.406060Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:18:42.406165Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:42.406191Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:42.406206Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:42.406211Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-30T09:18:42.406221Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:42.406228Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:709:2583] 2025-06-30T09:18:42.406232Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-30T09:18:42.406235Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-30T09:18:42.406238Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:18:42.406282Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:42.406289Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:42.406299Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:42.406304Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:42.406311Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-30T09:18:42.406321Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-30T09:18:42.406336Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:641:2545], serverId# [1:665:2556], sessionId# 
[0:0:0] 2025-06-30T09:18:42.406348Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:18:42.406352Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:42.406356Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-30T09:18:42.406360Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:18:42.406503Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:42.406561Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:42.406580Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:42.406688Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server ... node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 7, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-30T09:18:46.609583Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 7, finished edge# 0, front# 0 2025-06-30T09:18:46.609741Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 8, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-30T09:18:46.609748Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 8, finished edge# 0, front# 0 2025-06-30T09:18:46.609918Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-30T09:18:46.609927Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 1001, finished edge# 0, front# 0 2025-06-30T09:18:46.610029Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:256: 72075186224037889 snapshot complete for split OpId 281474976715663 2025-06-30T09:18:46.610076Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 3 snapshot size is 12 total snapshot size is 12 for split OpId 281474976715663 2025-06-30T09:18:46.610086Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 4 snapshot size is 12 total snapshot size is 24 for split OpId 281474976715663 2025-06-30T09:18:46.610091Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 7 snapshot size is 12 total snapshot size is 36 for split OpId 281474976715663 2025-06-30T09:18:46.610097Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 8 snapshot size is 12 total snapshot size is 48 for split OpId 281474976715663 2025-06-30T09:18:46.610133Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 1001 snapshot size is 146 total snapshot size is 194 for split OpId 281474976715663 2025-06-30T09:18:46.610184Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 
72075186224037889 BorrowSnapshot: table 3 snapshot size is 12 total snapshot size is 206 for split OpId 281474976715663 2025-06-30T09:18:46.610190Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 4 snapshot size is 12 total snapshot size is 218 for split OpId 281474976715663 2025-06-30T09:18:46.610196Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 7 snapshot size is 12 total snapshot size is 230 for split OpId 281474976715663 2025-06-30T09:18:46.610201Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 8 snapshot size is 12 total snapshot size is 242 for split OpId 281474976715663 2025-06-30T09:18:46.610221Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 1001 snapshot size is 155 total snapshot size is 397 for split OpId 281474976715663 2025-06-30T09:18:46.610348Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:424: 72075186224037889 Sending snapshots from src for split OpId 281474976715663 2025-06-30T09:18:46.610381Z node 3 :TX_DATASHARD DEBUG: datashard_impl.h:2342: Sending snapshot for split opId 281474976715663 from datashard 72075186224037889 to datashard 72075186224037892 size 221 2025-06-30T09:18:46.610400Z node 3 :TX_DATASHARD DEBUG: datashard_impl.h:2342: Sending snapshot for split opId 281474976715663 from datashard 72075186224037889 to datashard 72075186224037891 size 215 2025-06-30T09:18:46.610477Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [3:1156:2866], serverId# [3:1157:2867], sessionId# [0:0:0] 2025-06-30T09:18:46.610487Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037892, clientId# [3:1155:2865], serverId# [3:1158:2868], sessionId# [0:0:0] 2025-06-30T09:18:46.610520Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:175: 72075186224037891 Received snapshot for split/merge TxId 281474976715663 from tabeltId 72075186224037889 2025-06-30T09:18:46.610646Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:175: 72075186224037892 Received snapshot for split/merge TxId 281474976715663 from tabeltId 72075186224037889 2025-06-30T09:18:46.611245Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037891 ack snapshot OpId 281474976715663 2025-06-30T09:18:46.611303Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037891 2025-06-30T09:18:46.611328Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037891 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:46.611347Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-30T09:18:46.611377Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [3:1161:2871] 2025-06-30T09:18:46.611382Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-06-30T09:18:46.611390Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037891 2025-06-30T09:18:46.611396Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-30T09:18:46.611496Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 
72075186224037889 Received snapshot Ack from dst 72075186224037891 for split OpId 281474976715663 2025-06-30T09:18:46.611603Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-06-30T09:18:46.611612Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:46.611622Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-30T09:18:46.611628Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-30T09:18:46.611664Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037891 time 2000 2025-06-30T09:18:46.611671Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-30T09:18:46.611746Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [3:1156:2866], serverId# [3:1157:2867], sessionId# [0:0:0] 2025-06-30T09:18:46.611779Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037892 ack snapshot OpId 281474976715663 2025-06-30T09:18:46.611798Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037892 2025-06-30T09:18:46.611812Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037892 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:46.611823Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-30T09:18:46.611830Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037892, actorId: [3:1163:2873] 2025-06-30T09:18:46.611834Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037892 2025-06-30T09:18:46.611839Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037892 2025-06-30T09:18:46.611843Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-30T09:18:46.611871Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037892 for split OpId 281474976715663 2025-06-30T09:18:46.612017Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 2000 2025-06-30T09:18:46.612024Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-30T09:18:46.612045Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [3:1155:2865], serverId# [3:1158:2868], sessionId# [0:0:0] 2025-06-30T09:18:46.612068Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037891 coordinator 72057594046316545 last step 1500 next step 2000 2025-06-30T09:18:46.612076Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037891: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-30T09:18:46.612088Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: 
TTxProgressTransaction::Execute at 72075186224037892 2025-06-30T09:18:46.612092Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:46.612098Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-06-30T09:18:46.612103Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-30T09:18:46.612149Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 1500 next step 2000 2025-06-30T09:18:46.612155Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-30T09:18:46.632818Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037889 ack split to schemeshard 281474976715663 2025-06-30T09:18:46.633764Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715663, at datashard: 72075186224037889, state: SplitSrcWaitForPartitioningChanged 2025-06-30T09:18:46.634236Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-06-30T09:18:46.634251Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037889 2025-06-30T09:18:46.634496Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:18:46.634509Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:21: Progress tx at non-ready tablet 72075186224037889 state 5 2025-06-30T09:18:46.634569Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1047:2786], serverId# [3:1048:2787], sessionId# [0:0:0] 2025-06-30T09:18:46.634618Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037889 ack split partitioning changed to schemeshard 281474976715663 2025-06-30T09:18:46.634631Z node 3 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-30T09:18:46.634640Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/unittest >> TxUsage::WriteToTopic_Demo_24_Query [GOOD] Test command err: 2025-06-30T09:16:44.987465Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668870984389973:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:44.987629Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cb0/r3tmp/tmpboLhEi/pdisk_1.dat 2025-06-30T09:16:45.014890Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:16:45.032253Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:45.032600Z node 1 :CONFIGS_DISPATCHER 
ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521668870984389949:2080] 1751275004987292 != 1751275004987295 TServer::EnableGrpc on GrpcPort 18284, node 1 2025-06-30T09:16:45.049695Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002cb0/r3tmp/yandexi1kFX3.tmp 2025-06-30T09:16:45.049714Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002cb0/r3tmp/yandexi1kFX3.tmp 2025-06-30T09:16:45.049806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002cb0/r3tmp/yandexi1kFX3.tmp 2025-06-30T09:16:45.049875Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:45.054395Z INFO: TTestServer started on Port 28922 GrpcPort 18284 TClient is connected to server localhost:28922 PQClient connected to localhost:18284 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:45.089410Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:45.089449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:45.090598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:16:45.117057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:45.122176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-30T09:16:45.133231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:16:45.202865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:16:45.361324Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668875279358012:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:45.361395Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:45.361529Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668875279358033:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:45.362489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:45.362804Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668875279358064:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:45.362825Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:45.365288Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668875279358035:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:16:45.407026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:45.416844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:45.426627Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668875279358217:2508] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:16:45.441085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:45.447587Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521668875279358236:2321], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:16:45.448228Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZTI1Y2VjYWQtM2Y0ZTE1YTQtNGQ0YjJlZmYtOTBkODZkZjk=, ActorId: [1:7521668875279358008:2294], ActorState: ExecuteState, TraceId: 01jz01zsde9t3ekjgrw0qnhpbj, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:16:45.448733Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521668875279358382:2611] 2025-06-30T09:16:45.990669Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:49.987948Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668870984389973:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:49.987984Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:16:50.698141Z :WriteToTopic_Demo_22_RestartNo_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:16:50.701358Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:16:50.706709Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521668896754195073:2697] connected; active server actors: 1 2025-06-30T09:16:50.706933Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-30T09:16:50.707071Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:16 ... -b82f7b72] [] Returning serverBytesSize = 337 to budget 2025-06-30T09:18:43.938936Z :DEBUG: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] [] In ContinueReadingDataImpl, ReadSizeBudget = 337, ReadSizeServerDelta = 52428463 2025-06-30T09:18:43.939059Z :DEBUG: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-30T09:18:43.939108Z :DEBUG: [/Root] Take Data. Partition 0. 
Read: {0, 0} (0-0) 2025-06-30T09:18:43.939120Z :DEBUG: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] [] The application data is transferred to the client. Number of messages 1, size 144 bytes 2025-06-30T09:18:43.939128Z :DEBUG: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] [] Returning serverBytesSize = 0 to budget 0 1 2025-06-30T09:18:43.939160Z :DEBUG: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] [] Commit offsets [0, 1). Partition stream id: 1 2025-06-30T09:18:43.939365Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 grpc read done: success# 1, data# { read_request { bytes_size: 337 } } 2025-06-30T09:18:43.939403Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 got read request: guid# e23069a8-c0d38264-49e6d452-ac083ef6 2025-06-30T09:18:43.939484Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 grpc read done: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 1 offsets { end: 1 } } } } 2025-06-30T09:18:43.939535Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 1 prev 0 end 1 by cookie 2 2025-06-30T09:18:43.939651Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-30T09:18:43.939660Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-30T09:18:43.939701Z node 11 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 1 (startOffset 0) session test-consumer_11_1_11424405637178450922_v1 2025-06-30T09:18:43.939735Z node 11 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:18:43.946831Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 1 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:18:43.946871Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:18:43.946893Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=293, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:43.946946Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-30T09:18:43.947000Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-30T09:18:43.947008Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 1 endOffset 1 with cookie 2 2025-06-30T09:18:43.947027Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 1 2025-06-30T09:18:43.950872Z :DEBUG: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 1 } } 2025-06-30T09:18:44.872793Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:18:44.873874Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 4 sessionId: test-message_group_id|40ef5b54-5caaa45-83db57cc-13bc3b0f_0 describe result for acl check 2025-06-30T09:18:44.907397Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:0:1 2025-06-30T09:18:44.907426Z :INFO: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1001 BytesRead: 144 MessagesRead: 1 BytesReadCompressed: 144 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:44.919822Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 checking auth because of timeout 2025-06-30T09:18:44.919859Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 auth for : test-consumer 2025-06-30T09:18:44.920115Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 Handle describe topics response 2025-06-30T09:18:44.920131Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 auth is DEAD 2025-06-30T09:18:44.920149Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:18:45.978631Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037894] server connected, pipe [11:7521669391830283710:2947], now have 1 active actors on pipe 2025-06-30T09:18:45.979557Z :INFO: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] Closing read session. 
Close timeout: 0.000000s 2025-06-30T09:18:45.979576Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:0:1 2025-06-30T09:18:45.979587Z :INFO: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2073 BytesRead: 144 MessagesRead: 1 BytesReadCompressed: 144 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:45.979610Z :NOTICE: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:18:45.979620Z :DEBUG: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] [] Abort session to cluster 2025-06-30T09:18:45.979815Z :NOTICE: [/Root] [/Root] [f5c6b68f-ee2f42df-70d8cf8f-b82f7b72] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:45.980175Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|40ef5b54-5caaa45-83db57cc-13bc3b0f_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2025-06-30T09:18:45.980185Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|40ef5b54-5caaa45-83db57cc-13bc3b0f_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:18:45.980192Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|40ef5b54-5caaa45-83db57cc-13bc3b0f_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:18:45.980311Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|40ef5b54-5caaa45-83db57cc-13bc3b0f_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:45.980316Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|40ef5b54-5caaa45-83db57cc-13bc3b0f_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:18:45.983297Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 grpc read done: success# 0, data# { } 2025-06-30T09:18:45.983314Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 grpc read failed 2025-06-30T09:18:45.983321Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 grpc closed 2025-06-30T09:18:45.983332Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_11424405637178450922_v1 is DEAD 2025-06-30T09:18:45.983543Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message_group_id|40ef5b54-5caaa45-83db57cc-13bc3b0f_0 grpc read done: success: 0 data: 2025-06-30T09:18:45.983545Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message_group_id|40ef5b54-5caaa45-83db57cc-13bc3b0f_0 grpc read failed 2025-06-30T09:18:45.983551Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message_group_id|40ef5b54-5caaa45-83db57cc-13bc3b0f_0 grpc closed 2025-06-30T09:18:45.983553Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message_group_id|40ef5b54-5caaa45-83db57cc-13bc3b0f_0 is DEAD 2025-06-30T09:18:45.983665Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:45.983674Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:45.983735Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521669383240349008:2493] disconnected; active server actors: 1 2025-06-30T09:18:45.983738Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521669383240349008:2493] client test-consumer disconnected session test-consumer_11_1_11424405637178450922_v1 2025-06-30T09:18:45.983769Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669383240348946:2482] destroyed 2025-06-30T09:18:45.983776Z node 11 :PERSQUEUE DEBUG: 
pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669383240348949:2482] destroyed 2025-06-30T09:18:45.983779Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_11424405637178450922_v1 2025-06-30T09:18:45.983784Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669383240349011:2496] destroyed 2025-06-30T09:18:45.983794Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:18:45.983823Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_11424405637178450922_v1 |79.5%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |79.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge >> KqpPg::Returning-useSink >> KqpPg::InsertNoTargetColumns_Serial+useSink >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-NoDbAdmin-anonymous [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-NoDbAdmin-ordinaryuser |79.6%| [TA] {RESULT} $(B)/ydb/core/mind/address_classification/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 [GOOD] >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed >> KqpPg::TableSelect+useSink [GOOD] >> KqpPg::TableSelect-useSink |79.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors [GOOD] Test command err: 2025-06-30T09:18:44.742561Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:44.742678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:44.742698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048db/r3tmp/tmpUM6jZ7/pdisk_1.dat 2025-06-30T09:18:44.891025Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:44.892057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:44.921053Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:44.922492Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275124088905 != 1751275124088909 2025-06-30T09:18:44.968134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:44.968178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:44.979317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:45.059425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:45.081725Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:18:45.081830Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:45.094188Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:45.094240Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:45.094460Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:45.094470Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:45.094479Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:45.094549Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:45.094572Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:45.094587Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:18:45.107234Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:45.113065Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:45.113169Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:45.113207Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:18:45.113213Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.113218Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:45.113224Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.113420Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:45.113449Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:45.113462Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.113470Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.113480Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:45.113486Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.113509Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:18:45.113649Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:45.113710Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:45.113735Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:45.114155Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.127085Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:45.127139Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:45.289780Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-30T09:18:45.290967Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:18:45.290998Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.291300Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.291312Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:45.291325Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:45.291396Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:45.291431Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:45.291591Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.291607Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:45.292098Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:45.292213Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.292606Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:45.292615Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.292773Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:45.292787Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.294015Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.294049Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.294058Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:45.294079Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:45.294091Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:45.294105Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.295125Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.295586Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:45.295603Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:45.295793Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:45.300479Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:45.300514Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: ... shard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:46.603655Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:46.603661Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.603813Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:46.603847Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:46.603862Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:46.603871Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:46.603881Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:46.603887Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:46.603913Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:625:2530], serverId# [2:636:2537], sessionId# [0:0:0] 2025-06-30T09:18:46.604049Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:46.604118Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:46.604142Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:46.604565Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:46.615000Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:46.615058Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:46.763811Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:662:2552], serverId# [2:664:2554], sessionId# [0:0:0] 2025-06-30T09:18:46.764031Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:18:46.764045Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.764082Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-30T09:18:46.764090Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:46.764103Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:46.764200Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:46.764240Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:46.764282Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:46.764294Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:46.764420Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:46.764542Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:46.765095Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:46.765111Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.765338Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:46.765352Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:46.765585Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:46.765596Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:46.765603Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:46.765622Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:46.765633Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:46.765644Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.766022Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:46.766352Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:46.766399Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 
datashard 72075186224037888 state Ready 2025-06-30T09:18:46.766407Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:46.771982Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:698:2580], serverId# [2:699:2581], sessionId# [0:0:0] 2025-06-30T09:18:46.772050Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:46.794721Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:46.794787Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.794888Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:698:2580], serverId# [2:699:2581], sessionId# [0:0:0] 2025-06-30T09:18:46.797287Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:704:2586], serverId# [2:705:2587], sessionId# [0:0:0] 2025-06-30T09:18:46.797352Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:46.797420Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:46.797428Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.797464Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:704:2586], serverId# [2:705:2587], sessionId# [0:0:0] 2025-06-30T09:18:46.816286Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:709:2591], serverId# [2:710:2592], sessionId# [0:0:0] 2025-06-30T09:18:46.816360Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:46.816427Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:46.816437Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.816479Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:709:2591], serverId# [2:710:2592], sessionId# [0:0:0] 2025-06-30T09:18:46.827128Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:714:2596], serverId# [2:715:2597], sessionId# [0:0:0] 2025-06-30T09:18:46.827215Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:46.827292Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:46.827302Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.827351Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:714:2596], serverId# [2:715:2597], sessionId# [0:0:0] 2025-06-30T09:18:46.830130Z node 2 
:TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:719:2601], serverId# [2:720:2602], sessionId# [0:0:0] 2025-06-30T09:18:46.830188Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:46.830265Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:46.830274Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.830313Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:719:2601], serverId# [2:720:2602], sessionId# [0:0:0] 2025-06-30T09:18:46.856001Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:724:2606], serverId# [2:725:2607], sessionId# [0:0:0] 2025-06-30T09:18:46.856144Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:46.856245Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:46.856256Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.856306Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:724:2606], serverId# [2:725:2607], sessionId# [0:0:0] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-NoDbAdmin-ordinaryuser [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-DbAdmin-ordinaryuser ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::LongDomainName [GOOD] Test command err: Trying to start YDB, gRPC: 11883, MsgBus: 1700 2025-06-30T09:18:24.137536Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669300936155121:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:24.137569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003358/r3tmp/tmpqGgZDV/pdisk_1.dat 2025-06-30T09:18:24.263046Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:24.267849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:24.267875Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:24.269305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11883, node 1 2025-06-30T09:18:24.294908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:24.294925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:24.294927Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:24.294983Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1700 TClient is connected to server localhost:1700 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:24.360946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:24.581606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669300936155713:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:24.581615Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669300936155702:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:24.581637Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:24.582674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:24.584892Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669300936155716:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:24.640659Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669300936155767:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 19643, MsgBus: 65474 2025-06-30T09:18:24.839926Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669302009012093:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:24.839946Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003358/r3tmp/tmp6bCmAt/pdisk_1.dat 2025-06-30T09:18:24.858007Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19643, node 2 2025-06-30T09:18:24.877670Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:24.877685Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:24.877688Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:24.877755Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65474 TClient is connected to server localhost:65474 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:24.944870Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:24.944906Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:24.945326Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:24.945827Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:25.183059Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669306303979970:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.183059Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669306303979981:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.183083Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.183953Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:25.185639Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669306303979984:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:25.240091Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669306303980035:2326] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 9392, MsgBus: 27405 2025-06-30T09:18:25.725462Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521669302432965529:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:25.726807Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003358/r3tmp/tmpXZ4IV9/pdisk_1.dat 2025-06-30T09:18:25.753400Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9392, node 3 2025-06-30T09:18:25.774973Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:25.774989Z node 3 :NE ... r: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:46.343732Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521669394674249800:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:46.343747Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:46.344770Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:46.348011Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7521669394674249834:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:46.425531Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7521669394674249885:2326] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:46.435492Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["aid (null, 3)","aid [7, 7]"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"0","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["abalance"],"E-Cost":"0","ReadRangesExpectedSize":2}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (null, 3)","aid [7, 7]"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["aid (null, 3)","aid [7, 7]"],"Name":"TableRangeScan","Path":"\/Root\/pgbench_accounts","E-Rows":"0","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["abalance"],"E-Cost":"0","ReadRangesExpectedSize":2}],"Node Type":"TableRangeScan"}],"Operators":[{"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","ReadRange":["aid (4, 3)"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"1","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"0"}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (4, 3)"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Operators":[{"Scan":"Parallel","ReadRange":["aid (4, 
3)"],"E-Size":"0","Name":"TableRangeScan","Path":"\/Root\/pgbench_accounts","E-Rows":"1","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"0"}],"Node Type":"TableRangeScan"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 19430, MsgBus: 14944 2025-06-30T09:18:47.069091Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7521669401019879984:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:47.069196Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003358/r3tmp/tmpanAj5S/pdisk_1.dat 2025-06-30T09:18:47.101216Z node 11 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19430, node 11 2025-06-30T09:18:47.104376Z node 11 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 11 Type# 268639257 2025-06-30T09:18:47.124845Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:47.124861Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:47.124864Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:47.124931Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14944 2025-06-30T09:18:47.169217Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:47.169247Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:47.170492Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14944 WaitRootIsUp 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'... TClient::Ls request: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_D... (TRUNCATED) WaitRootIsUp 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' success. 
2025-06-30T09:18:47.235156Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:47.237354Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:47.532848Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7521669401019880388:2292], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.532850Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7521669401019880381:2289], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.532866Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.533573Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:47.535416Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7521669401019880410:2293], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:47.619031Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7521669401019880461:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:47.624127Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds [GOOD] Test command err: 2025-06-30T09:18:44.634880Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:44.634962Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:44.634975Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048d4/r3tmp/tmpSw1YHF/pdisk_1.dat 2025-06-30T09:18:44.744995Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:44.745989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:44.765670Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:44.769397Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275124034254 != 1751275124034258 2025-06-30T09:18:44.818873Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:44.818937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:44.833833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:44.912552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:44.936865Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:18:44.936971Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:44.948837Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:44.948880Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:44.949091Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:44.949101Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:44.949108Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:44.949169Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:44.949191Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:44.949207Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:18:44.962565Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:44.968083Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:44.968177Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:44.968214Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:18:44.968221Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:44.968228Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:44.968234Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:44.968406Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:44.968430Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:44.968443Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:44.968450Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:44.968461Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:44.968468Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:44.968490Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:18:44.968612Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:44.968671Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:44.968693Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:44.969058Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:44.983043Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:44.983085Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:45.151564Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-30T09:18:45.152671Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:18:45.152695Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.152965Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.152977Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:45.152990Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:45.153057Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:45.153099Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:45.153248Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.153265Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:45.153723Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:45.153840Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.154331Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:45.154347Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.154494Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:45.154511Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.154893Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.154911Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.154918Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:45.154939Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:45.154951Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:45.154964Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.155852Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.156298Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:45.156311Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:45.156496Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:45.161709Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:45.161742Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: ... 24037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:46.670145Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:46.670259Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:46.670302Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:46.670353Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:46.670370Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:46.670514Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:46.670624Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:46.671260Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:46.671280Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.671487Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:46.671500Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:46.671693Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:46.671703Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:46.671708Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:46.671727Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:46.671737Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:46.671748Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.672292Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:46.672558Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 
0 next step 1000 2025-06-30T09:18:46.672593Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:46.672599Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:46.676310Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:46.676341Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:706:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:46.676354Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:46.677512Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:46.679003Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:46.726038Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:46.840726Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:46.841374Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:710:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:46.880146Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:780:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:46.908102Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz023fwk9naj9wfm903hk5dj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmMzM2FmZjgtYTA2MTVjMWQtYTE5MzJmMzUtMjlmNjY2NjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:18:46.908971Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:811:2642], serverId# [2:812:2643], sessionId# [0:0:0] 2025-06-30T09:18:46.909198Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037888 2025-06-30T09:18:46.909268Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-06-30T09:18:46.919771Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.923924Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:819:2649], serverId# [2:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:46.924315Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:46.935947Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:46.935983Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.936071Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:46.936083Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-30T09:18:46.936179Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:46.936190Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:46.936204Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:46.936218Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:46.936237Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:819:2649], serverId# [2:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:46.936551Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:46.936673Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:46.936715Z node 2 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:46.936721Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:46.936731Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-30T09:18:46.936788Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:46.936797Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:46.936956Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-30T09:18:46.937029Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-30T09:18:46.937068Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-30T09:18:46.937075Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-30T09:18:46.937113Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:46.937118Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-30T09:18:46.937186Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:46.937191Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:46.937197Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-30T09:18:46.937236Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:46.937243Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:46.937252Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 [GOOD] Test command err: 2025-06-30T09:18:44.846858Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:44.846970Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:44.846992Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048d2/r3tmp/tmpD5soGX/pdisk_1.dat 2025-06-30T09:18:45.018580Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:45.019651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:45.044524Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:45.045795Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275124263696 != 1751275124263700 2025-06-30T09:18:45.095050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:45.095090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:45.107293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:45.196298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:45.223039Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:18:45.223143Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:45.244303Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:45.244359Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:45.244617Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:45.244631Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:45.244641Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:45.244723Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:45.244747Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:45.244765Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:18:45.255094Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:45.267149Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:45.267261Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:45.267305Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:18:45.267314Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.267320Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:45.267327Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.267516Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:45.267549Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:45.267564Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.267572Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.267583Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:45.267589Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.267613Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:18:45.267762Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:45.268017Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:45.268052Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:45.268551Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.283104Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:45.283154Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:45.439031Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-30T09:18:45.439870Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:18:45.439893Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.440124Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.440135Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:45.440147Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:45.440208Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:45.440240Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:45.440404Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.440423Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:45.440765Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:45.440872Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.441162Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:45.441190Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.441299Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:45.441313Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.441563Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.441572Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.441578Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:45.441600Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:45.441611Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:45.441625Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.442407Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.442814Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:45.442845Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:45.443102Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:45.447843Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:45.447875Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: ... 24037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:47.000283Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:47.000369Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:47.000414Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:47.000460Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:47.000478Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:47.000601Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:47.000712Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:47.001358Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:47.001375Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:47.001613Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:47.001631Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:47.001920Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:47.001935Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:47.001967Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:47.001990Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:47.002005Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:47.002020Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:47.002411Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:47.003529Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 
0 next step 1000 2025-06-30T09:18:47.003608Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:47.003623Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:47.008396Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.008437Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:706:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.008451Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.009826Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:47.011680Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:47.056759Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:47.163026Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:47.163686Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:710:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:47.199928Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:780:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:47.230495Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz023g701hmqq04kyam95gmv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzkwZGNkZTEtOGIzOGRhNmItMTIyZjlhYWUtYzQ3ZjYxNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:18:47.235800Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:811:2642], serverId# [2:812:2643], sessionId# [0:0:0] 2025-06-30T09:18:47.235993Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037888 2025-06-30T09:18:47.236061Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-30T09:18:47.246531Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:47.250309Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:819:2649], serverId# [2:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:47.250666Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:47.261159Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:47.261188Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:47.261270Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:47.261280Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-30T09:18:47.261385Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:47.261396Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:47.261408Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:47.261424Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:47.261443Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:819:2649], serverId# [2:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:47.261719Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:47.261836Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:47.261878Z node 2 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:47.261884Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:47.261893Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-30T09:18:47.261960Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:47.261971Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:47.262140Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-30T09:18:47.262210Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-30T09:18:47.262287Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-30T09:18:47.262295Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-30T09:18:47.262341Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:47.262346Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-30T09:18:47.262419Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:47.262425Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:47.262432Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-30T09:18:47.262469Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:47.262478Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:47.262486Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] Test command err: 2025-06-30T09:18:44.627697Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:44.627789Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:44.627808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048d8/r3tmp/tmpCN3b1G/pdisk_1.dat 2025-06-30T09:18:44.744995Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:44.745972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:44.769680Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:44.779591Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275124003069 != 1751275124003073 2025-06-30T09:18:44.827408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:44.827461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:44.838200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:44.912418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:44.934469Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:18:44.934587Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:44.945154Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:44.945201Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:44.945403Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:44.945413Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:44.945421Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:44.945514Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:44.945535Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:44.945551Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:18:44.956061Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:44.961185Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:44.961298Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:44.961341Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:18:44.961348Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:44.961353Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:44.961361Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:44.961561Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:44.961590Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:44.961603Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:44.961611Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:44.961621Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:44.961627Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:44.961652Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:18:44.961790Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:44.961853Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:44.961874Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:44.962297Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:44.972713Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:44.972769Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:45.131739Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-30T09:18:45.133066Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:18:45.133101Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.133485Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.133507Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:45.133522Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:45.133607Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:45.133650Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:45.133868Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.133893Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:45.134489Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:45.134631Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.137417Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:45.137445Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.137655Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:45.137675Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.138163Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.138181Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.138190Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:45.138217Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:45.138232Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:45.138248Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.139585Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.140159Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:45.140185Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:45.140444Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:45.154604Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:45.154639Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: ... main_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:47.878241Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037891 2025-06-30T09:18:47.878260Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2025-06-30T09:18:47.913874Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037894 actor [2:1216:2993] 2025-06-30T09:18:47.913976Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:47.918510Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:47.918559Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:47.918757Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037894 2025-06-30T09:18:47.918767Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037894 2025-06-30T09:18:47.918775Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037894 2025-06-30T09:18:47.918824Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:47.918843Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:47.918856Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037894 persisting started state actor id [2:1232:2993] in generation 1 2025-06-30T09:18:47.939448Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:47.939495Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037894 2025-06-30T09:18:47.939531Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037894 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:47.939551Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037894, actorId: [2:1234:3003] 2025-06-30T09:18:47.939557Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037894 2025-06-30T09:18:47.939562Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037894, state: WaitScheme 2025-06-30T09:18:47.939569Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-30T09:18:47.939705Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037894 2025-06-30T09:18:47.939737Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037894 2025-06-30T09:18:47.939753Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 
72075186224037894 2025-06-30T09:18:47.939762Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:47.939773Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037894 TxInFly 0 2025-06-30T09:18:47.939780Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-30T09:18:47.939910Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1215:2992], serverId# [2:1221:2995], sessionId# [0:0:0] 2025-06-30T09:18:47.939943Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-30T09:18:47.940007Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037894 txId 281474976715663 ssId 72057594046644480 seqNo 2:7 2025-06-30T09:18:47.940030Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715663 at tablet 72075186224037894 2025-06-30T09:18:47.940188Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2025-06-30T09:18:47.951067Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-30T09:18:47.951125Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037894 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:48.074993Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1240:3009], serverId# [2:1242:3011], sessionId# [0:0:0] 2025-06-30T09:18:48.075159Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715663 at step 4000 at tablet 72075186224037894 { Transactions { TxId: 281474976715663 AckTo { RawX1: 0 RawX2: 0 } } Step: 4000 MediatorID: 72057594046382081 TabletID: 72075186224037894 } 2025-06-30T09:18:48.075173Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-30T09:18:48.075206Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2025-06-30T09:18:48.075215Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:48.075228Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [4000:281474976715663] in PlanQueue unit at 72075186224037894 2025-06-30T09:18:48.075344Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037894 loaded tx from db 4000:281474976715663 keys extracted: 0 2025-06-30T09:18:48.075394Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:48.075496Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2025-06-30T09:18:48.075512Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037894 tableId# [OwnerId: 72057594046644480, LocalPathId: 8] schema version# 1 2025-06-30T09:18:48.075629Z node 2 
:TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037894 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:48.075733Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:48.076085Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037894 time 3500 2025-06-30T09:18:48.076100Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-30T09:18:48.076328Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037894 step# 4000} 2025-06-30T09:18:48.076346Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-30T09:18:48.076764Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-30T09:18:48.076781Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037894 2025-06-30T09:18:48.076789Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037894 2025-06-30T09:18:48.076806Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [4000 : 281474976715663] from 72075186224037894 at tablet 72075186224037894 send result to client [2:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:48.076818Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037894 Sending notify to schemeshard 72057594046644480 txId 281474976715663 state Ready TxInFly 0 2025-06-30T09:18:48.076836Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-30T09:18:48.077429Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2025-06-30T09:18:48.077456Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:18:48.077494Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-30T09:18:48.077555Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037891 2025-06-30T09:18:48.077580Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037893 2025-06-30T09:18:48.077595Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:48.077717Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2025-06-30T09:18:48.077749Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got 
TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037894 coordinator 72057594046316545 last step 0 next step 4000 2025-06-30T09:18:48.077886Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715663 datashard 72075186224037894 state Ready 2025-06-30T09:18:48.077896Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037894 Got TEvSchemaChangedResult from SS at 72075186224037894 2025-06-30T09:18:48.083377Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1269:3032], serverId# [2:1270:3033], sessionId# [0:0:0] 2025-06-30T09:18:48.083462Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1269:3032], serverId# [2:1270:3033], sessionId# [0:0:0] 2025-06-30T09:18:48.085645Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1274:3037], serverId# [2:1275:3038], sessionId# [0:0:0] 2025-06-30T09:18:48.085715Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1274:3037], serverId# [2:1275:3038], sessionId# [0:0:0] 2025-06-30T09:18:48.087885Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1279:3042], serverId# [2:1280:3043], sessionId# [0:0:0] 2025-06-30T09:18:48.087947Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1279:3042], serverId# [2:1280:3043], sessionId# [0:0:0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds [GOOD] Test command err: 2025-06-30T09:18:45.092948Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:45.093074Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:45.093099Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048bd/r3tmp/tmphXzGq8/pdisk_1.dat 2025-06-30T09:18:45.205304Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:45.206513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:45.227058Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:45.228331Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275124475356 != 1751275124475360 2025-06-30T09:18:45.270449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:45.270485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:45.281170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:45.359506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:45.380145Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:18:45.380240Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:45.392014Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:45.392064Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:45.392285Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:45.392297Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:45.392306Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:45.392379Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:45.392400Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:45.392416Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:18:45.402718Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:45.408644Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:45.408732Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:45.408767Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:18:45.408773Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.408777Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:45.408782Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.408917Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:45.408937Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:45.408947Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.408954Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.408965Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:45.408969Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.408988Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:18:45.409100Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:45.409158Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:45.409186Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:45.409553Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.419858Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:45.419916Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:45.563384Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-30T09:18:45.564467Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:18:45.564497Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.564773Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.564789Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:45.564804Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:45.564885Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:45.564928Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:45.565093Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.565117Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:45.565555Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:45.565688Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.566093Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:45.566106Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.566231Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:45.566245Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.566524Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.566535Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.566542Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:45.566563Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:45.566575Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:45.566587Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.571806Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.572325Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:45.572343Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:45.572541Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:45.581285Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:45.581317Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: ... 24037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:46.944961Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:46.945052Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:46.945102Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:46.945138Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:46.945150Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:46.945278Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:46.945372Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:46.945892Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:46.945913Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.946181Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:46.946198Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:46.946449Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:46.946460Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:46.946466Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:46.946485Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:46.946496Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:46.946507Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:46.946885Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:46.947274Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 
0 next step 1000 2025-06-30T09:18:46.947325Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:46.947336Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:46.952558Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:46.952591Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:706:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:46.952604Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:46.953739Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:46.955477Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:47.001298Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:47.111322Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:47.111838Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:710:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:47.145930Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:780:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:47.164719Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz023g58d1mkq1yzhv6fzysd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2Y1Yzc3MjctYjgzMjk4Ni02YmJkNzQzYi02MTFkMmJkNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:18:47.165571Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:811:2642], serverId# [2:812:2643], sessionId# [0:0:0] 2025-06-30T09:18:47.165755Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037888 2025-06-30T09:18:47.165816Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-06-30T09:18:47.177141Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:47.182034Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:819:2649], serverId# [2:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:47.182448Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:47.195005Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:47.195031Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:47.195117Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:47.195129Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-30T09:18:47.195228Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:47.195237Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:47.195247Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:47.195259Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:47.195278Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:819:2649], serverId# [2:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:47.195600Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:47.195726Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:47.195781Z node 2 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:47.195789Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:47.195824Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-30T09:18:47.195883Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:47.195893Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:47.196088Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-30T09:18:47.196154Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-30T09:18:47.196196Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-30T09:18:47.196203Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-30T09:18:47.196242Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:47.196248Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-30T09:18:47.196322Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:47.196328Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:47.196335Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-30T09:18:47.196369Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:47.196378Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:47.196388Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 [GOOD] Test command err: 2025-06-30T09:18:45.093894Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:45.094028Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:45.094051Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048ce/r3tmp/tmpfmEFyf/pdisk_1.dat 2025-06-30T09:18:45.235941Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:45.236941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:45.264009Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:45.265289Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275124465850 != 1751275124465854 2025-06-30T09:18:45.314778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:45.314838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:45.325545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:45.401292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:45.424586Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:18:45.424688Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:45.436247Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:45.436289Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:45.436506Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:45.436519Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:45.436527Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:45.436590Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:45.436612Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:45.436628Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:18:45.446997Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:45.452879Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:45.452971Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:45.453010Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:18:45.453017Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.453024Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:45.453031Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.453220Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:45.453252Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:45.453267Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.453275Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.453287Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:45.453293Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.453317Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:18:45.453466Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:45.453530Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:45.453553Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:45.453961Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.464338Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:45.464388Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:45.620952Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-30T09:18:45.622126Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:18:45.622144Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.622606Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.622624Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:45.622639Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:45.622720Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:45.622781Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:45.622963Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.623001Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:45.623552Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:45.623664Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.624053Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:45.624063Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.624189Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:45.624203Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.624503Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.624513Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.624520Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:45.624539Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:45.624550Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:45.624562Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.625423Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.625807Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:45.625822Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:45.626018Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:45.637730Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:45.637760Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: ... 24037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:48.555012Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:48.555101Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:48.555142Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:48.555165Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:48.555180Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:48.555303Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:48.555408Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:48.555886Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:48.555899Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.556216Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:48.556233Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:48.556443Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:48.556454Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:48.556462Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:48.556482Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:48.556494Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:48.556510Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.556892Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:48.557191Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 
2025-06-30T09:18:48.557208Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:48.557379Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:48.561560Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:48.561591Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:706:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:48.561603Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:48.562690Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:48.564836Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:48.613857Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:48.749118Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:48.749692Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:710:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:48.790634Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:780:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:48.823711Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz023hqh0cgprxffetg471jh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MWUwZjNiYjctM2FlNmJkMmUtOTdkNmMxZmQtMjFjODlmMjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:18:48.824565Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:811:2642], serverId# [3:812:2643], sessionId# [0:0:0] 2025-06-30T09:18:48.824742Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037888 2025-06-30T09:18:48.824809Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-30T09:18:48.845475Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.849589Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:819:2649], serverId# [3:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:48.849981Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:48.860632Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:48.860668Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.860753Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:48.860765Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-30T09:18:48.860862Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:48.860872Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:48.860885Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:48.860901Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:48.860923Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:819:2649], serverId# [3:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:48.861236Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:48.861346Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:48.861388Z node 3 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:48.861394Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:48.861403Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-30T09:18:48.861455Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:48.861464Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:48.861598Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-30T09:18:48.861679Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-30T09:18:48.861711Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-30T09:18:48.861718Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-30T09:18:48.861812Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:48.861818Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-30T09:18:48.861842Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:48.861847Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:48.861853Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-30T09:18:48.861886Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:48.861894Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:48.861902Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/unittest >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-30T09:16:41.664710Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668855673493306:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.664736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:16:41.696420Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cff/r3tmp/tmplL9Voq/pdisk_1.dat TServer::EnableGrpc on GrpcPort 
17260, node 1 2025-06-30T09:16:41.726207Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:41.732677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002cff/r3tmp/yandexsfbqoC.tmp 2025-06-30T09:16:41.732695Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002cff/r3tmp/yandexsfbqoC.tmp 2025-06-30T09:16:41.732795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002cff/r3tmp/yandexsfbqoC.tmp 2025-06-30T09:16:41.732853Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:41.740689Z INFO: TTestServer started on Port 16658 GrpcPort 17260 TClient is connected to server localhost:16658 PQClient connected to localhost:17260 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:16:41.767296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.767330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.768630Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.783341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:16:41.786301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:16:41.792621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 
2025-06-30T09:16:42.078103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668859968461352:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.078163Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668859968461364:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.078172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.079156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:42.079344Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668859968461395:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.079365Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.081113Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668859968461367:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-30T09:16:42.120437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.127438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.179167Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668859968461551:2508] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:16:42.196059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.196295Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521668859968461569:2321], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:16:42.196385Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NjYzYjM0ODYtNDBmODY5OWMtMzg5NjA2ZjQtZmY0YzQwOWY=, ActorId: [1:7521668859968461350:2296], ActorState: ExecuteState, TraceId: 01jz01zp6t5pxw389ng98f16j5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:16:42.196903Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521668859968461717:2611] 2025-06-30T09:16:42.667025Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:46.666823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668855673493306:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:46.667446Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:16:47.494670Z :WriteToTopic_Demo_20_RestartNo_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:16:47.527124Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:16:47.538609Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521668881443298402:2694] connected; active server actors: 1 2025-06-30T09:16:47.538696Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-30T09:16:47.539010Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:16:47.539055Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-30T09:16:47.539396Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-30T09:16:47.543225Z node 1 ... 
09:18:45.498233Z :DEBUG: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-30T09:18:45.498265Z :DEBUG: [/Root] Decompression task done. Partition/PartitionSessionId: 1 (6-9) 2025-06-30T09:18:45.498269Z :DEBUG: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] [] Returning serverBytesSize = 0 to budget 2025-06-30T09:18:45.498325Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-30T09:18:45.498494Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 grpc read done: success# 1, data# { read_request { bytes_size: 1001078 } } 2025-06-30T09:18:45.498559Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 got read request: guid# d04b1df1-9d91754a-95e1efb3-67f8034d 2025-06-30T09:18:45.498581Z :DEBUG: [/Root] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-06-30T09:18:45.498798Z :DEBUG: [/Root] Take Data. Partition 0. Read: {1, 1} (2-2) 2025-06-30T09:18:45.498815Z :DEBUG: [/Root] Take Data. Partition 0. Read: {1, 2} (3-3) 2025-06-30T09:18:45.498900Z :DEBUG: [/Root] Take Data. Partition 0. Read: {1, 3} (4-4) 2025-06-30T09:18:45.498917Z :DEBUG: [/Root] Take Data. Partition 0. Read: {1, 4} (5-5) 2025-06-30T09:18:45.498932Z :DEBUG: [/Root] Take Data. Partition 0. Read: {1, 5} (6-6) 2025-06-30T09:18:45.498950Z :DEBUG: [/Root] Take Data. Partition 0. Read: {2, 0} (7-7) 2025-06-30T09:18:45.499147Z :DEBUG: [/Root] Take Data. Partition 0. Read: {2, 1} (8-8) 2025-06-30T09:18:45.499167Z :DEBUG: [/Root] Take Data. Partition 0. Read: {3, 0} (9-9) 2025-06-30T09:18:45.499186Z :DEBUG: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] [] The application data is transferred to the client. Number of messages 10, size 1000000 bytes 2025-06-30T09:18:45.499198Z :DEBUG: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] [] Returning serverBytesSize = 0 to budget 0 10 2025-06-30T09:18:45.499378Z :DEBUG: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] [] Commit offsets [0, 10). Partition stream id: 1 2025-06-30T09:18:45.499640Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 grpc read done: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 1 offsets { end: 10 } } } } 2025-06-30T09:18:45.499753Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 10 prev 0 end 10 by cookie 2 2025-06-30T09:18:45.499829Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-30T09:18:45.499842Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-30T09:18:45.499896Z node 11 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 10 (startOffset 0) session test-consumer_11_1_13996899821776990820_v1 2025-06-30T09:18:45.499946Z node 11 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-30T09:18:45.500432Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 10 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:18:45.500456Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:18:45.500458Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-30T09:18:45.500472Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=1001194, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:18:45.500491Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-30T09:18:45.500515Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 10 endOffset 10 with cookie 2 2025-06-30T09:18:45.500528Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 10 2025-06-30T09:18:45.501562Z :DEBUG: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 10 } } 2025-06-30T09:18:46.483938Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:9:10 2025-06-30T09:18:46.483970Z :INFO: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1001 BytesRead: 1000000 MessagesRead: 10 BytesReadCompressed: 1000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:46.489398Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 checking auth because of timeout 2025-06-30T09:18:46.489444Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 auth for : test-consumer 2025-06-30T09:18:46.489654Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 Handle describe topics response 2025-06-30T09:18:46.489680Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 auth is DEAD 2025-06-30T09:18:46.489703Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:18:47.483060Z :INFO: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] Closing read session. 
Close timeout: 0.000000s 2025-06-30T09:18:47.483081Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:9:10 2025-06-30T09:18:47.483093Z :INFO: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2000 BytesRead: 1000000 MessagesRead: 10 BytesReadCompressed: 1000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:47.483115Z :NOTICE: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:18:47.483125Z :DEBUG: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] [] Abort session to cluster 2025-06-30T09:18:47.483386Z :NOTICE: [/Root] [/Root] [6a1e053f-540cf544-c358faa8-e73069e9] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:47.483572Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d20395a7-71f89c0-ed819ade-81276a5d_0] PartitionId [0] Generation [2] Write session: close. Timeout 0.000000s 2025-06-30T09:18:47.483578Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d20395a7-71f89c0-ed819ade-81276a5d_0] PartitionId [0] Generation [2] Write session will now close 2025-06-30T09:18:47.483585Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|d20395a7-71f89c0-ed819ade-81276a5d_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-30T09:18:47.483685Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d20395a7-71f89c0-ed819ade-81276a5d_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:47.483622Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 grpc read done: success# 0, data# { } 2025-06-30T09:18:47.483691Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|d20395a7-71f89c0-ed819ade-81276a5d_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-30T09:18:47.483639Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 grpc read failed 2025-06-30T09:18:47.483649Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 grpc closed 2025-06-30T09:18:47.483666Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_13996899821776990820_v1 is DEAD 2025-06-30T09:18:47.483776Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_13996899821776990820_v1 2025-06-30T09:18:47.483794Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669390207481958:2510] destroyed 2025-06-30T09:18:47.483806Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|d20395a7-71f89c0-ed819ade-81276a5d_0 grpc read done: success: 0 data: 2025-06-30T09:18:47.483809Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|d20395a7-71f89c0-ed819ade-81276a5d_0 grpc read failed 2025-06-30T09:18:47.483815Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|d20395a7-71f89c0-ed819ade-81276a5d_0 grpc closed 2025-06-30T09:18:47.483823Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|d20395a7-71f89c0-ed819ade-81276a5d_0 is DEAD 2025-06-30T09:18:47.483974Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_13996899821776990820_v1 2025-06-30T09:18:47.484014Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521669390207481955:2507] disconnected; active server actors: 1 2025-06-30T09:18:47.484018Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521669390207481955:2507] client test-consumer disconnected session test-consumer_11_1_13996899821776990820_v1 2025-06-30T09:18:47.484120Z node 11 :PQ_WRITE_PROXY 
DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:47.484161Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669381617547288:2480] destroyed 2025-06-30T09:18:47.484197Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. >> DistributedEraseTests::ConditionalEraseRowsCheckLimits [GOOD] >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks [GOOD] Test command err: 2025-06-30T09:18:44.344764Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:44.344853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:44.344871Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048e8/r3tmp/tmpdL8jrZ/pdisk_1.dat 2025-06-30T09:18:44.483149Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:44.484144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:44.506079Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:44.510287Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275123254153 != 1751275123254157 2025-06-30T09:18:44.563514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:44.563559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:44.574412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:44.665353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:44.712166Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:652:2550] 2025-06-30T09:18:44.712267Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:44.724876Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:44.724980Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:44.725183Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:44.725197Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:44.725205Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:44.725277Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:44.725435Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:654:2552] 2025-06-30T09:18:44.725479Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:44.729277Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:44.729315Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:691:2550] in generation 1 2025-06-30T09:18:44.729549Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:44.729582Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:44.729747Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:18:44.729756Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:18:44.729763Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:18:44.729816Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:44.729865Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:44.729874Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:695:2552] in generation 1 2025-06-30T09:18:44.730269Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:660:2554] 2025-06-30T09:18:44.730313Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:44.731989Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:44.732025Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:44.732167Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-30T09:18:44.732176Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-30T09:18:44.732182Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-30T09:18:44.732228Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:44.732254Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:44.732267Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:703:2554] in generation 1 2025-06-30T09:18:44.743154Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:44.749227Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:44.749333Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:44.749379Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:707:2581] 2025-06-30T09:18:44.749385Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:44.749391Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:44.749397Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:44.749534Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:44.749543Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:18:44.749555Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:44.749563Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:708:2582] 2025-06-30T09:18:44.749567Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:18:44.749571Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:18:44.749574Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:18:44.749685Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:44.749717Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:44.749734Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:44.749740Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-30T09:18:44.749751Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:44.749759Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:709:2583] 2025-06-30T09:18:44.749763Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-30T09:18:44.749767Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-30T09:18:44.749771Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:18:44.749817Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:44.749825Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:44.749835Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:44.749840Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:44.749848Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-30T09:18:44.749858Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-30T09:18:44.749873Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:641:2545], serverId# [1:665:2556], sessionId# 
[0:0:0] 2025-06-30T09:18:44.749882Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:18:44.749886Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:44.749890Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-30T09:18:44.749894Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:18:44.750065Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:44.750130Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:44.750150Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:44.750265Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server ... __progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:47.736888Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:47.736895Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:47.736912Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:47.736923Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:47.736937Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:47.737307Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:47.737585Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:47.737599Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:47.737751Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:47.741739Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.741768Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:706:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.741779Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.742941Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:47.744191Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:47.790955Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:47.904107Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:47.904849Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:710:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:47.940655Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:780:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:47.956845Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz023gxx2s3eb7yd07be3gq2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MzU1Y2I3NTUtYmZiY2UyOGEtNDg1ZDc2ZjEtZDRmOTdmYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:18:47.957620Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:811:2642], serverId# [3:812:2643], sessionId# [0:0:0] 2025-06-30T09:18:47.957779Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037888 2025-06-30T09:18:47.957835Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037888, row count=3 2025-06-30T09:18:47.968398Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.009655Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz023h54ee76n544qcdfnstx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MWU5MWIxN2QtZGI0MzA3ZDAtNjQwYmFlMDQtY2U5Njc1YmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:18:48.010420Z node 3 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715661, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] { items { uint64_value: 0 } } 2025-06-30T09:18:48.014247Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:851:2674], serverId# [3:852:2675], sessionId# [0:0:0] 2025-06-30T09:18:48.014631Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:48.025156Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:48.025190Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.025203Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-30T09:18:48.025436Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-30T09:18:48.025444Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.025504Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:48.025512Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037888 2025-06-30T09:18:48.025560Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:48.025568Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:48.025577Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:48.025584Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:48.025634Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:851:2674], serverId# [3:852:2675], sessionId# [0:0:0] 2025-06-30T09:18:48.035820Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz023h6s8451wxcj2j9dn0mx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MWU5MWIxN2QtZGI0MzA3ZDAtNjQwYmFlMDQtY2U5Njc1YmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:18:48.036673Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:6] at 72075186224037888 2025-06-30T09:18:48.036724Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:105;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=6; 2025-06-30T09:18:48.038048Z node 3 :TX_DATASHARD INFO: datashard_write_operation.cpp:720: Write transaction 6 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-30T09:18:48.038109Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. 
txid 6 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-30T09:18:48.038158Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-30T09:18:48.038180Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.038241Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [3:873:2648], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:818:2648]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:873:2648].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-30T09:18:48.038357Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:866:2648], SessionActorId: [3:818:2648], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:818:2648]. isRollback=0 2025-06-30T09:18:48.038473Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1938: SessionId: ydb://session/3?node_id=3&id=MWU5MWIxN2QtZGI0MzA3ZDAtNjQwYmFlMDQtY2U5Njc1YmI=, ActorId: [3:818:2648], ActorState: ExecuteState, TraceId: 01jz023h6s8451wxcj2j9dn0mx, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:867:2648] from: [3:866:2648] 2025-06-30T09:18:48.038524Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [3:867:2648] TxId: 281474976715662. Ctx: { TraceId: 01jz023h6s8451wxcj2j9dn0mx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MWU5MWIxN2QtZGI0MzA3ZDAtNjQwYmFlMDQtY2U5Njc1YmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-30T09:18:48.038610Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:7] at 72075186224037888 2025-06-30T09:18:48.038621Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:421: Skip empty write operation for [0:7] at 72075186224037888 2025-06-30T09:18:48.038653Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.038678Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=3&id=MWU5MWIxN2QtZGI0MzA3ZDAtNjQwYmFlMDQtY2U5Njc1YmI=, ActorId: [3:818:2648], ActorState: ExecuteState, TraceId: 01jz023h6s8451wxcj2j9dn0mx, Create QueryResponse for error on request, msg: ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp [GOOD] Test command err: 2025-06-30T09:18:45.319154Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:45.319256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:45.319276Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048bc/r3tmp/tmpD3vVhV/pdisk_1.dat 2025-06-30T09:18:45.431229Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:45.432282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:45.452232Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:45.453524Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275124743111 != 1751275124743115 2025-06-30T09:18:45.495942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:45.495986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:45.506676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:45.582771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:45.603148Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:18:45.603243Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:45.616517Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:45.616562Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:45.616807Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:45.616818Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:45.616827Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:45.616897Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:45.616916Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:45.616929Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:18:45.630984Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:45.636939Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:45.637050Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:45.637092Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:18:45.637099Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.637106Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:45.637113Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.641302Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:45.641344Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:45.641363Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.641372Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.641385Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:45.641392Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.641420Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:18:45.641560Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:45.641627Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:45.641649Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:45.642045Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.652439Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:45.652496Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:18:45.807525Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-30T09:18:45.808551Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:18:45.808580Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.808845Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.808856Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:45.808870Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:45.808931Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:45.808962Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:45.809117Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.809138Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:45.809485Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:45.809594Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.809991Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:45.810006Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.810163Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:45.810179Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.810484Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.810496Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.810502Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:45.810520Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:45.810529Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:45.810540Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.814120Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:45.814689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:18:45.814718Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:45.815630Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:45.821129Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:45.821170Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: ... 24037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:48.734097Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:18:48.734198Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:18:48.734238Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:48.734262Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:48.734276Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:18:48.734388Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:48.734519Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:48.735036Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:18:48.735051Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.735345Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:18:48.735359Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:48.735543Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:48.735552Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:48.735559Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:48.735612Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:48.735624Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:18:48.735637Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.736025Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:48.736295Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 
2025-06-30T09:18:48.736310Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:18:48.736451Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:18:48.749213Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:48.749241Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:706:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:48.749252Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:48.756305Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:48.760608Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:48.807724Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:48.915753Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:48.916540Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:710:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:48.948497Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:780:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:48.975928Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz023hxc2k00f9trherzydn3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGRlZmJhMTAtZDY3MTE2NmMtYWU1ZTVmYTctZmRiNTA4NjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:18:48.976860Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:811:2642], serverId# [3:812:2643], sessionId# [0:0:0] 2025-06-30T09:18:48.977155Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037888 2025-06-30T09:18:48.977240Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-30T09:18:48.987791Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:48.992028Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:819:2649], serverId# [3:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:48.992688Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-30T09:18:49.003414Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-30T09:18:49.003456Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:49.003560Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:49.003573Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-30T09:18:49.003708Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:49.003719Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:49.003732Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:49.003746Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:49.003768Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:819:2649], serverId# [3:820:2650], sessionId# [0:0:0] 2025-06-30T09:18:49.004087Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:49.004205Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:49.004254Z node 3 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:49.004260Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:49.004272Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-30T09:18:49.004335Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:49.004346Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:49.004518Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-30T09:18:49.004641Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 48, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-30T09:18:49.004681Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-30T09:18:49.004690Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-30T09:18:49.004811Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:18:49.004819Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-30T09:18:49.004853Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:49.004860Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:49.004868Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-30T09:18:49.004908Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:49.004920Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:49.004931Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:48.012501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:48.012527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:48.012533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:48.012538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:48.012549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:48.012553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:48.012562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:48.012577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:48.012682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:48.012745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:48.029779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:48.029801Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:48.033439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:48.033492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:48.033522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:48.035134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:48.035215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:48.035353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:48.035415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:48.036077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:48.036125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:48.036377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:48.036388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-30T09:18:48.036410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:48.036418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:48.036424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:48.036440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.037847Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:48.059915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:48.060022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.060092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:48.060100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:48.060145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:48.060161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:48.060912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:48.060950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:48.061009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.061018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:48.061024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts 
opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:48.061029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:48.061435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.061445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:48.061451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:48.061893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.061903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.061909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:48.061915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:48.062620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:48.063084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:48.063122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:48.063307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:48.063335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:48.063342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:48.063408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:48.063416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:48.063447Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:48.063459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:48.063952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:48.063962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:48.064005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:48.064011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-30T09:18:48.064099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.064107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-30T09:18:48.064121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:18:48.064125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:18:48.064131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:18:48.064135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:18:48.064140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-30T09:18:48.064146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:18:48.064152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-30T09:18:48.064156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1:0 2025-06-30T09:18:48.064169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:18:48.064177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-30T09:18:48.064182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-30T09:18:48.064511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 
Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:18:48.064526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:18:48.064532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-30T09:18:48.064538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-30T09:18:48.064543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:48.064558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-30T09:18:48.065233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-30T09:18:48.065344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-30T09:18:48.065627Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-30T09:18:48.067927Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-30T09:18:48.068772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "String" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:48.068842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.068859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "String" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" } }, at schemeshard: 72057594046678944 2025-06-30T09:18:48.069008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Unsupported column type, at schemeshard: 72057594046678944 2025-06-30T09:18:48.069391Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:18:48.070166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Unsupported column type" TxId: 101 SchemeshardId: 72057594046678944, at 
schemeshard: 72057594046678944 2025-06-30T09:18:48.070234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Unsupported column type, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-30T09:18:48.070377Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ShouldCheckQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:47.680282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:47.680305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:47.680310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:47.680316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:47.680325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:47.680328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:47.680337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:47.680350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:47.680451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:47.680507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:47.695023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:47.695042Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:47.698364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:47.698413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:47.698449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:47.712507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:47.712631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:47.712761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:47.712832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:47.713537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:47.713598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:47.713868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:47.713879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:47.713902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:47.713910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:47.713916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:47.713933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:47.715495Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:47.738105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:47.738185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:47.738236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:47.738244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:47.738288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, 
reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:47.738304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:47.738931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:47.738968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:47.739013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:47.739021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:47.739026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:47.739031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:47.739414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:47.739424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:47.739429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:47.739794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:47.739803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:47.739809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:47.739815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:47.740501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:47.740929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:47.740963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation 
RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:47.741134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:47.741161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:47.741169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:47.741231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:47.741240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:47.741269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:47.741282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:47.741965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:47.741974Z node 1 :FLAT_TX_SCHEMESHARD ... 
94046678944 2025-06-30T09:18:47.987804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:18:47.987858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:18:47.987868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-30T09:18:47.987879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-30T09:18:47.987891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-30T09:18:47.988264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:18:47.988285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:18:47.988291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-30T09:18:47.988297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-30T09:18:47.988304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:18:47.988322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-30T09:18:47.988529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6376: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 398 } } CommitVersion { Step: 200 TxId: 103 } 2025-06-30T09:18:47.988540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 103, tablet: 72075186233409549, partId: 0 2025-06-30T09:18:47.988568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 
398 } } CommitVersion { Step: 200 TxId: 103 } 2025-06-30T09:18:47.988588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 398 } } CommitVersion { Step: 200 TxId: 103 } 2025-06-30T09:18:47.988731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 547 RawX2: 4294969787 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-30T09:18:47.988738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 103, tablet: 72075186233409549, partId: 0 2025-06-30T09:18:47.988756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: Source { RawX1: 547 RawX2: 4294969787 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-30T09:18:47.988766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-30T09:18:47.988777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 547 RawX2: 4294969787 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-30T09:18:47.988796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 103:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:47.988801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:18:47.988808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 103:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-30T09:18:47.988817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 103:0 129 -> 240 2025-06-30T09:18:47.990697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:18:47.991374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:18:47.991405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:18:47.991432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:18:47.991525Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:18:47.991537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-30T09:18:47.991556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:18:47.991562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:18:47.991568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:18:47.991573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:18:47.991578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-30T09:18:47.991600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:410:2376] message: TxId: 103 2025-06-30T09:18:47.991608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:18:47.991615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-30T09:18:47.991620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:0 2025-06-30T09:18:47.991656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:18:47.992328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:18:47.992345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:575:2511] TestWaitNotification: OK eventTxId 103 W0000 00:00:1751275127.992492 127396 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TTableDescription: 9:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 104 2025-06-30T09:18:47.993403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/SubDomain" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table4" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 SysSettings { RunInterval: 1799999999 } Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 104 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:47.993476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /MyRoot/SubDomain/Table4, opId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:18:47.993500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /MyRoot/SubDomain/Table4, opId: 104:0, schema: Name: "Table4" Columns { Name: "key" Type: "Uint64" } Columns { Name: 
"modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 SysSettings { RunInterval: 1799999999 } Tiers { ApplyAfterSeconds: 3600 Delete { } } } }, at schemeshard: 72057594046678944 2025-06-30T09:18:47.993586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 104:1, propose status:StatusSchemeError, reason: TTL run interval cannot be less than limit: 1800, at schemeshard: 72057594046678944 2025-06-30T09:18:47.994890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 104, response: Status: StatusSchemeError Reason: "TTL run interval cannot be less than limit: 1800" TxId: 104 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:47.994951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 104, database: /MyRoot/SubDomain, subject: , status: StatusSchemeError, reason: TTL run interval cannot be less than limit: 1800, operation: CREATE TABLE, path: /MyRoot/SubDomain/Table4 TestModificationResult got TxId: 104, wait until txId: 104 >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed [GOOD] >> KqpPg::Returning-useSink [GOOD] >> KqpPg::SelectIndex+useSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 13359, MsgBus: 26089 2025-06-30T09:18:22.476386Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669292296576040:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:22.476412Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003387/r3tmp/tmpIed0Pn/pdisk_1.dat 2025-06-30T09:18:23.392573Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13359, node 1 2025-06-30T09:18:23.479290Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:23.491259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:23.491289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:23.504662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:23.890994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:23.891010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:23.891012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:23.891075Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26089 TClient is connected to server localhost:26089 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:24.051307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:24.057900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 16 2025-06-30T09:18:24.299545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.368718Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:18:24.369563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.377996Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:18:24.382187Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669300886511254:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:24.382190Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669300886511246:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:24.382213Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:24.383266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:24.386357Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669300886511260:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-30T09:18:24.472012Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669300886511311:2445] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } f f t t 18 2025-06-30T09:18:24.530381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.544464Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:18:24.545591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.555945Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 21 2025-06-30T09:18:24.631145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.662157Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:18:24.663624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.680065Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 23 2025-06-30T09:18:24.745988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.757158Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:18:24.758235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at 
schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.768898Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 20 2025-06-30T09:18:24.827100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.840021Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:18:24.841622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710686:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.855559Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 700 2025-06-30T09:18:24.910185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.922393Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoi ... re not loaded TServer::EnableGrpc on GrpcPort 22110, node 11 2025-06-30T09:18:47.341998Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:47.342016Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:47.342019Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:47.342094Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2370 2025-06-30T09:18:47.380490Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:47.380525Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:47.381550Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2370 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:47.405554Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:47.408005Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:18:47.699177Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7521669398241050623:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.699210Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.699419Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7521669398241050650:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:47.700234Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:47.702500Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7521669398241050652:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-30T09:18:47.771640Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7521669398241050703:2329] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:47.784514Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:47.885462Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) Trying to start YDB, gRPC: 21162, MsgBus: 5882 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003387/r3tmp/tmpH0Gra1/pdisk_1.dat 2025-06-30T09:18:48.206825Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:18:48.211166Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21162, node 12 2025-06-30T09:18:48.225857Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:48.225874Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:48.225877Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:48.225924Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5882 TClient is connected to server localhost:5882 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:18:48.292940Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:48.292993Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:48.295794Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:48.299513Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:48.303344Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:48.608738Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7521669404008852583:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:48.608797Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:48.609026Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7521669404008852618:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:48.610004Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:48.612395Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:18:48.612452Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7521669404008852620:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:18:48.683827Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7521669404008852671:2329] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:48.690084Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TNebiusAccessServiceTest::PassRequestId [GOOD] |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard [GOOD] >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC [GOOD] |79.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |79.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |79.6%| [LD] {RESULT} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:49.403690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:49.403711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:49.403716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:49.403719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:49.403728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:49.403731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:49.403738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:49.403751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:49.403833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:49.403890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:49.413734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:49.413763Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:49.416476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:49.416519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:49.416545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:49.418057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:49.418121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:49.418236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:49.418296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:49.418804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:49.418855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:49.419101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:49.419115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:49.419142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:49.419150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:49.419156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:49.419175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:49.420250Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:49.437842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:49.437953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:49.438032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:49.438041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:49.438097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:49.438112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:49.438979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:49.439020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:49.439075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:49.439084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:49.439090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:49.439097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:49.439521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:49.439533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:49.439540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:49.440010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:49.440027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:49.440035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-30T09:18:49.440042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:49.440722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:49.441212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:49.441256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:49.441409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:49.441435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:49.441442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:49.441505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:49.441511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:49.441537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:49.441547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:49.442026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:49.442037Z node 1 :FLAT_TX_SCHEMESHARD ... 
ion transaction is registered, txId: 281474976710760, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-06-30T09:18:49.687414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:49.687435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:49.687443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-30T09:18:49.687450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710760:0 128 -> 240 2025-06-30T09:18:49.687919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-30T09:18:49.687933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-06-30T09:18:49.687951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-30T09:18:49.687955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-30T09:18:49.687962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-30T09:18:49.687969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-30T09:18:49.687974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-30T09:18:49.687987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:127:2151] message: TxId: 281474976710760 2025-06-30T09:18:49.687992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-30T09:18:49.687996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-30T09:18:49.687999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710760:0 2025-06-30T09:18:49.688010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-30T09:18:49.688399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-30T09:18:49.688413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:6839: Message: TxId: 281474976710760 2025-06-30T09:18:49.688426Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2019: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-30T09:18:49.688447Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2022: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:391:2361], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0}, txId# 281474976710760 2025-06-30T09:18:49.688789Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-30T09:18:49.688813Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:391:2361], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:18:49.688824Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-30T09:18:49.689158Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-30T09:18:49.689181Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Done, 
IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:391:2361], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:18:49.689186Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:336: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-30T09:18:49.689208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:18:49.689215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:480:2439] TestWaitNotification: OK eventTxId 102 2025-06-30T09:18:49.689355Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:49.689419Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 72us result status StatusSuccess 2025-06-30T09:18:49.689587Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByValue" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 
0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::PassRequestId [GOOD] Test command err: 2025-06-30T09:18:49.801433Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [1311bfbe65f0]{reqId} Connect to grpc://localhost:21282 2025-06-30T09:18:49.802338Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1311bfbe65f0]{reqId} Request AuthenticateRequest { iam_token: "**** (717F937C)" } 2025-06-30T09:18:49.809202Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [1311bfbe65f0]{reqId} Response AuthenticateResponse { account { user_account { id: "1234" } } } >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClass [GOOD] >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI >> TSchemeShardColumnTableTTL::AlterColumnTable_Negative [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-DbAdmin-ordinaryuser [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-NoDbAdmin-dbadmin >> KqpPg::SelectIndex+useSink [GOOD] >> KqpPg::SelectIndex-useSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard [GOOD] Test command err: 2025-06-30T09:18:44.918446Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:44.918551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:44.918576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048c1/r3tmp/tmpHMhlS1/pdisk_1.dat 2025-06-30T09:18:45.049719Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:45.050784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:45.073672Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:45.075239Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275124325090 != 1751275124325094 2025-06-30T09:18:45.123483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:45.123530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:45.135284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:45.225666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:45.256570Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:652:2550] 2025-06-30T09:18:45.256674Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:45.276520Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:45.276641Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:45.276918Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:45.276935Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:45.276945Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:45.277033Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:45.277204Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:654:2552] 2025-06-30T09:18:45.277260Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:45.287458Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:45.287497Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:691:2550] in generation 1 2025-06-30T09:18:45.287765Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:45.287804Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:45.288007Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:18:45.288018Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:18:45.288027Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:18:45.288087Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:45.288146Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:45.288157Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:695:2552] in generation 1 2025-06-30T09:18:45.288608Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:660:2554] 2025-06-30T09:18:45.288656Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:45.290581Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:45.290623Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:45.290824Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-30T09:18:45.290836Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-30T09:18:45.290845Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-30T09:18:45.290901Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:45.290933Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:45.290948Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:703:2554] in generation 1 2025-06-30T09:18:45.301370Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:45.307422Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:45.307529Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:45.307572Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:707:2581] 2025-06-30T09:18:45.307581Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:45.307589Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:45.307596Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:45.307746Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:45.307758Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:18:45.307783Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:45.307812Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:708:2582] 2025-06-30T09:18:45.307817Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:18:45.307821Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:18:45.307825Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:18:45.307967Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:45.308006Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:45.308026Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:45.308033Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-30T09:18:45.308047Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:45.308060Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:709:2583] 2025-06-30T09:18:45.308065Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-30T09:18:45.308070Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-30T09:18:45.308074Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:18:45.308130Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:45.308141Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.308154Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:45.308161Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:45.308170Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-30T09:18:45.308183Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-30T09:18:45.308202Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:641:2545], serverId# [1:665:2556], sessionId# 
[0:0:0] 2025-06-30T09:18:45.308210Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:18:45.308218Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:45.308222Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-30T09:18:45.308228Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:18:45.308427Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:45.308504Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:45.308531Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:45.308659Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server ... d__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-30T09:18:49.667166Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:18:49.667218Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2025-06-30T09:18:49.667236Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037890 source 72075186224037890 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2025-06-30T09:18:49.667257Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1052:2790] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037888, status# 1 2025-06-30T09:18:49.667283Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-30T09:18:49.667304Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1052:2790] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037889, status# 1 2025-06-30T09:18:49.667321Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-30T09:18:49.667328Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037890 2025-06-30T09:18:49.667345Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715661 2025-06-30T09:18:49.667353Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715661 2025-06-30T09:18:49.667360Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1052:2790] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037890, status# 1 2025-06-30T09:18:49.667367Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:901: [DistEraser] [3:1052:2790] Register plan: txId# 
281474976715662, minStep# 1527, maxStep# 31527 2025-06-30T09:18:49.680829Z node 3 :TX_DATASHARD INFO: datashard.cpp:190: OnDetach: 72075186224037888 2025-06-30T09:18:49.680897Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-30T09:18:49.681683Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037888 from 72075186224037889 is reset 2025-06-30T09:18:49.681705Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037888 from 72075186224037890 is reset 2025-06-30T09:18:49.681763Z node 3 :TX_DATASHARD ERROR: datashard_distributed_erase.cpp:167: [DistEraser] [3:1052:2790] Reply: txId# 281474976715662, status# SHARD_UNKNOWN, error# Tx state unknown: reason# lost pipe while waiting for reply (plan), txId# 281474976715662, shard# 72075186224037888 2025-06-30T09:18:49.681994Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-06-30T09:18:49.682008Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037889 2025-06-30T09:18:49.682056Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:18:49.682067Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:49.682079Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 1 2025-06-30T09:18:49.682090Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:18:49.682180Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1047:2786], serverId# [3:1048:2787], sessionId# [0:0:0] 2025-06-30T09:18:49.697407Z node 3 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [3:1064:2801] 2025-06-30T09:18:49.697493Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:49.698060Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:49.698519Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:49.698876Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:49.698893Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:49.698903Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:49.698987Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:49.699099Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:49.699111Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [3:1079:2801] in generation 2 2025-06-30T09:18:49.711071Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:49.711136Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037888 2025-06-30T09:18:49.711177Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 
72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:18:49.711267Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [3:1082:2809] 2025-06-30T09:18:49.711276Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:49.711284Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:18:49.711292Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:49.711368Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-06-30T09:18:49.711404Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-06-30T09:18:49.711787Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:49.711826Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:49.711851Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1526 2025-06-30T09:18:49.711860Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:49.711917Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:49.711928Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:18:49.711941Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 1 2025-06-30T09:18:49.711947Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:49.711975Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:18:49.711999Z node 3 :TX_DATASHARD DEBUG: datashard__progress_resend_rs.cpp:14: Start TTxProgressResendRS at tablet 72075186224037888 2025-06-30T09:18:49.712008Z node 3 :TX_DATASHARD INFO: datashard.cpp:4101: Resend RS at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715661 2025-06-30T09:18:49.712020Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 1 at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715661 2025-06-30T09:18:49.712073Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715661 2025-06-30T09:18:49.712092Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1526 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-30T09:18:49.712101Z node 3 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 1526:281474976715661 at 72075186224037889 2025-06-30T09:18:49.712112Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: 
TTxReadSet::Complete at 72075186224037889 2025-06-30T09:18:49.712120Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 72075186224037889 {TEvReadSet step# 1526 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-30T09:18:49.712138Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 1500 next step 1526 2025-06-30T09:18:49.712159Z node 3 :TX_DATASHARD DEBUG: datashard__progress_resend_rs.cpp:14: Start TTxProgressResendRS at tablet 72075186224037888 2025-06-30T09:18:49.712165Z node 3 :TX_DATASHARD INFO: datashard.cpp:4101: Resend RS at 72075186224037888 from 72075186224037888 to 72075186224037890 txId 281474976715661 2025-06-30T09:18:49.712170Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 2 at 72075186224037888 from 72075186224037888 to 72075186224037890 txId 281474976715661 2025-06-30T09:18:49.712217Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715661 2025-06-30T09:18:49.712235Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715661 2025-06-30T09:18:49.712246Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1526 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-30T09:18:49.712252Z node 3 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 1526:281474976715661 at 72075186224037890 2025-06-30T09:18:49.712260Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-30T09:18:49.712266Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 72075186224037890 {TEvReadSet step# 1526 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-30T09:18:49.712280Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715661 |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC [GOOD] Test command err: 2025-06-30T09:18:35.356502Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669345963476138:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:35.356597Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:35.356112Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669348567105036:2155];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/004029/r3tmp/tmpFv4PmJ/pdisk_1.dat 2025-06-30T09:18:35.437996Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:35.439166Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:35.441285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:35.471464Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:35.494987Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 11139, node 1 2025-06-30T09:18:35.501712Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004029/r3tmp/yandexegKInh.tmp 2025-06-30T09:18:35.501726Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004029/r3tmp/yandexegKInh.tmp 2025-06-30T09:18:35.501800Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004029/r3tmp/yandexegKInh.tmp 2025-06-30T09:18:35.501852Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:35.522928Z INFO: TTestServer started on Port 20466 GrpcPort 11139 2025-06-30T09:18:35.536858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:35.536883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:35.539544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:35.544641Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:35.544671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:35.546001Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:35.546477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20466 PQClient connected to localhost:11139 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:35.593948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:18:35.820383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-30T09:18:36.180523Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669352862073187:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.180563Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.180758Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669352862073222:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.181624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:36.185098Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669352862073256:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.185129Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.189852Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669352862073224:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:18:36.286958Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669352862073298:2731] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:36.315993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:36.355198Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669352862073309:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:36.356025Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:36.356164Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZGJmYzgzMDUtOTA0OTljNmMtYzIzOGI2YTEtOTllOTdiZDg=, ActorId: [1:7521669352862073183:2295], ActorState: ExecuteState, TraceId: 01jz0235mgcerqvma9nn4et4x4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:36.356682Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:36.359312Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:36.373425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:36.414073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-30T09:18:36.486801Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz0235xj96mhndvq914bkewn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTc0NzRjNjAtZmMzOWZkNWItZWQ4ZDA5YTctZjM2OWJiZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7521669352862073752:3049] 2025-06-30T09:18:40.347361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:7 ... 
2:5:0:0 null:account2/topic2:1:4:0:0 null:account2/topic2:3:3:0:0 null:account2/topic2:4:2:0:0 null:account2/topic2:0:1:3:0 2025-06-30T09:18:49.448118Z :INFO: [/Root] [/Root] [e0c27940-b3b5bbc-a201127d-af027cee] Counters: { Errors: 0 CurrentSessionLifetimeMs: 16 BytesRead: 40 MessagesRead: 4 BytesReadCompressed: 92 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:49.448136Z :NOTICE: [/Root] [/Root] [e0c27940-b3b5bbc-a201127d-af027cee] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:18:49.448147Z :DEBUG: [/Root] [/Root] [e0c27940-b3b5bbc-a201127d-af027cee] [null] Abort session to cluster 2025-06-30T09:18:49.448266Z :NOTICE: [/Root] [/Root] [e0c27940-b3b5bbc-a201127d-af027cee] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:49.448396Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer user1 session user1_3_2_17193206821482508717_v1 grpc read done: success# 0, data# { } 2025-06-30T09:18:49.448401Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer user1 session user1_3_2_17193206821482508717_v1 grpc read failed 2025-06-30T09:18:49.448404Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer user1 session user1_3_2_17193206821482508717_v1 grpc closed 2025-06-30T09:18:49.448416Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer user1 session user1_3_2_17193206821482508717_v1 is DEAD 2025-06-30T09:18:49.448500Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037904] Destroy direct read session user1_3_2_17193206821482508717_v1 2025-06-30T09:18:49.448511Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037904] server disconnected, pipe [3:7521669406845562998:2542] destroyed 2025-06-30T09:18:49.448516Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037903] Destroy direct read session user1_3_2_17193206821482508717_v1 2025-06-30T09:18:49.448520Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037903] server disconnected, pipe [3:7521669406845562991:2539] destroyed 2025-06-30T09:18:49.448522Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037903] Destroy direct read session user1_3_2_17193206821482508717_v1 2025-06-30T09:18:49.448525Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037903] server disconnected, pipe [3:7521669406845562988:2538] destroyed 2025-06-30T09:18:49.448537Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_17193206821482508717_v1 2025-06-30T09:18:49.448543Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_17193206821482508717_v1 2025-06-30T09:18:49.448544Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_17193206821482508717_v1 2025-06-30T09:18:49.448534Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037902] Destroy direct read session user1_3_2_17193206821482508717_v1 2025-06-30T09:18:49.448552Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037902] server disconnected, pipe [3:7521669406845562992:2540] destroyed 2025-06-30T09:18:49.448556Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037902] Destroy direct read session user1_3_2_17193206821482508717_v1 2025-06-30T09:18:49.448558Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037902] server disconnected, pipe [3:7521669406845562995:2541] destroyed 2025-06-30T09:18:49.448570Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_17193206821482508717_v1 2025-06-30T09:18:49.448576Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_17193206821482508717_v1 2025-06-30T09:18:49.449062Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037905][topic2] pipe [3:7521669406845562986:2535] disconnected; active server actors: 1 2025-06-30T09:18:49.449070Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037905][topic2] pipe [3:7521669406845562986:2535] client user1 disconnected session 
user1_3_2_17193206821482508717_v1 2025-06-30T09:18:49.460060Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7521669375908631396:2104], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:49.460103Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7521669375908631396:2104], cacheItem# { Subscriber: { Subscriber: [4:7521669380203598808:2155] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:18:49.460141Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7521669405973403276:2632], recipient# [4:7521669405973403275:2328], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:49.687363Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521669376780788484:2151], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:49.687420Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7521669376780788484:2151], cacheItem# { Subscriber: { Subscriber: [3:7521669381075756217:2447] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 29 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1751275123073 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:18:49.687534Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521669406845563015:4704], recipient# [3:7521669376780788225:2076], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath 
DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:18:49.839054Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521669376780788484:2151], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:49.839110Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7521669376780788484:2151], cacheItem# { Subscriber: { Subscriber: [3:7521669381075757060:3043] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:18:49.839135Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521669406845563020:4708], recipient# [3:7521669406845563019:2544], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:49.867008Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521669376780788484:2151], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:18:49.867065Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7521669376780788484:2151], cacheItem# { Subscriber: { Subscriber: [3:7521669381075757060:3043] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:18:49.867091Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521669406845563022:4709], recipient# [3:7521669406845563021:2545], result# { ErrorCount: 1 DatabaseName: /Root 
DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TNebiusAccessServiceTest::Authenticate |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::Authenticate [GOOD] >> HugeBlobOnlineSizeChange::Compaction |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::AlterColumnTable_Negative [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:48.222354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:48.222381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:48.222387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:48.222392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:48.222402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:48.222408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:48.222418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:48.222434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:48.222541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:48.222613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:48.236827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:48.236849Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:48.240298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:48.240360Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:48.240402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:48.242388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:48.242513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:48.242641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:48.242723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:48.243905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:48.243983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:48.244298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:48.244313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:48.244346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:48.244356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:48.244362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:48.244381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.246707Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:48.269282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:48.269366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.269430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:48.269438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 
2025-06-30T09:18:48.269480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:48.269494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:48.271162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:48.271211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:48.271269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.271279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:48.271286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:48.271293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:48.275245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.275269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:48.275280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:48.278686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.278711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.278719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:48.278729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:48.279635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:48.283196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 
cookie: 0:1 msg type: 269090816 2025-06-30T09:18:48.283275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:48.283548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:48.283587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:48.283599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:48.283697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:48.283709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:48.283745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:48.283759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:48.286967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:48.286982Z node 1 :FLAT_TX_SCHEMESHARD ... 
0T09:18:50.271175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:50.271195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:50.271213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:50.271232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:50.271246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:50.271264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:50.271922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:50.271953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:50.271973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:50.272015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:18:50.272026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-30T09:18:50.272054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:18:50.272059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:18:50.272066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:18:50.272070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:18:50.272076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-30T09:18:50.272117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:2667:3866] message: TxId: 101 2025-06-30T09:18:50.272125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:18:50.272150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-30T09:18:50.272156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 101:0 2025-06-30T09:18:50.272447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-06-30T09:18:50.273511Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:18:50.273530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:2668:3867] TestWaitNotification: OK eventTxId 101 2025-06-30T09:18:50.273736Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:50.273833Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 108us result status StatusSuccess 2025-06-30T09:18:50.274080Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 64 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "TTLEnabledTable" Schema { Columns { Id: 1 Name: "key" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "modified_at" Type: "Timestamp" TypeId: 50 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 3 Name: "str" Type: "String" TypeId: 4097 NotNull: false StorageId: "" DefaultValue { } 
ColumnFamilyId: 0 } KeyColumnNames: "key" NextColumnId: 4 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } ColumnShardCount: 64 Sharding { ColumnShards: 72075186233409546 ColumnShards: 72075186233409547 ColumnShards: 72075186233409548 ColumnShards: 72075186233409549 ColumnShards: 72075186233409550 ColumnShards: 72075186233409551 ColumnShards: 72075186233409552 ColumnShards: 72075186233409553 ColumnShards: 72075186233409554 ColumnShards: 72075186233409555 ColumnShards: 72075186233409556 ColumnShards: 72075186233409557 ColumnShards: 72075186233409558 ColumnShards: 72075186233409559 ColumnShards: 72075186233409560 ColumnShards: 72075186233409561 ColumnShards: 72075186233409562 ColumnShards: 72075186233409563 ColumnShards: 72075186233409564 ColumnShards: 72075186233409565 ColumnShards: 72075186233409566 ColumnShards: 72075186233409567 ColumnShards: 72075186233409568 ColumnShards: 72075186233409569 ColumnShards: 72075186233409570 ColumnShards: 72075186233409571 ColumnShards: 72075186233409572 ColumnShards: 72075186233409573 ColumnShards: 72075186233409574 ColumnShards: 72075186233409575 ColumnShards: 72075186233409576 ColumnShards: 72075186233409577 ColumnShards: 72075186233409578 ColumnShards: 72075186233409579 ColumnShards: 72075186233409580 ColumnShards: 72075186233409581 ColumnShards: 72075186233409582 ColumnShards: 72075186233409583 ColumnShards: 72075186233409584 ColumnShards: 72075186233409585 ColumnShards: 72075186233409586 ColumnShards: 72075186233409587 ColumnShards: 72075186233409588 ColumnShards: 72075186233409589 ColumnShards: 72075186233409590 ColumnShards: 72075186233409591 ColumnShards: 72075186233409592 ColumnShards: 72075186233409593 ColumnShards: 72075186233409594 ColumnShards: 72075186233409595 ColumnShards: 72075186233409596 ColumnShards: 72075186233409597 ColumnShards: 72075186233409598 ColumnShards: 72075186233409599 ColumnShards: 72075186233409600 ColumnShards: 72075186233409601 ColumnShards: 72075186233409602 ColumnShards: 72075186233409603 ColumnShards: 72075186233409604 ColumnShards: 72075186233409605 ColumnShards: 72075186233409606 ColumnShards: 72075186233409607 ColumnShards: 72075186233409608 ColumnShards: 72075186233409609 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "key" } } StorageConfig { DataChannelCount: 64 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR W0000 00:00:1751275130.274229 127839 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TAlterColumnTable: 6:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 102 2025-06-30T09:18:50.275235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnTable AlterColumnTable { Name: "TTLEnabledTable" AlterTtlSettings { Enabled { ColumnName: "str" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:50.275294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: alter_table.cpp:282: TAlterColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:18:50.275385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 102:1, propose 
status:StatusSchemeError, reason: ttl update error: Unsupported column type. in alter constructor STANDALONE_UPDATE, at schemeshard: 72057594046678944 2025-06-30T09:18:50.276200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "ttl update error: Unsupported column type. in alter constructor STANDALONE_UPDATE" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:50.276271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: ttl update error: Unsupported column type. in alter constructor STANDALONE_UPDATE, operation: ALTER COLUMN TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 102, wait until txId: 102 |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest >> TNebiusAccessServiceTest::Authorize [GOOD] |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_stats/unittest |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-NoDbAdmin-dbadmin [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-DbAdmin-dbadmin |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::Authenticate [GOOD] Test command err: 2025-06-30T09:18:51.057864Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [335bbfbfa5f0] Connect to grpc://localhost:26556 2025-06-30T09:18:51.058642Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [335bbfbfa5f0] Request AuthenticateRequest { iam_token: "**** (3C4833B6)" } 2025-06-30T09:18:51.066870Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [335bbfbfa5f0] Status 7 Permission Denied 2025-06-30T09:18:51.067108Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [335bbfbfa5f0] Request AuthenticateRequest { iam_token: "**** (86DDB286)" } 2025-06-30T09:18:51.072948Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [335bbfbfa5f0] Response AuthenticateResponse { account { user_account { id: "1234" } } } |79.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile >> THiveTest::TestHiveBalancerHighUsageAndColumnShards [GOOD] >> THiveTest::TestHiveBalancerOneTabletHighUsage >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex [GOOD] >> KqpPg::SelectIndex-useSink [GOOD] >> KqpPg::TableDeleteAllData+useSink |79.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |79.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest |79.6%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::Authorize [GOOD] Test command err: 2025-06-30T09:18:51.433471Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [70c73fbfa5f0] Connect to grpc://localhost:26412 2025-06-30T09:18:51.435019Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70c73fbfa5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "path_id" } } iam_token: "**** (717F937C)" } } } 2025-06-30T09:18:51.437241Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [70c73fbfa5f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user_id" } } } } } 2025-06-30T09:18:51.437430Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70c73fbfa5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "path_id" } } iam_token: "**** (79225CA9)" } } } 2025-06-30T09:18:51.438029Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [70c73fbfa5f0] Status 7 Permission Denied 2025-06-30T09:18:51.438131Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70c73fbfa5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "denied" } resource_path { path { id: "path_id" } } iam_token: "**** (717F937C)" } } } 2025-06-30T09:18:51.438418Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [70c73fbfa5f0] Status 7 Permission Denied 2025-06-30T09:18:51.438500Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70c73fbfa5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "p" } } iam_token: "**** (717F937C)" } } } 2025-06-30T09:18:51.438808Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [70c73fbfa5f0] Status 7 Permission Denied |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest >> TopicAutoscaling::ControlPlane_BackCompatibility ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex [GOOD] Test command err: 2025-06-30T09:18:43.857221Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:43.857310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:43.857329Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0048dc/r3tmp/tmp5zjFvW/pdisk_1.dat 2025-06-30T09:18:43.975101Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:43.976108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:44.004845Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:44.006177Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275123332538 != 1751275123332542 2025-06-30T09:18:44.054335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:44.054383Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:44.067410Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:44.149385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:44.185248Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:652:2550] 2025-06-30T09:18:44.185346Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:44.225526Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:44.225621Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:44.225826Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:44.225839Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:44.225846Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:44.225921Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:44.226103Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:654:2552] 2025-06-30T09:18:44.226158Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:44.228621Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:44.228654Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:691:2550] in generation 1 2025-06-30T09:18:44.228891Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:44.228927Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:44.229091Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:18:44.229102Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:18:44.229109Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:18:44.229158Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:44.229208Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:44.229217Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:695:2552] in generation 1 2025-06-30T09:18:44.229608Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:660:2554] 2025-06-30T09:18:44.230432Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:44.233006Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:44.233047Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:44.233212Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-30T09:18:44.233222Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-30T09:18:44.233230Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-30T09:18:44.233282Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:44.233309Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:44.233323Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:703:2554] in generation 1 2025-06-30T09:18:44.243876Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:44.259861Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:44.259992Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:44.260043Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:707:2581] 2025-06-30T09:18:44.260050Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:44.260057Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:44.260064Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:44.260235Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:44.260247Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:18:44.260259Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:44.260269Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:708:2582] 2025-06-30T09:18:44.260274Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:18:44.260278Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:18:44.260281Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:18:44.260419Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:44.260453Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:44.260472Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:44.260479Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-30T09:18:44.260492Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:44.260501Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:709:2583] 2025-06-30T09:18:44.260505Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-30T09:18:44.260510Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-30T09:18:44.260513Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:18:44.260570Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:44.260579Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:44.260591Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:44.260598Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:44.260605Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-30T09:18:44.260617Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-30T09:18:44.260634Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:641:2545], serverId# [1:665:2556], sessionId# 
[0:0:0] 2025-06-30T09:18:44.260643Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:18:44.260647Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:44.260652Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-30T09:18:44.260657Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:18:44.260842Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:44.260915Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:44.260943Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:44.261073Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server ... datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037891 step# 2500} 2025-06-30T09:18:51.568282Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-30T09:18:51.568294Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 2 at 72075186224037891 from 72075186224037891 to 72075186224037893 txId 281474976715666 2025-06-30T09:18:51.568305Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-30T09:18:51.568328Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2500 : 281474976715666] from 72075186224037891 at tablet 72075186224037891 send result to client [3:1375:3006], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:18:51.568351Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037891, records: { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 }, { Order: 5 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 }, { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 } 2025-06-30T09:18:51.568371Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-30T09:18:51.568479Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1375:3006] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715666, shard# 72075186224037891, status# 2 2025-06-30T09:18:51.568504Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037893 source 72075186224037891 dest 72075186224037893 producer 72075186224037891 txId 281474976715666 2025-06-30T09:18:51.568523Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037893 got read set: {TEvReadSet step# 2500 txid# 281474976715666 TabletSource# 72075186224037891 TabletDest# 72075186224037893 SetTabletProducer# 72075186224037891 ReadSet.Size()# 19 Seqno# 2 Flags# 0} 2025-06-30T09:18:51.568552Z node 3 :TX_DATASHARD DEBUG: 
datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037893 2025-06-30T09:18:51.568656Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037891 2025-06-30T09:18:51.568730Z node 3 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 3 change records: to# [3:1175:2888], at tablet# 72075186224037891 2025-06-30T09:18:51.568739Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 3, forgotten# 0, left# 0, at tablet# 72075186224037891 2025-06-30T09:18:51.568762Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-30T09:18:51.568770Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:18:51.568784Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [2500:281474976715666] at 72075186224037893 for LoadAndWaitInRS 2025-06-30T09:18:51.568898Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:51.569034Z node 3 :TX_DATASHARD DEBUG: datashard_change_receiving.cpp:470: Handle TEvChangeExchange::TEvApplyRecords: origin# 72075186224037891, generation# 1, at tablet# 72075186224037892 2025-06-30T09:18:51.580288Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-30T09:18:51.580340Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2500 : 281474976715666] from 72075186224037893 at tablet 72075186224037893 send result to client [3:1375:3006], exec latency: 0 ms, propose latency: 1 ms 2025-06-30T09:18:51.580359Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037893 {TEvReadSet step# 2500 txid# 281474976715666 TabletSource# 72075186224037891 TabletDest# 72075186224037893 SetTabletConsumer# 72075186224037893 Flags# 0 Seqno# 2} 2025-06-30T09:18:51.580368Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-30T09:18:51.580404Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1375:3006] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715666, shard# 72075186224037893, status# 2 2025-06-30T09:18:51.580415Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:165: [DistEraser] [3:1375:3006] Reply: txId# 281474976715666, status# OK, error# 2025-06-30T09:18:51.580444Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037891 source 72075186224037891 dest 72075186224037893 consumer 72075186224037893 txId 281474976715666 2025-06-30T09:18:51.580537Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037891 2025-06-30T09:18:51.580546Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037891 2025-06-30T09:18:51.580607Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 3, at tablet# 72075186224037891 2025-06-30T09:18:51.580613Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 4, at tablet: 72075186224037891 2025-06-30T09:18:51.580645Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 5, at tablet: 
72075186224037891 2025-06-30T09:18:51.580651Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 6, at tablet: 72075186224037891 2025-06-30T09:18:51.580700Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [3:1370:3002], serverId# [3:1371:3003], sessionId# [0:0:0] 2025-06-30T09:18:51.580714Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-06-30T09:18:51.580723Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:51.580732Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-30T09:18:51.581047Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037893 2025-06-30T09:18:51.581158Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037893 2025-06-30T09:18:51.581205Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-30T09:18:51.581211Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:51.581223Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715667] at 72075186224037893 for WaitForStreamClearance 2025-06-30T09:18:51.581271Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:51.581282Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-30T09:18:51.581430Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037893, TxId: 281474976715667, MessageQuota: 1 2025-06-30T09:18:51.581464Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037893, TxId: 281474976715667, MessageQuota: 1 2025-06-30T09:18:51.581499Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037893 2025-06-30T09:18:51.581504Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715667, at: 72075186224037893 2025-06-30T09:18:51.581546Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-30T09:18:51.581551Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:51.581558Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715667] at 72075186224037893 for ReadTableScan 2025-06-30T09:18:51.581588Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:51.581597Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-30T09:18:51.581606Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-30T09:18:51.581871Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: 
TTxProposeTransactionBase::Execute at 72075186224037892 2025-06-30T09:18:51.581927Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037892 2025-06-30T09:18:51.581977Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-30T09:18:51.581983Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:51.581990Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715668] at 72075186224037892 for WaitForStreamClearance 2025-06-30T09:18:51.582020Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:51.582029Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-30T09:18:51.582142Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037892, TxId: 281474976715668, MessageQuota: 1 2025-06-30T09:18:51.582160Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037892, TxId: 281474976715668, MessageQuota: 1 2025-06-30T09:18:51.582184Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037892 2025-06-30T09:18:51.582189Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715668, at: 72075186224037892 2025-06-30T09:18:51.582214Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-30T09:18:51.582219Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:18:51.582228Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715668] at 72075186224037892 for ReadTableScan 2025-06-30T09:18:51.582247Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:51.582254Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-30T09:18:51.582260Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-DbAdmin-dbadmin [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-NoDbAdmin-clusteradmin >> CommitOffset::Commit_WithoutSession_TopPast >> TopicAutoscaling::ReadingAfterSplitTest_BeforeAutoscaleAwareSDK >> CommitOffset::PartitionSplit_OffsetCommit >> BasicUsage::CloseWriteSessionImmediately [GOOD] >> TopicAutoscaling::PartitionMerge_PreferedPartition_BeforeAutoscaleAwareSDK >> TResourceBrokerInstant::Test >> TBlockBlobStorageTest::DelayedErrorsNotIgnored >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_BeforeAutoscaleAwareSDK >> TCdcStreamTests::MeteringServerless [GOOD] >> TCdcStreamTests::MeteringDedicated >> TFlatMetrics::MaximumValue3 [GOOD] >> TFlatMetrics::MaximumValue4 [GOOD] |79.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf >> 
TResourceBroker::TestQueueWithConfigure >> TResourceBrokerInstant::Test [GOOD] >> TResourceBrokerInstant::TestErrors >> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor |79.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |79.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf >> TTabletLabeledCountersAggregator::Version3Aggregation >> TPipeTrackerTest::TestShareTablet [GOOD] >> TPipeTrackerTest::TestIdempotentAttachDetach [GOOD] |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TFlatMetrics::MaximumValue4 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::CloseWriteSessionImmediately [GOOD] Test command err: 2025-06-30T09:18:36.772559Z :BasicWriteSession INFO: Random seed for debugging is 1751275116772551 2025-06-30T09:18:36.876502Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669352769417584:2084];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:36.876994Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:36.880938Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669351674474653:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:36.880971Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d55/r3tmp/tmpAxxXqG/pdisk_1.dat 2025-06-30T09:18:36.946051Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:36.946640Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:36.975784Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:36.980978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:36.981015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:36.987576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21802, node 1 2025-06-30T09:18:37.015608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002d55/r3tmp/yandexSAwR40.tmp 2025-06-30T09:18:37.015630Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002d55/r3tmp/yandexSAwR40.tmp 2025-06-30T09:18:37.015729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002d55/r3tmp/yandexSAwR40.tmp 2025-06-30T09:18:37.015790Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:37.023422Z INFO: TTestServer started on Port 16059 GrpcPort 21802 TClient is connected to server localhost:16059 PQClient connected 
to localhost:21802 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:18:37.042707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:37.042770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-30T09:18:37.051197Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:37.051636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:37.067747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:37.075495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-30T09:18:37.415660Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669355969442128:2268], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.415702Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.415964Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669355969442153:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.418268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:37.425096Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669355969442157:2272], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-30T09:18:37.546921Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669355969442185:2127] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:37.552955Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669355969442201:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.553589Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=N2Y0N2YxZjMtYTQ1YjEyMGEtOTA0OGI3Y2MtOWQ5YjljNzk=, ActorId: [2:7521669355969442126:2267], ActorState: ExecuteState, TraceId: 01jz0236v675szazxdmj11akws, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.552991Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669357064385796:2302], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.553390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.553636Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NzgzNTg4MjctYzA2ZWFlNjAtMzA1MGUzNy03MTdhZmJlYQ==, ActorId: [1:7521669357064385762:2295], ActorState: ExecuteState, TraceId: 01jz0236xm484rkt8e72r91yb5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.554084Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.554100Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.635497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.670069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:21802", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-06-30T09:18:37.777026Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. 
Ctx: { TraceId: 01jz02375r5tm0k56cq5m3089m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWZiNjUxM2QtY ... 1--test-topic" name rt3.dc1--test-topic version1 CallPersQueueGRPC request to localhost:32673 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--test-topic" } } 2025-06-30T09:18:51.143130Z node 3 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--test-topic, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC CallPersQueueGRPC request to localhost:32673 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--test-topic" } } 2025-06-30T09:18:51.650987Z node 3 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--test-topic, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC CallPersQueueGRPC request to localhost:32673 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--test-topic" } } 2025-06-30T09:18:52.159517Z node 3 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 1 ErrorCode: OK MetaResponse { CmdGetTopicMetadataResult { TopicInfo { Topic: "rt3.dc1--test-topic" NumPartitions: 1 Config { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } Version: 1 LocalDC: true Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } TopicPath: "/Root/PQ/rt3.dc1--test-topic" YdbDatabasePath: "/Root" Consumers { Name: "user" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } Version: 0 Important: false } } ErrorCode: OK } } } === Topic created, have version: 1 2025-06-30T09:18:52.170926Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-30T09:18:52.171266Z :INFO: [] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-30T09:18:52.171274Z :INFO: [] MessageGroupId [src] SessionId [] Start write session. 
Will connect to endpoint: localhost:32673 2025-06-30T09:18:52.172468Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-30T09:18:52.173691Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-30T09:18:52.173715Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-30T09:18:52.174038Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-30T09:18:52.174082Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:38446 2025-06-30T09:18:52.174086Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:38446 proto=v1 topic=test-topic durationSec=0 2025-06-30T09:18:52.174092Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:18:52.174994Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-30T09:18:52.175039Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-30T09:18:52.175041Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-30T09:18:52.175042Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-30T09:18:52.175047Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7521669422234584052:2448] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-30T09:18:52.175619Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7521669422234584052:2448] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-30T09:18:52.211121Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [3:7521669422234584052:2448] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-30T09:18:52.211624Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669422234584089:2448] connected; active server actors: 1 2025-06-30T09:18:52.211703Z node 3 
:PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [3:7521669422234584052:2448] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-30T09:18:52.211710Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7521669422234584052:2448] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-30T09:18:52.215547Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669422234584089:2448] disconnected; active server actors: 1 2025-06-30T09:18:52.215560Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669422234584089:2448] disconnected no session 2025-06-30T09:18:52.229400Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7521669422234584052:2448] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-30T09:18:52.229416Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7521669422234584052:2448] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-30T09:18:52.229419Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7521669422234584052:2448] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-30T09:18:52.229428Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-30T09:18:52.230151Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [3:7521669422234584111:2448], now have 1 active actors on pipe 2025-06-30T09:18:52.230269Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:821: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-06-30T09:18:52.230388Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:52.230398Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:52.230433Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-30T09:18:52.230469Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-30T09:18:52.230494Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:52.230755Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:52.230761Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:52.230779Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:52.230973Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0 2025-06-30T09:18:52.238853Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1751275132238 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:52.238911Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-30T09:18:52.239131Z :INFO: [] MessageGroupId [src] SessionId [src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0] Write session: close. Timeout = 0 ms 2025-06-30T09:18:52.239137Z :INFO: [] MessageGroupId [src] SessionId [src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0] Write session will now close 2025-06-30T09:18:52.239142Z :DEBUG: [] MessageGroupId [src] SessionId [src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0] Write session: aborting 2025-06-30T09:18:52.239258Z :INFO: [] MessageGroupId [src] SessionId [src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:52.239264Z :DEBUG: [] MessageGroupId [src] SessionId [src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0] Write session: destroy 2025-06-30T09:18:52.239509Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0 grpc read done: success: 0 data: 2025-06-30T09:18:52.239522Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0 grpc read failed 2025-06-30T09:18:52.239532Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0 grpc closed 2025-06-30T09:18:52.239537Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|61d16cdc-d235a3ae-1241f842-e0ff2ae_0 is DEAD 2025-06-30T09:18:52.239728Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:52.240095Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7521669422234584111:2448] destroyed 2025-06-30T09:18:52.240133Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
Session was created >> TResourceBroker::TestQueueWithConfigure [GOOD] >> TResourceBroker::TestOverusageDifferentResources >> TResourceBrokerInstant::TestErrors [GOOD] >> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor [GOOD] >> TTabletLabeledCountersAggregator::Version3Aggregation [GOOD] >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen >> BasicUsage::FallbackToSingleDbAfterBadRequest [GOOD] >> TResourceBroker::TestOverusageDifferentResources [GOOD] >> TTabletPipeTest::TestOpen >> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor >> TResourceBroker::TestResubmitTask |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TPipeTrackerTest::TestIdempotentAttachDetach [GOOD] >> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer >> TTabletPipeTest::TestOpen [GOOD] >> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:112:2057] recipient: [1:108:2139] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:112:2057] recipient: [1:108:2139] Leader for TabletID 9437185 is [0:0:0] sender: [1:113:2057] recipient: [1:109:2140] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [1:113:2057] recipient: [1:109:2140] Leader for TabletID 9437184 is [1:120:2147] sender: [1:121:2057] recipient: [1:108:2139] Leader for TabletID 9437185 is [1:124:2149] sender: [1:125:2057] recipient: [1:109:2140] Leader for TabletID 9437184 is [1:120:2147] sender: [1:160:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:124:2149] sender: [1:162:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:124:2149] sender: [1:164:2057] recipient: [1:105:2138] Leader for TabletID 9437185 is [1:124:2149] sender: [1:167:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:124:2149] sender: [1:169:2057] recipient: [1:168:2178] Leader for TabletID 9437185 is [1:170:2179] sender: [1:171:2057] recipient: [1:168:2178] Leader for TabletID 9437185 is [1:170:2179] sender: [1:199:2057] recipient: [1:14:2061] Leader for TabletID 9437184 is [1:120:2147] sender: [1:202:2057] recipient: [1:104:2137] Leader for TabletID 9437184 is [1:120:2147] sender: [1:205:2057] recipient: [1:204:2202] Leader for TabletID 9437184 is [1:120:2147] sender: [1:206:2057] recipient: [1:14:2061] Leader for TabletID 9437184 is [1:207:2203] sender: [1:208:2057] recipient: [1:204:2202] Leader for TabletID 9437184 is [1:207:2203] sender: [1:237:2057] recipient: [1:14:2061] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TResourceBrokerInstant::TestErrors [GOOD] Test command err: 2025-06-30T09:18:53.147969Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-30T09:18:53.148082Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [1:103:2136]) priority=0 resources={100, 100} 2025-06-30T09:18:53.148090Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.148098Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {100, 100} for task task-1 (1 by [1:103:2136]) from queue queue_compaction0 2025-06-30T09:18:53.148103Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:103:2136]) to queue 
queue_compaction0 2025-06-30T09:18:53.148112Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 200.000000 (insert task task-1 (1 by [1:103:2136])) 2025-06-30T09:18:53.148127Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task task-1 (1 by [1:103:2136]) (priority=0 type=compaction0 resources={80, 70} resubmit=0) 2025-06-30T09:18:53.148131Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.148138Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 160.000000 (insert task task-1 (1 by [1:103:2136])) 2025-06-30T09:18:53.148147Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [1:103:2136]) (release resources {80, 70}) 2025-06-30T09:18:53.148152Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction0 from 160.000000 to 0.000000 (remove task task-1 (1 by [1:103:2136])) 2025-06-30T09:18:53.483439Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-30T09:18:53.483601Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [2:103:2136]) priority=0 resources={100, 100} 2025-06-30T09:18:53.483615Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.483627Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {100, 100} for task task-1 (1 by [2:103:2136]) from queue queue_compaction0 2025-06-30T09:18:53.483633Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.483649Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 200.000000 (insert task task-1 (1 by [2:103:2136])) 2025-06-30T09:18:53.483668Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [2:103:2136]) priority=0 resources={100500, 100500} 2025-06-30T09:18:53.483675Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:408: SubmitTask failed for task 1 to [2:103:2136]: task with the same ID has been already submitted 2025-06-30T09:18:53.483692Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:515: FinishTask failed for task 2 to [2:103:2136]: cannot finish unknown task 2025-06-30T09:18:53.483698Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:1080: FinishTaskInstant failed for task 2: cannot finish unknown task >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen [GOOD] >> TTabletPipeTest::TestSendWithoutWaitOpen >> TResourceBroker::TestResubmitTask [GOOD] >> TResourceBroker::TestUpdateCookie ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::FallbackToSingleDbAfterBadRequest [GOOD] Test command err: 2025-06-30T09:18:36.910877Z :FallbackToSingleDb INFO: Random seed for debugging is 1751275116910869 2025-06-30T09:18:37.078997Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669355072912862:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:37.079057Z node 1 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:37.082873Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669356914168162:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:37.082903Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d7b/r3tmp/tmp5t5PI2/pdisk_1.dat 2025-06-30T09:18:37.129773Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:37.153891Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:37.205992Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23892, node 1 2025-06-30T09:18:37.235753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:37.235806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:37.243556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:37.243585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:37.247423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:37.248079Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:37.248907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:37.253732Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002d7b/r3tmp/yandex7bmJuQ.tmp 2025-06-30T09:18:37.253745Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002d7b/r3tmp/yandex7bmJuQ.tmp 2025-06-30T09:18:37.253834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002d7b/r3tmp/yandex7bmJuQ.tmp 2025-06-30T09:18:37.253885Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:37.261494Z INFO: TTestServer started on Port 6083 GrpcPort 23892 TClient is connected to server localhost:6083 PQClient connected to localhost:23892 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:37.306577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-30T09:18:37.618509Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669355072913720:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.618555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669355072913733:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.618564Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.619602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:37.620176Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669355072913765:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.620199Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.626877Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669355072913735:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-30T09:18:37.666887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.669930Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669356914168445:2273], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.670650Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=N2NkN2E2MGEtZjg2ZDdjYjgtZTU5ZjY4YzEtZDU3NmMyN2U=, ActorId: [2:7521669356914168406:2267], ActorState: ExecuteState, TraceId: 01jz02372293a442jpr6h6ka2x, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.671678Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.683865Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669355072913869:2666] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:37.690210Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669355072913880:2312], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.690328Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZWY4YmU3ZGMtNTc4NmU4NzItNWMwYTYxMTItMTExNWI5Y2M=, ActorId: [1:7521669355072913718:2295], ActorState: ExecuteState, TraceId: 01jz02371ebhnj1pkk4n178jwv, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.690469Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.749743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.821475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === I ... 
AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-30T09:18:52.142091Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7521669421958483337:2470] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-30T09:18:52.142704Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7521669421958483337:2470] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-30T09:18:52.176653Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [3:7521669421958483337:2470] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-30T09:18:52.176888Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669421958483386:2470] connected; active server actors: 1 2025-06-30T09:18:52.177187Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [3:7521669421958483337:2470] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-30T09:18:52.177199Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7521669421958483337:2470] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-30T09:18:52.180718Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669421958483386:2470] disconnected; active server actors: 1 2025-06-30T09:18:52.180732Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669421958483386:2470] disconnected no session 2025-06-30T09:18:52.208174Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7521669421958483337:2470] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-30T09:18:52.208204Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7521669421958483337:2470] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-30T09:18:52.208209Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7521669421958483337:2470] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-30T09:18:52.208219Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-30T09:18:52.209019Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:821: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 3, Generation: 1 2025-06-30T09:18:52.209038Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [3:7521669421958483412:2470], now have 1 active actors on pipe 2025-06-30T09:18:52.209127Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:52.209137Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:52.209175Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-30T09:18:52.209207Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-30T09:18:52.209226Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:52.209801Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1751275132209 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:52.209849Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-30T09:18:52.209365Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:52.209370Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:52.209384Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:52.209414Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0 2025-06-30T09:18:52.210219Z :INFO: [] MessageGroupId [src] SessionId [src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0] Write session: close. 
Timeout = 0 ms 2025-06-30T09:18:52.210237Z :INFO: [] MessageGroupId [src] SessionId [src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0] Write session will now close 2025-06-30T09:18:52.210245Z :DEBUG: [] MessageGroupId [src] SessionId [src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0] Write session: aborting 2025-06-30T09:18:52.210499Z :INFO: [] MessageGroupId [src] SessionId [src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:52.210505Z :DEBUG: [] MessageGroupId [src] SessionId [src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0] Write session: destroy 2025-06-30T09:18:52.210713Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0 grpc read done: success: 0 data: 2025-06-30T09:18:52.210721Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0 grpc read failed 2025-06-30T09:18:52.210728Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0 grpc closed 2025-06-30T09:18:52.210750Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|e7d55bd1-4ee9047f-7c9aeab0-44345814_0 is DEAD 2025-06-30T09:18:52.210938Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:52.211414Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7521669421958483412:2470] destroyed 2025-06-30T09:18:52.211432Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. PORTS 9383 19381 Session was created >>> Ready to answer: ok 2025-06-30T09:18:53.225754Z :INFO: [/Root] OnFederationDiscovery fall back to single mode, database=/Root 2025-06-30T09:18:53.225816Z :INFO: [/Root] [] [1782bb7b-fd7f107c-b1c18531-1fcf6ccf] Open read subsessions to databases: { name: , endpoint: localhost:19381, path: /Root } 2025-06-30T09:18:53.225868Z :INFO: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] Starting read session 2025-06-30T09:18:53.225874Z :DEBUG: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] Starting single session 2025-06-30T09:18:53.226183Z :DEBUG: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] [] In Reconnect, ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-30T09:18:53.226190Z :DEBUG: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] [] New values: ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-30T09:18:53.226196Z :DEBUG: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] [] Reconnecting session to cluster in 0.000000s 2025-06-30T09:18:53.226249Z :ERROR: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] [] Got error. Status: CLIENT_CALL_UNIMPLEMENTED. Description:
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:19381
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:19381. 2025-06-30T09:18:53.226259Z :DEBUG: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] [] In Reconnect, ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-30T09:18:53.226262Z :DEBUG: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] [] New values: ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-30T09:18:53.226281Z :INFO: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] [] Closing session to cluster: SessionClosed { Status: CLIENT_CALL_UNIMPLEMENTED Issues: "
: Error: Failed to establish connection to server "localhost:19381" ( cluster ). Attempts done: 1
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:19381
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:19381. " } 2025-06-30T09:18:53.226610Z :NOTICE: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:53.226620Z :DEBUG: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] [] Abort session to cluster Got new read session event: SessionClosed { Status: CLIENT_CALL_UNIMPLEMENTED Issues: "
: Error: Failed to establish connection to server "localhost:19381" ( cluster ). Attempts done: 1
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:19381
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:19381. " } 2025-06-30T09:18:53.226646Z :INFO: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] Closing read session. Close timeout: 0.010000s 2025-06-30T09:18:53.226655Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:18:53.226663Z :INFO: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] Counters: { Errors: 1 CurrentSessionLifetimeMs: 0 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:53.226669Z :INFO: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] Closing read session. Close timeout: 0.000000s 2025-06-30T09:18:53.226672Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:18:53.226675Z :INFO: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] Counters: { Errors: 1 CurrentSessionLifetimeMs: 0 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:53.226680Z :INFO: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] Closing read session. Close timeout: 0.000000s 2025-06-30T09:18:53.226682Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:18:53.226687Z :INFO: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] Counters: { Errors: 1 CurrentSessionLifetimeMs: 0 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:53.226692Z :NOTICE: [/Root] [/Root] [4bfcfd69-91fc04c2-34b4bd5-16c3bf37] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedSingleBucket ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TResourceBroker::TestOverusageDifferentResources [GOOD] Test command err: 2025-06-30T09:18:53.364378Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-30T09:18:53.364546Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-1 (1 by [1:103:2136]) priority=5 resources={500, 500} 2025-06-30T09:18:53.364562Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:103:2136]) to queue queue_default 2025-06-30T09:18:53.364573Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {500, 500} for task task-1 (1 by [1:103:2136]) from queue queue_default 2025-06-30T09:18:53.364590Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:103:2136]) to queue queue_default 2025-06-30T09:18:53.364605Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 0.000000 to 1000.000000 (insert task task-1 (1 by [1:103:2136])) 2025-06-30T09:18:53.364619Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-2 (2 by [1:103:2136]) priority=5 resources={200, 200} 2025-06-30T09:18:53.364625Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.364632Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:103:2136]) 2025-06-30T09:18:53.364641Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-3 (3 by [1:103:2136]) priority=5 resources={200, 200} 2025-06-30T09:18:53.364646Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [1:103:2136]) to queue queue_compaction1 2025-06-30T09:18:53.364652Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-3 (3 by [1:103:2136]) 2025-06-30T09:18:53.364657Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-30T09:18:53.364905Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1195: New config: Queues { Name: "queue_default" Weight: 5 Limit { Resource: 400 } } Queues { Name: "queue_compaction0" Weight: 50 Limit { Resource: 400 } } Queues { Name: "queue_compaction1" Weight: 20 Limit { Resource: 400 } } Queues { Name: "queue_scan" Weight: 20 Limit { Resource: 400 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 5000000 } Tasks { Name: "compaction0" QueueName: "queue_compaction0" DefaultDuration: 10000000 } Tasks { Name: "compaction1" QueueName: "queue_compaction1" DefaultDuration: 20000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 20000000 } ResourceLimit { Resource: 1000 Resource: 1000 } 2025-06-30T09:18:53.364957Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:103:2136]) to queue queue_default 2025-06-30T09:18:53.364967Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:706: Updated real resource usage for queue queue_default from 0.000000 to 175127513336.399994 2025-06-30T09:18:53.364974Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue 
queue_default from 0.000000 to 500.000000 (insert task task-1 (1 by [1:103:2136])) 2025-06-30T09:18:53.364979Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [1:103:2136]) to queue queue_compaction1 2025-06-30T09:18:53.364985Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.364991Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {200, 200} for task task-2 (2 by [1:103:2136]) from queue queue_compaction0 2025-06-30T09:18:53.364997Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.365004Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 40.000000 (insert task task-2 (2 by [1:103:2136])) 2025-06-30T09:18:53.365013Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {200, 200} for task task-3 (3 by [1:103:2136]) from queue queue_compaction1 2025-06-30T09:18:53.365018Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [1:103:2136]) to queue queue_compaction1 2025-06-30T09:18:53.365024Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 200.000000 (insert task task-3 (3 by [1:103:2136])) 2025-06-30T09:18:53.365033Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1240: Configure result: Success: true 2025-06-30T09:18:53.365080Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1195: New config: Queues { Name: "queue_default" Weight: 5 Limit { Resource: 400 } } Queues { Name: "queue_compaction0" Weight: 50 Limit { Resource: 400 } } Queues { Name: "queue_compaction1" Weight: 20 Limit { Resource: 400 } } Queues { Name: "queue_scan" Weight: 20 Limit { Resource: 400 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 5000000 } Tasks { Name: "compaction0" QueueName: "queue_compaction0" DefaultDuration: 10000000 } Tasks { Name: "compaction1" QueueName: "queue_default1" DefaultDuration: 20000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 20000000 } ResourceLimit { Resource: 1000 Resource: 1000 } 2025-06-30T09:18:53.365092Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'compaction1\' uses unknown queue \'queue_default1\'" 2025-06-30T09:18:53.365147Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1195: New config: Queues { Name: "queue_default" Weight: 5 Limit { Resource: 400 } } Queues { Name: "queue_compaction0" Weight: 50 Limit { Resource: 400 } } Queues { Name: "queue_compaction1" Weight: 20 Limit { Resource: 400 } } Queues { Name: "queue_scan" Weight: 20 Limit { Resource: 400 } } Tasks { Name: "unknown1" QueueName: "queue_default" DefaultDuration: 5000000 } Tasks { Name: "compaction0" QueueName: "queue_compaction0" DefaultDuration: 10000000 } Tasks { Name: "compaction1" QueueName: "queue_default" DefaultDuration: 20000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 20000000 } ResourceLimit { Resource: 1000 Resource: 1000 } 2025-06-30T09:18:53.365157Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'unknown\' is required" 2025-06-30T09:18:53.365194Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1195: New config: Queues 
{ Name: "queue_default1" Weight: 5 Limit { Resource: 400 } } Queues { Name: "queue_compaction0" Weight: 50 Limit { Resource: 400 } } Queues { Name: "queue_compaction1" Weight: 20 Limit { Resource: 400 } } Queues { Name: "queue_scan" Weight: 20 Limit { Resource: 400 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 5000000 } Tasks { Name: "compaction0" QueueName: "queue_compaction0" DefaultDuration: 10000000 } Tasks { Name: "compaction1" QueueName: "queue_default" DefaultDuration: 20000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 20000000 } ResourceLimit { Resource: 1000 Resource: 1000 } 2025-06-30T09:18:53.365202Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'unknown\' uses unknown queue \'queue_default\'" 2025-06-30T09:18:53.365238Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1195: New config: Queues { Name: "queue_default" Weight: 5 Limit { Resource: 400 } } Queues { Name: "queue_compaction0" Weight: 50 Limit { Resource: 400 } } Queues { Name: "queue_compaction1" Weight: 20 Limit { Resource: 400 } } Queues { Name: "queue_scan" Weight: 20 Limit { Resource: 400 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 5000000 } Tasks { Name: "compaction0" QueueName: "queue_compaction0" DefaultDuration: 10000000 } Tasks { Name: "compaction1" QueueName: "queue_default" DefaultDuration: 20000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 20000000 } ResourceLimit { Resource: 1000 Resource: 1000 } 2025-06-30T09:18:53.365278Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:103:2136]) to queue queue_default 2025-06-30T09:18:53.365286Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:706: Updated real resource usage for queue queue_default from 0.000000 to 175127513336.399994 2025-06-30T09:18:53.365292Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 0.000000 to 500.000000 (insert task task-1 (1 by [1:103:2136])) 2025-06-30T09:18:53.365308Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [1:103:2136]) to queue queue_default 2025-06-30T09:18:53.365315Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 500.000000 to 175127514136.399994 (insert task task-3 (3 by [1:103:2136])) 2025-06-30T09:18:53.365320Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.365327Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:706: Updated real resource usage for queue queue_compaction0 from 0.000000 to 7005100533.456001 2025-06-30T09:18:53.365333Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 40.000000 (insert task task-2 (2 by [1:103:2136])) 2025-06-30T09:18:53.365339Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1240: Configure result: Success: true 2025-06-30T09:18:53.649901Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-30T09:18:53.650031Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [2:103:2136]) priority=5 resources={500, 0} 2025-06-30T09:18:53.650041Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by 
[2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.650049Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {500, 0} for task task-1 (1 by [2:103:2136]) from queue queue_compaction0 2025-06-30T09:18:53.650055Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.650067Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 1000.000000 (insert task task-1 (1 by [2:103:2136])) 2025-06-30T09:18:53.650076Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-2 (2 by [2:103:2136]) priority=5 resources={500, 0} 2025-06-30T09:18:53.650080Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [2:103:2136]) to queue queue_compaction1 2025-06-30T09:18:53.650086Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:103:2136]) 2025-06-30T09:18:53.650101Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [2:103:2136]) (release resources {500, 0}) 2025-06-30T09:18:53.650111Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction0 from 0.000000 to 1000.000000 2025-06-30T09:18:53.650115Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {500, 0} for task task-2 (2 by [2:103:2136]) from queue queue_compaction1 2025-06-30T09:18:53.650119Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [2:103:2136]) to queue queue_compaction1 2025-06-30T09:18:53.650125Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 1000.000000 (insert task task-2 (2 by [2:103:2136])) 2025-06-30T09:18:53.650133Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-2 (2 by [2:103:2136]) (release resources {500, 0}) 2025-06-30T09:18:53.650139Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 1000.000000 to 500.000000 (remove task task-2 (2 by [2:103:2136])) 2025-06-30T09:18:53.650143Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction1 from 0.000000 to 500.000000 2025-06-30T09:18:53.650150Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-3 (3 by [2:103:2136]) priority=5 resources={250, 0} 2025-06-30T09:18:53.650154Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [2:103:2136]) to queue queue_compaction1 2025-06-30T09:18:53.650158Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {250, 0} for task task-3 (3 by [2:103:2136]) from queue queue_compaction1 2025-06-30T09:18:53.650162Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [2:103:2136]) to queue queue_compaction1 2025-06-30T09:18:53.650167Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 500.000000 to 987.500000 (insert task task-3 (3 by [2:103:2136])) 2025-06-30T09:18:53.650174Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new scan task task-4 (4 by [2:103:2136]) priority=5 resources={0, 800} 2025-06-30T09:18:53.650178Z 
node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-4 (4 by [2:103:2136]) to queue queue_scan 2025-06-30T09:18:53.650192Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:663: Updated real resource usage for queue queue_compaction1 from 500.000000 to 750.000000 (in-fly consumption {250, 0}) 2025-06-30T09:18:53.650199Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-4 (4 by [2:103:2136]) 2025-06-30T09:18:53.650205Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-5 (5 by [2:103:2136]) priority=5 resources={250, 0} 2025-06-30T09:18:53.650209Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-5 (5 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.650215Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:663: Updated real resource usage for queue queue_compaction1 from 750.000000 to 1000.000000 (in-fly consumption {250, 0}) 2025-06-30T09:18:53.650220Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-4 (4 by [2:103:2136]) 2025-06-30T09:18:53.650224Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {250, 0} for task task-5 (5 by [2:103:2136]) from queue queue_compaction0 2025-06-30T09:18:53.650228Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-5 (5 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:53.650233Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 1000.000000 to 1500.000000 (insert task task-5 (5 by [2:103:2136])) |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestOpen [GOOD] >> TResourceBroker::TestUpdateCookie [GOOD] >> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer [GOOD] >> TTabletPipeTest::TestSendWithoutWaitOpen [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedSingleBucket [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegular |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegular [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen [GOOD] Test command err: LabeledCountersByGroup { Group: "aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } LabeledCountersByGroup { Group: "cons/aaa|1|aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } CounterNames: "value1" { LabeledCountersByGroup { Group: "aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } LabeledCountersByGroup { Group: "cons/aaa|1|aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } CounterNames: "value1" } 2025-06-30T09:18:53.846864Z node 3 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437185] NodeDisconnected NodeId# 2 >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamed >> TTabletCountersAggregator::IntegralPercentileAggregationRegularCheckSingleTablet >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamed [GOOD] >> 
TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck >> TTabletCountersAggregator::IntegralPercentileAggregationRegularCheckSingleTablet [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegularNoOverflowCheck >> BasicUsage::WriteSessionCloseIgnoresWrites [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:166:2058] recipient: [1:164:2139] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:166:2058] recipient: [1:164:2139] Leader for TabletID 9437184 is [1:172:2143] sender: [1:173:2058] recipient: [1:164:2139] Leader for TabletID 9437185 is [0:0:0] sender: [2:176:2049] recipient: [2:167:2097] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [2:176:2049] recipient: [2:167:2097] Leader for TabletID 9437185 is [2:188:2100] sender: [2:189:2049] recipient: [2:167:2097] Leader for TabletID 9437184 is [1:172:2143] sender: [1:216:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:188:2100] sender: [1:218:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:188:2100] sender: [2:220:2049] recipient: [2:44:2053] Leader for TabletID 9437185 is [2:188:2100] sender: [2:221:2049] recipient: [2:161:2096] Leader for TabletID 9437185 is [2:188:2100] sender: [1:224:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:188:2100] sender: [2:226:2049] recipient: [2:44:2053] Leader for TabletID 9437185 is [2:188:2100] sender: [2:227:2049] recipient: [2:225:2113] Leader for TabletID 9437185 is [2:228:2114] sender: [2:229:2049] recipient: [2:225:2113] Leader for TabletID 9437185 is [2:228:2114] sender: [1:258:2058] recipient: [1:15:2062] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TResourceBroker::TestUpdateCookie [GOOD] Test command err: 2025-06-30T09:18:54.036797Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-30T09:18:54.036926Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [1:103:2136]) priority=5 resources={200, 200} 2025-06-30T09:18:54.036934Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.036941Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {200, 200} for task task-1 (1 by [1:103:2136]) from queue queue_compaction0 2025-06-30T09:18:54.036946Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.036959Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 400.000000 (insert task task-1 (1 by [1:103:2136])) 2025-06-30T09:18:54.036968Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-2 (2 by [1:103:2136]) priority=5 resources={200, 200} 2025-06-30T09:18:54.036972Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.036977Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {200, 200} for task task-2 (2 by [1:103:2136]) from queue queue_compaction0 2025-06-30T09:18:54.036982Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task 
task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.036988Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 400.000000 to 800.000000 (insert task task-2 (2 by [1:103:2136])) 2025-06-30T09:18:54.036996Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-3 (3 by [1:103:2136]) priority=5 resources={200, 200} 2025-06-30T09:18:54.037000Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.037006Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-3 (3 by [1:103:2136]) 2025-06-30T09:18:54.037025Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task task-2 (2 by [1:103:2136]) (priority=5 type=compaction0 resources={400, 400} resubmit=1) 2025-06-30T09:18:54.037031Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.037035Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:103:2136]) 2025-06-30T09:18:54.037043Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [1:103:2136]) (release resources {200, 200}) 2025-06-30T09:18:54.037051Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction0 from 400.000000 to 40.000000 (remove task task-1 (1 by [1:103:2136])) 2025-06-30T09:18:54.037058Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction0 from 0.000000 to 40.000000 2025-06-30T09:18:54.037064Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-2 (2 by [1:103:2136]) from queue queue_compaction0 2025-06-30T09:18:54.037069Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.037074Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 40.000000 to 804.000000 (insert task task-2 (2 by [1:103:2136])) 2025-06-30T09:18:54.037080Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-3 (3 by [1:103:2136]) 2025-06-30T09:18:54.037088Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task task-2 (2 by [1:103:2136]) (priority=5 type=compaction0 resources={200, 200} resubmit=1) 2025-06-30T09:18:54.037095Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.037100Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {200, 200} for task task-2 (2 by [1:103:2136]) from queue queue_compaction0 2025-06-30T09:18:54.037105Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.037110Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 40.000000 to 422.000000 (insert task task-2 (2 by [1:103:2136])) 2025-06-30T09:18:54.037115Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {200, 200} for task task-3 (3 by [1:103:2136]) from queue queue_compaction0 
2025-06-30T09:18:54.037120Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.037126Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 422.000000 to 804.000000 (insert task task-3 (3 by [1:103:2136])) 2025-06-30T09:18:54.290400Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-30T09:18:54.290530Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [2:103:2136]) priority=5 resources={400, 400} 2025-06-30T09:18:54.290542Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.290553Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-1 (1 by [2:103:2136]) from queue queue_compaction0 2025-06-30T09:18:54.290560Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.290575Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 800.000000 (insert task task-1 (1 by [2:103:2136])) 2025-06-30T09:18:54.290594Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-2 (2 by [2:103:2136]) priority=5 resources={200, 200} 2025-06-30T09:18:54.290599Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.290605Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:103:2136]) 2025-06-30T09:18:54.290615Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:472: Update cookie for task task-2 (2 by [2:103:2136]) 2025-06-30T09:18:54.290625Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [2:103:2136]) (release resources {400, 400}) 2025-06-30T09:18:54.290633Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction0 from 800.000000 to 0.000000 (remove task task-1 (1 by [2:103:2136])) 2025-06-30T09:18:54.290639Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {200, 200} for task task-2 (2 by [2:103:2136]) from queue queue_compaction0 2025-06-30T09:18:54.290644Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.290649Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 380.000000 (insert task task-2 (2 by [2:103:2136])) 2025-06-30T09:18:54.290660Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-3 (3 by [2:103:2136]) priority=5 resources={200, 200} 2025-06-30T09:18:54.290665Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.290669Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {200, 200} for task task-3 (3 by [2:103:2136]) from queue queue_compaction0 2025-06-30T09:18:54.290674Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [2:103:2136]) 
to queue queue_compaction0 2025-06-30T09:18:54.290680Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 380.000000 to 760.000000 (insert task task-3 (3 by [2:103:2136])) 2025-06-30T09:18:54.290687Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:472: Update cookie for task task-2 (2 by [2:103:2136]) 2025-06-30T09:18:54.290693Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-3 (3 by [2:103:2136]) (release resources {200, 200}) 2025-06-30T09:18:54.290699Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction0 from 760.000000 to 380.000000 (remove task task-3 (3 by [2:103:2136])) 2025-06-30T09:18:54.290709Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task task-2 (2 by [2:103:2136]) (priority=5 type=compaction0 resources={400, 400} resubmit=1) 2025-06-30T09:18:54.290719Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.290724Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-2 (2 by [2:103:2136]) from queue queue_compaction0 2025-06-30T09:18:54.290729Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [2:103:2136]) to queue queue_compaction0 2025-06-30T09:18:54.290752Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 720.000000 (insert task task-2 (2 by [2:103:2136])) |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendWithoutWaitOpen [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck [GOOD] |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::IntegralPercentileAggregationRegular [GOOD] >> TTabletResolver::TabletResolvePriority [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegularNoOverflowCheck [GOOD] >> TResourceBroker::TestRealUsage |79.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |79.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |79.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionCloseIgnoresWrites [GOOD] Test command err: 2025-06-30T09:18:35.432978Z :WriteSessionCloseWaitsForWrites INFO: Random seed for debugging is 1751275115432968 2025-06-30T09:18:36.059244Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669352771213780:2085];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:36.059273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002dac/r3tmp/tmp0dwJbf/pdisk_1.dat 2025-06-30T09:18:36.063067Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669352082277548:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:36.063088Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:36.235982Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:36.255286Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:36.328357Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:36.349678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:36.349705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:36.358127Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:36.358900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2506, node 1 2025-06-30T09:18:36.404600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002dac/r3tmp/yandexhgrR5i.tmp 2025-06-30T09:18:36.404617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002dac/r3tmp/yandexhgrR5i.tmp 2025-06-30T09:18:36.404694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002dac/r3tmp/yandexhgrR5i.tmp 2025-06-30T09:18:36.404747Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:36.411292Z INFO: TTestServer started on Port 63495 GrpcPort 2506 TClient is connected to server localhost:63495 PQClient connected to localhost:2506 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:36.447826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:36.455023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:36.455055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting waiting... 2025-06-30T09:18:36.463474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... waiting... waiting... 2025-06-30T09:18:36.726929Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669352771214672:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.727034Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.727359Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669352771214686:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.728272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:36.735959Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669352771214688:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720661 completed, doublechecking } 2025-06-30T09:18:36.787315Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669352082277825:2273], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:36.787455Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=YmY3MmUxOGUtMTFmYjI3NTktMWQyMjJlYTAtNjliMGM2MGM=, ActorId: [2:7521669352082277799:2267], ActorState: ExecuteState, TraceId: 01jz023666f9hqcfydjmmvetx5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:36.787899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:36.788029Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:36.804321Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669352771214872:2717] txid# 281474976720663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:36.809232Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669352771214887:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:36.809334Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZDJlMGE1NDUtNWMwZmYzNTMtM2U4Y2FkMzMtNGFjYmFjOGM=, ActorId: [1:7521669352771214654:2294], ActorState: ExecuteState, TraceId: 01jz02365h20s5jv3k0t3dm46t, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:36.809496Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:36.821846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:36.914587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:2506", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-06-30T09:18:36.950067Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720666. Ctx: { TraceId: 01jz0236c42gdtsa06sfspr3k0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDc1YTQxYy0zZjA4YTMwZC05ZmQwMjU5ZS0yNGNkMTg2NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker fr ... picInfo { Topic: "rt3.dc1--test-topic" NumPartitions: 1 Config { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } Version: 1 LocalDC: true Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } TopicPath: "/Root/PQ/rt3.dc1--test-topic" YdbDatabasePath: "/Root" Consumers { Name: "user" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } Version: 0 Important: false } } ErrorCode: OK } } } === Topic created, have version: 1 2025-06-30T09:18:51.370315Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-30T09:18:51.370654Z :INFO: [] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-30T09:18:51.370662Z :INFO: [] MessageGroupId [src] SessionId [] Start write session. 
Will connect to endpoint: localhost:18324 2025-06-30T09:18:51.371425Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-30T09:18:51.371686Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-30T09:18:51.371708Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-30T09:18:51.371882Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-30T09:18:51.371915Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:60900 2025-06-30T09:18:51.371921Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:60900 proto=v1 topic=test-topic durationSec=0 2025-06-30T09:18:51.371925Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:18:51.372385Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-30T09:18:51.372422Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-30T09:18:51.372428Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-30T09:18:51.372430Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-30T09:18:51.372435Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7521669416146288980:2452] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-30T09:18:51.373053Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7521669416146288980:2452] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-30T09:18:51.398054Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [3:7521669416146288980:2452] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-30T09:18:51.398220Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669416146289016:2452] connected; active server actors: 1 2025-06-30T09:18:51.398241Z node 3 
:PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [3:7521669416146288980:2452] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-30T09:18:51.398250Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7521669416146288980:2452] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-30T09:18:51.398339Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669416146289016:2452] disconnected; active server actors: 1 2025-06-30T09:18:51.398348Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669416146289016:2452] disconnected no session 2025-06-30T09:18:51.417003Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7521669416146288980:2452] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-30T09:18:51.417023Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7521669416146288980:2452] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-30T09:18:51.417027Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7521669416146288980:2452] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-30T09:18:51.417038Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-30T09:18:51.418378Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [3:7521669416146289038:2452], now have 1 active actors on pipe 2025-06-30T09:18:51.418450Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:821: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-06-30T09:18:51.418555Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:51.418571Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:51.418613Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|5300691f-381ba46f-9f3a94b2-44db64ed_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-30T09:18:51.418669Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-30T09:18:51.418700Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:51.418941Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:51.418953Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:51.418974Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:51.419099Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|5300691f-381ba46f-9f3a94b2-44db64ed_0 2025-06-30T09:18:51.419566Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1751275131419 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:51.419613Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|5300691f-381ba46f-9f3a94b2-44db64ed_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-30T09:18:51.421420Z :INFO: [] MessageGroupId [src] SessionId [src|5300691f-381ba46f-9f3a94b2-44db64ed_0] Write session: close. Timeout = 0 ms 2025-06-30T09:18:51.421435Z :INFO: [] MessageGroupId [src] SessionId [src|5300691f-381ba46f-9f3a94b2-44db64ed_0] Write session will now close 2025-06-30T09:18:51.421442Z :DEBUG: [] MessageGroupId [src] SessionId [src|5300691f-381ba46f-9f3a94b2-44db64ed_0] Write session: aborting 2025-06-30T09:18:51.421630Z :INFO: [] MessageGroupId [src] SessionId [src|5300691f-381ba46f-9f3a94b2-44db64ed_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:51.421637Z :DEBUG: [] MessageGroupId [src] SessionId [src|5300691f-381ba46f-9f3a94b2-44db64ed_0] Write session: destroy 2025-06-30T09:18:51.421913Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|5300691f-381ba46f-9f3a94b2-44db64ed_0 grpc read done: success: 0 data: 2025-06-30T09:18:51.421926Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|5300691f-381ba46f-9f3a94b2-44db64ed_0 grpc read failed 2025-06-30T09:18:51.421947Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|5300691f-381ba46f-9f3a94b2-44db64ed_0 grpc closed 2025-06-30T09:18:51.421965Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|5300691f-381ba46f-9f3a94b2-44db64ed_0 is DEAD 2025-06-30T09:18:51.422269Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:51.422493Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7521669416146289038:2452] destroyed 2025-06-30T09:18:51.422520Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. Session was created >>> Ready to answer: ok 2025-06-30T09:18:51.482317Z :ERROR: [/Root] OnFederationDiscovery: Got error. Status: UNAVAILABLE. 
Description: 2025-06-30T09:18:54.631218Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1986: ActorId: [3:7521669429031191071:2483] TxId: 281474976715692. Ctx: { TraceId: 01jz023qkffwtem80ek2yv3q6p, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2I5MjE5ZC1jNWU5MmUwMy0xZjhjMDAwZi1kZTVjNGJhMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 4 2025-06-30T09:18:54.631409Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7521669429031191075:2483], TxId: 281474976715692, task: 3. Ctx: { SessionId : ydb://session/3?node_id=3&id=N2I5MjE5ZC1jNWU5MmUwMy0xZjhjMDAwZi1kZTVjNGJhMQ==. CustomerSuppliedId : . TraceId : 01jz023qkffwtem80ek2yv3q6p. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7521669429031191071:2483], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> TBlockBlobStorageTest::DelayedErrorsNotIgnored [GOOD] >> TFlatMetrics::DecayingAverageAvg [GOOD] >> DataShardVolatile::DistributedWriteThenImmediateUpsert >> DataShardVolatile::DistributedWrite |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletResolver::TabletResolvePriority [GOOD] |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck [GOOD] >> TResourceBroker::TestRealUsage [GOOD] >> TResourceBroker::TestRandomQueue >> TTabletPipeTest::TestPipeWithVersionInfo >> TResourceBrokerInstant::TestMerge |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::IntegralPercentileAggregationRegularNoOverflowCheck [GOOD] >> TResourceBrokerConfig::UpdateQueues [GOOD] >> TResourceBrokerConfig::DefaultConfig [GOOD] >> TTabletPipeTest::TestPipeWithVersionInfo [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TFlatMetrics::DecayingAverageAvg [GOOD] Test command err: ... waiting for all block results ... passing block result OK for [1:105:2137] ... blocking block result NO_GROUP for [1:107:2137] ... blocking block result NO_GROUP for [1:108:2137] ... blocking block result NO_GROUP for [1:106:2137] >> TResourceBrokerInstant::TestMerge [GOOD] >> TTabletCountersAggregator::ColumnShardCounters >> TFlatMetrics::TimeSeriesKV2 [GOOD] >> TPipeCacheTest::TestAutoConnect |79.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |79.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |79.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql >> TResourceBroker::TestRandomQueue [GOOD] >> TTabletCountersAggregator::ColumnShardCounters [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TResourceBrokerConfig::DefaultConfig [GOOD] Test command err: Queues { Name: "queue_default" Weight: 30 Limit { Cpu: 2 } } Queues { Name: "queue_compaction_gen0" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_compaction_gen1" Weight: 100 Limit { Cpu: 6 } } Queues { Name: "queue_compaction_gen2" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_gen3" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_borrowed" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_cs_indexation" Weight: 100 Limit { Cpu: 3 Memory: 1073741824 } } Queues { Name: "queue_cs_ttl" Weight: 100 Limit { Cpu: 3 Memory: 1073741824 } } Queues { Name: "queue_cs_general" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_scan_read" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_normalizer" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_transaction" Weight: 100 Limit { Cpu: 4 } } Queues { Name: "queue_background_compaction" Weight: 10 Limit { Cpu: 1 } } Queues { Name: "queue_scan" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_backup" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_restore" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_kqp_resource_manager" Weight: 30 Limit { Cpu: 4 Memory: 10737418240 } } Queues { Name: "queue_build_index" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_ttl" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_datashard_build_stats" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_cdc_initial_scan" Weight: 100 Limit { 
Cpu: 2 } } Queues { Name: "queue_statistics_scan" Weight: 100 Limit { Cpu: 1 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 60000000 } Tasks { Name: "compaction_gen0" QueueName: "queue_compaction_gen0" DefaultDuration: 10000000 } Tasks { Name: "compaction_gen1" QueueName: "queue_compaction_gen1" DefaultDuration: 30000000 } Tasks { Name: "compaction_gen2" QueueName: "queue_compaction_gen2" DefaultDuration: 120000000 } Tasks { Name: "compaction_gen3" QueueName: "queue_compaction_gen3" DefaultDuration: 600000000 } Tasks { Name: "compaction_borrowed" QueueName: "queue_compaction_borrowed" DefaultDuration: 600000000 } Tasks { Name: "CS::TTL" QueueName: "queue_cs_ttl" DefaultDuration: 600000000 } Tasks { Name: "CS::INDEXATION" QueueName: "queue_cs_indexation" DefaultDuration: 600000000 } Tasks { Name: "CS::GENERAL" QueueName: "queue_cs_general" DefaultDuration: 600000000 } Tasks { Name: "CS::SCAN_READ" QueueName: "queue_cs_scan_read" DefaultDuration: 600000000 } Tasks { Name: "CS::NORMALIZER" QueueName: "queue_cs_normalizer" DefaultDuration: 600000000 } Tasks { Name: "transaction" QueueName: "queue_transaction" DefaultDuration: 600000000 } Tasks { Name: "background_compaction" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen0" QueueName: "queue_background_compaction" DefaultDuration: 10000000 } Tasks { Name: "background_compaction_gen1" QueueName: "queue_background_compaction" DefaultDuration: 20000000 } Tasks { Name: "background_compaction_gen2" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen3" QueueName: "queue_background_compaction" DefaultDuration: 300000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 300000000 } Tasks { Name: "backup" QueueName: "queue_backup" DefaultDuration: 300000000 } Tasks { Name: "restore" QueueName: "queue_restore" DefaultDuration: 300000000 } Tasks { Name: "kqp_query" QueueName: "queue_kqp_resource_manager" DefaultDuration: 600000000 } Tasks { Name: "build_index" QueueName: "queue_build_index" DefaultDuration: 600000000 } Tasks { Name: "ttl" QueueName: "queue_ttl" DefaultDuration: 300000000 } Tasks { Name: "datashard_build_stats" QueueName: "queue_datashard_build_stats" DefaultDuration: 5000000 } Tasks { Name: "cdc_initial_scan" QueueName: "queue_cdc_initial_scan" DefaultDuration: 600000000 } Tasks { Name: "statistics_scan" QueueName: "queue_statistics_scan" DefaultDuration: 600000000 } ResourceLimit { Cpu: 256 Memory: 17179869184 } Total queues cpu: 89 |79.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestPipeWithVersionInfo [GOOD] >> KqpPg::TableSelect-useSink [GOOD] >> KqpPg::TableInsert+useSink >> KqpQueryPerf::Delete-QueryService-UseSink >> KqpQueryPerf::Insert-QueryService-UseSink >> KqpPg::ValuesInsert+useSink [GOOD] >> KqpPg::ValuesInsert-useSink >> KqpQueryPerf::UpdateOn-QueryService-UseSink |79.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |79.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |79.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |79.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |79.7%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |79.7%| [LD] {RESULT} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::ColumnShardCounters [GOOD] Test command err: 2025-06-30T09:18:55.870827Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-30T09:18:55.870963Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [1:103:2136]) priority=0 resources={100, 200} 2025-06-30T09:18:55.870972Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:55.870980Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {100, 200} for task task-1 (1 by [1:103:2136]) from queue queue_compaction0 2025-06-30T09:18:55.870985Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:55.870996Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 400.000000 (insert task task-1 (1 by [1:103:2136])) 2025-06-30T09:18:55.871011Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-2 (2 by [1:103:2136]) priority=0 resources={100, 100} 2025-06-30T09:18:55.871016Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:55.871021Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {100, 100} for task task-2 (2 by [1:103:2136]) from queue queue_compaction0 2025-06-30T09:18:55.871025Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:55.871030Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 400.000000 to 600.000000 (insert task task-2 (2 by [1:103:2136])) 2025-06-30T09:18:55.871042Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task task-1 (1 by [1:103:2136]) (priority=0 type=compaction0 resources={200, 300} resubmit=0) 2025-06-30T09:18:55.871046Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:55.871051Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 200.000000 to 800.000000 (insert task task-1 (1 by [1:103:2136])) 2025-06-30T09:18:55.871056Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-2 (2 by [1:103:2136]) (release resources {100, 100}) 2025-06-30T09:18:55.871062Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction0 from 800.000000 to 600.000000 (remove task task-2 (2 by [1:103:2136])) 2025-06-30T09:18:55.871073Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-3 (3 by [1:103:2136]) priority=0 resources={10, 20} 2025-06-30T09:18:55.871077Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [1:103:2136]) to queue queue_compaction1 2025-06-30T09:18:55.871082Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {10, 
20} for task task-3 (3 by [1:103:2136]) from queue queue_compaction1 2025-06-30T09:18:55.871087Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [1:103:2136]) to queue queue_compaction1 2025-06-30T09:18:55.871092Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 40.000000 (insert task task-3 (3 by [1:103:2136])) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TResourceBroker::TestRandomQueue [GOOD] Test command err: 2025-06-30T09:18:55.492236Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-30T09:18:55.492333Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [1:103:2136]) priority=5 resources={400, 400} 2025-06-30T09:18:55.492340Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:55.492345Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-1 (1 by [1:103:2136]) from queue queue_compaction0 2025-06-30T09:18:55.492348Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:55.492357Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 800.000000 (insert task task-1 (1 by [1:103:2136])) 2025-06-30T09:18:55.492363Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-2 (2 by [1:103:2136]) priority=5 resources={400, 400} 2025-06-30T09:18:55.492366Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:103:2136]) to queue queue_compaction1 2025-06-30T09:18:55.492370Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:103:2136]) 2025-06-30T09:18:55.492374Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-3 (3 by [1:103:2136]) priority=5 resources={400, 400} 2025-06-30T09:18:55.492376Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:55.492379Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:103:2136]) 2025-06-30T09:18:55.492381Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-30T09:18:55.492385Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-4 (4 by [1:103:2136]) priority=5 resources={400, 400} 2025-06-30T09:18:55.492387Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-4 (4 by [1:103:2136]) to queue queue_compaction1 2025-06-30T09:18:55.492390Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:103:2136]) 2025-06-30T09:18:55.492392Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-30T09:18:55.492396Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-5 (5 by [1:103:2136]) priority=5 resources={400, 400} 2025-06-30T09:18:55.492399Z node 1 :RESOURCE_BROKER DEBUG: 
resource_broker.cpp:679: Assigning waiting task task-5 (5 by [1:103:2136]) to queue queue_compaction0 2025-06-30T09:18:55.492401Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:103:2136]) 2025-06-30T09:18:55.492403Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-30T09:18:55.492407Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-6 (6 by [1:103:2136]) priority=5 resources={400, 400} 2025-06-30T09:18:55.492409Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-6 (6 by [1:103:2136]) to queue queue_compaction1 2025-06-30T09:18:55.492411Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:103:2136]) 2025-06-30T09:18:55.492414Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-30T09:18:55.492423Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [1:103:2136]) (release resources {400, 400}) 2025-06-30T09:18:55.492429Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction0 from 0.000000 to 800.000000 2025-06-30T09:18:55.492432Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-2 (2 by [1:103:2136]) from queue queue_compaction1 2025-06-30T09:18:55.492435Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [1:103:2136]) to queue queue_compaction1 2025-06-30T09:18:55.492438Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 800.000000 (insert task task-2 (2 by [1:103:2136])) 2025-06-30T09:18:55.492441Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-4 (4 by [1:103:2136]) 2025-06-30T09:18:55.492443Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-30T09:18:55.492447Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-2 (2 by [1:103:2136]) (release resources {400, 400}) 2025-06-30T09:18:55.492451Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 800.000000 to 280.000000 (remove task task-2 (2 by [1:103:2136])) 2025-06-30T09:18:55.492454Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction1 from 0.000000 to 280.000000 2025-06-30T09:18:55.492457Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-4 (4 by [1:103:2136]) from queue queue_compaction1 2025-06-30T09:18:55.492459Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-4 (4 by [1:103:2136]) to queue queue_compaction1 2025-06-30T09:18:55.492462Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 280.000000 to 1054.000000 (insert task task-4 (4 by [1:103:2136])) 2025-06-30T09:18:55.492464Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-3 (3 by [1:103:2136]) 2025-06-30T09:18:55.492466Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction1 blocked by an earlier queue 
2025-06-30T09:18:55.492470Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-4 (4 by [1:103:2136]) (release resources {400, 400}) 2025-06-30T09:18:55.492473Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 1054.000000 to 560.000000 (remove task task-4 (4 by [1:103:2136])) 2025-06-30T09:18:55.492476Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction1 from 280.000000 to 560.000000 2025-06-30T09:18:55.492478Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-6 (6 by [1:103:2136]) from queue queue_compaction1 2025-06-30T09:18:55.492481Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-6 (6 by [1:103:2136]) to queue queue_compaction1 2025-06-30T09:18:55.492484Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 560.000000 to 1308.000000 (insert task task-6 (6 by [1:103:2136])) 2025-06-30T09:18:55.492486Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-3 (3 by [1:103:2136]) 2025-06-30T09:18:55.745428Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-30T09:18:55.745562Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-1 (1 by [2:103:2136]) priority=0 resources={391, 489} 2025-06-30T09:18:55.745572Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [2:103:2136]) to queue queue_compaction1 2025-06-30T09:18:55.745581Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {391, 489} for task task-1 (1 by [2:103:2136]) from queue queue_compaction1 2025-06-30T09:18:55.745586Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:103:2136]) to queue queue_compaction1 2025-06-30T09:18:55.745598Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 978.000000 (insert task task-1 (1 by [2:103:2136])) 2025-06-30T09:18:55.745608Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-2 (2 by [2:103:2136]) priority=0 resources={479, 277} 2025-06-30T09:18:55.745613Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [2:103:2136]) to queue queue_default 2025-06-30T09:18:55.745618Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:103:2136]) 2025-06-30T09:18:55.745624Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-3 (3 by [2:103:2136]) priority=2 resources={481, 489} 2025-06-30T09:18:55.745627Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [2:103:2136]) to queue queue_default 2025-06-30T09:18:55.745632Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:103:2136]) 2025-06-30T09:18:55.745637Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-4 (4 by [2:103:2136]) priority=2 resources={228, 36} 2025-06-30T09:18:55.745643Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-4 (4 by [2:103:2136]) to queue queue_default 2025-06-30T09:18:55.745647Z node 2 
:RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:103:2136]) 2025-06-30T09:18:55.745652Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-5 (5 by [2:103:2136]) priority=4 resources={270, 469} 2025-06-30T09:18:55.745656Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-5 (5 by [2:103:2136]) to queue queue_default 2025-06-30T09:18:55.745660Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:103:2136]) 2025-06-30T09:18:55.745666Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-6 (6 by [2:103:2136]) priority=0 resources={108, 409} 2025-06-30T09:18:55.745669Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-6 (6 by [2:103:2136]) to queue queue_compaction1 2025-06-30T09:18:55.745673Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:103:2136]) 2025-06-30T09:18:55.745677Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction1 blocked by an earlier queue 2025-06-30T09:18:55.745694Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-7 (7 by [2:103:2136]) priority=3 resources={302, 84} 2025-06-30T09:18:55.745699Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-7 (7 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-30T09:18:55.745703Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:103:2136]) 2025-06-30T09:18:55.745710Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction1 blocked by an earlier queue 2025-06-30T09:18:55.745717Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown ... 
d resource usage for queue queue_default from 741026.163200 to 743636.761600 (insert task task-908 (908 by [2:103:2136])) 2025-06-30T09:18:55.935216Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-912 (912 by [2:103:2136]) 2025-06-30T09:18:55.935223Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-908 (908 by [2:103:2136]) (release resources {344, 250}) 2025-06-30T09:18:55.935229Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 743636.761600 to 743402.979200 (remove task task-908 (908 by [2:103:2136])) 2025-06-30T09:18:55.935234Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 742072.524800 to 743402.979200 2025-06-30T09:18:55.935239Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {345, 26} for task task-912 (912 by [2:103:2136]) from queue queue_default 2025-06-30T09:18:55.935245Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-912 (912 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-30T09:18:55.935253Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 743402.979200 to 745003.503200 (insert task task-912 (912 by [2:103:2136])) 2025-06-30T09:18:55.935258Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-920 (920 by [2:103:2136]) 2025-06-30T09:18:55.935265Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-912 (912 by [2:103:2136]) (release resources {345, 26}) 2025-06-30T09:18:55.935271Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 745003.503200 to 743917.581200 (remove task task-912 (912 by [2:103:2136])) 2025-06-30T09:18:55.935276Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 743402.979200 to 743917.581200 2025-06-30T09:18:55.935281Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {294, 473} for task task-920 (920 by [2:103:2136]) from queue queue_default 2025-06-30T09:18:55.935286Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-920 (920 by [2:103:2136]) to queue queue_default 2025-06-30T09:18:55.935291Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 743917.581200 to 745995.375600 (insert task task-920 (920 by [2:103:2136])) 2025-06-30T09:18:55.935296Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-922 (922 by [2:103:2136]) 2025-06-30T09:18:55.935302Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-920 (920 by [2:103:2136]) (release resources {294, 473}) 2025-06-30T09:18:55.935308Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 743917.581200 to 746840.532000 2025-06-30T09:18:55.935313Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {214, 492} for task task-922 (922 by [2:103:2136]) from queue queue_default 2025-06-30T09:18:55.935317Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-922 (922 by [2:103:2136]) to queue queue_default 2025-06-30T09:18:55.935323Z node 2 :RESOURCE_BROKER DEBUG: 
resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 745995.375600 to 749031.900000 (insert task task-922 (922 by [2:103:2136])) 2025-06-30T09:18:55.935327Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-935 (935 by [2:103:2136]) 2025-06-30T09:18:55.935337Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-922 (922 by [2:103:2136]) (release resources {214, 492}) 2025-06-30T09:18:55.935343Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 746840.532000 to 750548.834400 2025-06-30T09:18:55.935348Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {102, 184} for task task-935 (935 by [2:103:2136]) from queue queue_default 2025-06-30T09:18:55.935352Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-935 (935 by [2:103:2136]) to queue queue_default 2025-06-30T09:18:55.935358Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 749031.900000 to 751416.210400 (insert task task-935 (935 by [2:103:2136])) 2025-06-30T09:18:55.935364Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-942 (942 by [2:103:2136]) 2025-06-30T09:18:55.935371Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-935 (935 by [2:103:2136]) (release resources {102, 184}) 2025-06-30T09:18:55.935377Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 750548.834400 to 751429.826400 2025-06-30T09:18:55.935384Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {165, 442} for task task-942 (942 by [2:103:2136]) from queue queue_default 2025-06-30T09:18:55.935389Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-942 (942 by [2:103:2136]) to queue queue_default 2025-06-30T09:18:55.935396Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 751416.210400 to 753467.092800 (insert task task-942 (942 by [2:103:2136])) 2025-06-30T09:18:55.935401Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-949 (949 by [2:103:2136]) 2025-06-30T09:18:55.935407Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-942 (942 by [2:103:2136]) (release resources {165, 442}) 2025-06-30T09:18:55.935414Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 751429.826400 to 754648.293600 2025-06-30T09:18:55.935419Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {138, 493} for task task-949 (949 by [2:103:2136]) from queue queue_default 2025-06-30T09:18:55.935424Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-949 (949 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-30T09:18:55.935430Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 753467.092800 to 757054.330800 (insert task task-949 (949 by [2:103:2136])) 2025-06-30T09:18:55.935434Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-950 (950 by [2:103:2136]) 2025-06-30T09:18:55.935440Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-949 
(949 by [2:103:2136]) (release resources {138, 493}) 2025-06-30T09:18:55.935447Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 757054.330800 to 756545.160400 (remove task task-949 (949 by [2:103:2136])) 2025-06-30T09:18:55.935452Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 754648.293600 to 756545.160400 2025-06-30T09:18:55.935457Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {384, 195} for task task-950 (950 by [2:103:2136]) from queue queue_default 2025-06-30T09:18:55.935461Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-950 (950 by [2:103:2136]) to queue queue_default 2025-06-30T09:18:55.935468Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 756545.160400 to 758426.914000 (insert task task-950 (950 by [2:103:2136])) 2025-06-30T09:18:55.935472Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-966 (966 by [2:103:2136]) 2025-06-30T09:18:55.935479Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-950 (950 by [2:103:2136]) (release resources {384, 195}) 2025-06-30T09:18:55.935485Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 756545.160400 to 758612.616400 2025-06-30T09:18:55.935490Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {199, 363} for task task-966 (966 by [2:103:2136]) from queue queue_default 2025-06-30T09:18:55.935495Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-966 (966 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-30T09:18:55.935501Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 758426.914000 to 760458.834400 (insert task task-966 (966 by [2:103:2136])) 2025-06-30T09:18:55.935509Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-967 (967 by [2:103:2136]) 2025-06-30T09:18:55.935515Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-966 (966 by [2:103:2136]) (release resources {199, 363}) 2025-06-30T09:18:55.935522Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 760458.834400 to 759567.451600 (remove task task-966 (966 by [2:103:2136])) 2025-06-30T09:18:55.935528Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 758612.616400 to 759567.451600 2025-06-30T09:18:55.935533Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {52, 238} for task task-967 (967 by [2:103:2136]) from queue queue_default 2025-06-30T09:18:55.935539Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-967 (967 by [2:103:2136]) to queue queue_default 2025-06-30T09:18:55.935545Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 759567.451600 to 760722.322800 (insert task task-967 (967 by [2:103:2136])) 2025-06-30T09:18:55.935551Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:630: Skip queue queue_default due to exceeded limits 2025-06-30T09:18:55.935557Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task 
task-967 (967 by [2:103:2136]) (release resources {52, 238}) 2025-06-30T09:18:55.935564Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 759567.451600 to 761376.918000 2025-06-30T09:18:55.935569Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {366, 112} for task task-975 (975 by [2:103:2136]) from queue queue_default 2025-06-30T09:18:55.935575Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-975 (975 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-30T09:18:55.935581Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 760722.322800 to 763151.432400 (insert task task-975 (975 by [2:103:2136])) 2025-06-30T09:18:55.935588Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-975 (975 by [2:103:2136]) (release resources {366, 112}) 2025-06-30T09:18:55.935594Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 763151.432400 to 762104.818800 (remove task task-975 (975 by [2:103:2136])) 2025-06-30T09:18:55.935600Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 761376.918000 to 762104.818800 >> KqpQueryPerf::IndexDeleteOn+QueryService-UseSink >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-NoDbAdmin-clusteradmin [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-DbAdmin-clusteradmin >> KqpWorkload::STOCK >> KqpQueryPerf::RangeRead-QueryService >> KqpQueryPerf::Replace+QueryService+UseSink >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup-QueryService >> BasicUsage::ReadMirrored [GOOD] >> KqpQueryPerf::UpdateOn+QueryService-UseSink >> TPipeCacheTest::TestAutoConnect [GOOD] >> THiveTest::TestHiveBalancerOneTabletHighUsage [GOOD] >> DataShardVolatile::DistributedWriteThenImmediateUpsert [GOOD] >> DataShardVolatile::DistributedWriteThenSplit >> KqpQueryPerf::Insert-QueryService-UseSink [GOOD] >> KqpQueryPerf::Insert-QueryService+UseSink >> KqpQueryPerf::Delete-QueryService-UseSink [GOOD] >> KqpQueryPerf::Delete-QueryService+UseSink >> KqpQueryPerf::Insert+QueryService-UseSink >> KqpQueryPerf::DeleteOn+QueryService-UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::ReadMirrored [GOOD] Test command err: 2025-06-30T09:18:36.572140Z :PropagateSessionClosed INFO: Random seed for debugging is 1751275116572128 2025-06-30T09:18:36.795808Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669353156886342:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:36.795829Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:36.799684Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669353497556041:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:36.799705Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:36.832879Z node 1 :PQ_READ_PROXY DEBUG: 
caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d43/r3tmp/tmpbFB6k9/pdisk_1.dat 2025-06-30T09:18:36.847302Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:36.866445Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:36.898171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:36.898204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 20139, node 1 2025-06-30T09:18:36.900757Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:36.911042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002d43/r3tmp/yandexpYs9LV.tmp 2025-06-30T09:18:36.911063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002d43/r3tmp/yandexpYs9LV.tmp 2025-06-30T09:18:36.911135Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002d43/r3tmp/yandexpYs9LV.tmp 2025-06-30T09:18:36.911179Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:36.917784Z INFO: TTestServer started on Port 64109 GrpcPort 20139 TClient is connected to server localhost:64109 PQClient connected to localhost:20139 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:18:36.939918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:36.939945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:36.941721Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:36.942211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:36.956034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-30T09:18:37.220983Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669357451854528:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.220986Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669357451854520:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.221014Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.222500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:37.225015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669357451854567:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.225052Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.230691Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669357451854534:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:18:37.270194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.274223Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669357792523626:2273], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.274351Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=OTY2Mjk1OWYtYjA4MmNhMGItN2M3NDIyMDItNTI5ODYw, ActorId: [2:7521669357792523561:2265], ActorState: ExecuteState, TraceId: 01jz0236nvcjd5dag02k3csvdj, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.275052Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.332663Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669357451854679:2698] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:37.339458Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669357451854690:2312], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.340232Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=MzMzZWVlMjMtNjZmNjJiOTMtODlhOTRiYmUtZjYxYzdiNDY=, ActorId: [1:7521669357451854517:2295], ActorState: ExecuteState, TraceId: 01jz0236n21sxjm6vj0c8c5r9a, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.340429Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.382475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.434982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === ... UG: [/Root] MessageGroupId [src_id] SessionId [src_id|c451f91a-635d80a8-45fe7e22-b6cfd48_0] Write session: acknoledged message 5 2025-06-30T09:18:56.726969Z :DEBUG: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] [] Got ReadResponse, serverBytesSize = 1450, now ReadSizeBudget = 0, ReadSizeServerDelta = 8387158 2025-06-30T09:18:56.727010Z :DEBUG: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 8387158 2025-06-30T09:18:56.727067Z :DEBUG: [/Root] Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-30T09:18:56.727078Z :DEBUG: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] [] Returning serverBytesSize = 1450 to budget 2025-06-30T09:18:56.727083Z :DEBUG: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] [] In ContinueReadingDataImpl, ReadSizeBudget = 1450, ReadSizeServerDelta = 8387158 2025-06-30T09:18:56.727152Z :DEBUG: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 8388608 2025-06-30T09:18:56.727166Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 0} (1-1) 2025-06-30T09:18:56.727171Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 1} (2-2) 2025-06-30T09:18:56.727176Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 2} (3-3) 2025-06-30T09:18:56.727180Z :DEBUG: [/Root] Take Data. Partition 0. 
Read: {0, 3} (4-4) >>> event from dataHandler: DataReceived { Partition session id: 1 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 Message { Data: ..130 bytes.. Information: { Offset: 1 ProducerId: "src_id" SeqNo: 2 CreateTime: 2025-06-30T09:18:56.719000Z WriteTime: 2025-06-30T09:18:56.722000Z MessageGroupId: "src_id" Meta: { "ident": "unknown", "server": "ipv6:[::1]:60414", "_ip": "ipv6:[::1]:60414", "logtype": "unknown" } MessageMeta: { } } Partition session id: 1 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 } Message { Data: ..240 bytes.. Information: { Offset: 2 ProducerId: "src_id" SeqNo: 3 CreateTime: 2025-06-30T09:18:56.719000Z WriteTime: 2025-06-30T09:18:56.722000Z MessageGroupId: "src_id" Meta: { "ident": "unknown", "server": "ipv6:[::1]:60414", "_ip": "ipv6:[::1]:60414", "logtype": "unknown" } MessageMeta: { } } Partition session id: 1 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 } Message { Data: ..350 bytes.. Information: { Offset: 3 ProducerId: "src_id" SeqNo: 4 CreateTime: 2025-06-30T09:18:56.719000Z WriteTime: 2025-06-30T09:18:56.722000Z MessageGroupId: "src_id" Meta: { "ident": "unknown", "server": "ipv6:[::1]:60414", "_ip": "ipv6:[::1]:60414", "logtype": "unknown" } MessageMeta: { } } Partition session id: 1 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 } Message { Data: ..460 bytes.. Information: { Offset: 4 ProducerId: "src_id" SeqNo: 5 CreateTime: 2025-06-30T09:18:56.719000Z WriteTime: 2025-06-30T09:18:56.722000Z MessageGroupId: "src_id" Meta: { "ident": "unknown", "server": "ipv6:[::1]:60414", "_ip": "ipv6:[::1]:60414", "logtype": "unknown" } MessageMeta: { } } Partition session id: 1 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 } } >>> get 4 messages in this event 2025-06-30T09:18:56.727257Z :DEBUG: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] [] The application data is transferred to the client. 
Number of messages 4, size 1180 bytes 2025-06-30T09:18:56.727261Z :DEBUG: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] [] Returning serverBytesSize = 0 to budget 2025-06-30T09:18:56.727278Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_10449229285530724723_v1 grpc read done: success# 1, data# { read_request { bytes_size: 1450 } } 2025-06-30T09:18:56.727355Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/user session shared/user_1_1_10449229285530724723_v1 got read request: guid# 52f905cf-71bbcb8e-413a402-2457e48f 2025-06-30T09:18:56.819456Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|c451f91a-635d80a8-45fe7e22-b6cfd48_0] Write session will now close 2025-06-30T09:18:56.819482Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|c451f91a-635d80a8-45fe7e22-b6cfd48_0] Write session: aborting 2025-06-30T09:18:56.819706Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|c451f91a-635d80a8-45fe7e22-b6cfd48_0] Write session: gracefully shut down, all writes complete >>> Writes to test-topic-mirrored-from-dc3 successful 2025-06-30T09:18:56.819719Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|c451f91a-635d80a8-45fe7e22-b6cfd48_0] Write session: destroy 2025-06-30T09:18:56.819777Z :INFO: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] Closing read session. Close timeout: 18446744073709.551615s 2025-06-30T09:18:56.819796Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:3:4:0 -:test-topic-mirrored-from-dc2:0:2:4:0 -:test-topic-mirrored-from-dc3:0:1:4:0 2025-06-30T09:18:56.819808Z :INFO: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] Counters: { Errors: 0 CurrentSessionLifetimeMs: 345 BytesRead: 3600 MessagesRead: 15 BytesReadCompressed: 3600 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:56.819956Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|c451f91a-635d80a8-45fe7e22-b6cfd48_0 grpc read done: success: 0 data: 2025-06-30T09:18:56.819976Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: src_id|c451f91a-635d80a8-45fe7e22-b6cfd48_0 grpc read failed 2025-06-30T09:18:56.819987Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: src_id|c451f91a-635d80a8-45fe7e22-b6cfd48_0 grpc closed 2025-06-30T09:18:56.819993Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: src_id|c451f91a-635d80a8-45fe7e22-b6cfd48_0 is DEAD 2025-06-30T09:18:56.820076Z :INFO: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] Closing read session. Close timeout: 0.000000s 2025-06-30T09:18:56.820083Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:3:4:0 -:test-topic-mirrored-from-dc2:0:2:4:0 -:test-topic-mirrored-from-dc3:0:1:4:0 2025-06-30T09:18:56.820086Z :INFO: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] Counters: { Errors: 0 CurrentSessionLifetimeMs: 345 BytesRead: 3600 MessagesRead: 15 BytesReadCompressed: 3600 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:56.820090Z :INFO: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] Closing read session. 
Close timeout: 0.000000s 2025-06-30T09:18:56.820093Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:3:4:0 -:test-topic-mirrored-from-dc2:0:2:4:0 -:test-topic-mirrored-from-dc3:0:1:4:0 2025-06-30T09:18:56.820094Z :INFO: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] Counters: { Errors: 0 CurrentSessionLifetimeMs: 345 BytesRead: 3600 MessagesRead: 15 BytesReadCompressed: 3600 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:56.820107Z :NOTICE: [/Root] [/Root] [a656dcca-13330693-b9a317b2-a0046638] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:18:56.820116Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_10449229285530724723_v1 grpc read done: success# 0, data# { } 2025-06-30T09:18:56.820129Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_1_1_10449229285530724723_v1 grpc read failed 2025-06-30T09:18:56.820137Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_1_1_10449229285530724723_v1 grpc closed 2025-06-30T09:18:56.820156Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_1_1_10449229285530724723_v1 is DEAD 2025-06-30T09:18:56.820339Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:56.820471Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [1:7521669437606142933:2533] disconnected; active server actors: 1 2025-06-30T09:18:56.820480Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [1:7521669437606142933:2533] client user disconnected session shared/user_1_1_10449229285530724723_v1 2025-06-30T09:18:56.820480Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][rt3.dc1--test-topic-mirrored-from-dc2] pipe [1:7521669437606142934:2533] disconnected; active server actors: 1 2025-06-30T09:18:56.820485Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][rt3.dc1--test-topic-mirrored-from-dc2] pipe [1:7521669437606142934:2533] client user disconnected session shared/user_1_1_10449229285530724723_v1 2025-06-30T09:18:56.820507Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037897][rt3.dc1--test-topic-mirrored-from-dc3] pipe [1:7521669437606142932:2533] disconnected; active server actors: 1 2025-06-30T09:18:56.820515Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037897][rt3.dc1--test-topic-mirrored-from-dc3] pipe [1:7521669437606142932:2533] client user disconnected session shared/user_1_1_10449229285530724723_v1 2025-06-30T09:18:56.820661Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session shared/user_1_1_10449229285530724723_v1 2025-06-30T09:18:56.820671Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037896] Destroy direct read session shared/user_1_1_10449229285530724723_v1 2025-06-30T09:18:56.820684Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [1:7521669437606142950:2541] destroyed 2025-06-30T09:18:56.820685Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [1:7521669437606142942:2538] destroyed 2025-06-30T09:18:56.820696Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_1_1_10449229285530724723_v1 2025-06-30T09:18:56.820697Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [1:7521669437606143090:2554] destroyed 2025-06-30T09:18:56.820700Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [1:7521669437606142946:2540] destroyed 
2025-06-30T09:18:56.820717Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_10449229285530724723_v1 2025-06-30T09:18:56.820718Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:18:56.820720Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_10449229285530724723_v1 2025-06-30T09:18:56.820724Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_10449229285530724723_v1 |79.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TPipeCacheTest::TestAutoConnect [GOOD] >> DataShardVolatile::DistributedWrite [GOOD] >> DataShardVolatile::DistributedWriteBrokenLock >> KqpQueryPerf::Upsert-QueryService+UseSink >> KqpQueryPerf::UpdateOn-QueryService-UseSink [GOOD] >> KqpQueryPerf::Replace+QueryService+UseSink [GOOD] >> KqpQueryPerf::RangeRead-QueryService [GOOD] >> KqpQueryPerf::IndexInsert-QueryService-UseSink >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup-QueryService [GOOD] >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup+QueryService >> KqpQueryPerf::IndexDeleteOn+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexDeleteOn+QueryService+UseSink |79.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |79.8%| [LD] {RESULT} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::UpdateOn-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 28515, MsgBus: 13263 2025-06-30T09:18:56.585532Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669437658114508:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:56.585572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a79/r3tmp/tmplDf5Qb/pdisk_1.dat TServer::EnableGrpc on GrpcPort 28515, node 1 2025-06-30T09:18:56.652157Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:56.656662Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:56.656676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:56.656678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:56.656740Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13263 2025-06-30T09:18:56.688303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:56.688346Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:56.689416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
TClient is connected to server localhost:13263 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:56.730538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:56.733797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:56.740015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:56.805285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:56.867442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:56.881178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:18:57.087050Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441953083390:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.087092Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.130996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.143793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.157807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.168664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.190678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.252723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.280485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.311081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441953084046:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.311123Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.311337Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441953084051:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.312484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:57.316902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:18:57.317028Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669441953084053:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:57.416292Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669441953084104:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:57.591007Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Replace+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 9538, MsgBus: 13795 2025-06-30T09:18:56.931720Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669437940235461:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:56.931765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a70/r3tmp/tmp00AFf4/pdisk_1.dat TServer::EnableGrpc on GrpcPort 9538, node 1 2025-06-30T09:18:57.002429Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:57.002575Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669437940235440:2080] 1751275136931519 != 1751275136931522 2025-06-30T09:18:57.005227Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:57.005238Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:57.005240Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:57.005284Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13795 2025-06-30T09:18:57.034379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:57.034412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:57.035516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13795 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:57.076473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:57.099830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.168912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.199539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.213769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.508502Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669442235204355:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.508597Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.519898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.549138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.569001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.581736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.596568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.611552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.624070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.644062Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669442235205008:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.644084Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.644215Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669442235205013:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.645166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:57.648954Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669442235205015:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:57.726181Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669442235205066:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:57.935520Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::RangeRead-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 18296, MsgBus: 23824 2025-06-30T09:18:56.976204Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669437074653693:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:56.976236Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a67/r3tmp/tmpEhR8rX/pdisk_1.dat 2025-06-30T09:18:57.040026Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:57.040243Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669437074653673:2080] 1751275136976073 != 1751275136976076 TServer::EnableGrpc on GrpcPort 18296, node 1 2025-06-30T09:18:57.053474Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:57.053487Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:57.053490Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:57.053549Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23824 2025-06-30T09:18:57.078784Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:57.078812Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:57.079939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23824 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:57.127943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:57.131033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:57.133618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.173052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.207509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.222572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.466650Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441369622566:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.466695Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.551330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.567924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.597430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.617998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.642757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.663578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.681476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.707980Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441369623222:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.708006Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.708165Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441369623227:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.709162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:57.713305Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669441369623229:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:57.785775Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669441369623280:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:57.980173Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpQueryPerf::UpdateOn+QueryService-UseSink [GOOD] >> KqpQueryPerf::Insert-QueryService+UseSink [GOOD] >> KqpQueryPerf::Delete-QueryService+UseSink [GOOD] |79.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |79.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Insert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 17035, MsgBus: 25092 2025-06-30T09:18:56.454232Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669438985115623:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:56.454258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a76/r3tmp/tmpbpJ0da/pdisk_1.dat 2025-06-30T09:18:56.516783Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:56.519623Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669438985115602:2080] 1751275136454088 != 1751275136454091 TServer::EnableGrpc on GrpcPort 17035, node 1 2025-06-30T09:18:56.533924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:56.533951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:56.533953Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:56.533998Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25092 TClient is connected to server localhost:25092 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:18:56.594993Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:56.595030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:56.596242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:56.606378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:56.619701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:56.687818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:56.714810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:18:56.729631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:56.859871Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669438985117208:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:56.859901Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:56.912913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:56.922790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:56.934507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:56.949003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:56.957979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:56.970275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:56.984895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.007531Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669443280085157:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.007568Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.007597Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669443280085162:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.008596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:57.011001Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669443280085164:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:57.087013Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669443280085215:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:57.466323Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 15268, MsgBus: 5531 2025-06-30T09:18:57.787035Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669442165506373:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:57.787058Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a76/r3tmp/tmpVOBlMR/pdisk_1.dat 2025-06-30T09:18:57.805262Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15268, node 2 2025-06-30T09:18:57.818538Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:57.818550Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:57.818552Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:57.818612Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5531 TClient is connected to server localhost:5531 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:57.892138Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:57.892175Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting waiting... 
2025-06-30T09:18:57.892499Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:57.893077Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:57.895529Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:57.904362Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.962365Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.989628Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.012492Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.218168Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669446460475225:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.218240Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.231506Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.239260Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.251447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.265670Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.279498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.294407Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.307197Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.322804Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669446460475878:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.322839Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.322859Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669446460475883:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.323700Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:58.326586Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669446460475885:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:58.399662Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669446460475936:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::UpdateOn+QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 30062, MsgBus: 12965 2025-06-30T09:18:57.395242Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669440286165674:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:57.395367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a61/r3tmp/tmp1tkGhs/pdisk_1.dat 2025-06-30T09:18:57.507656Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:57.507899Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669440286165551:2080] 1751275137392063 != 1751275137392066 TServer::EnableGrpc on GrpcPort 30062, node 1 2025-06-30T09:18:57.554956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:57.554969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:57.554972Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:57.555022Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:57.566812Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:57.566838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:57.575117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12965 TClient is connected to server localhost:12965 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:57.678479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:57.684927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:57.699934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.771649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.836143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.901279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.047426Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669444581134463:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.047467Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.103443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.119018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.136424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.149990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.170926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.197413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.222925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.243535Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669444581135117:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.243567Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.243725Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669444581135122:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.244634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:58.247837Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669444581135124:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:58.337289Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669444581135175:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:58.389872Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TSchemeShardUserAttrsTest::UserConditionsAtCreateDropOps >> KqpQueryPerf::Insert+QueryService-UseSink [GOOD] >> KqpQueryPerf::Insert+QueryService+UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Delete-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 2486, MsgBus: 12881 2025-06-30T09:18:56.459982Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669436736581487:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:56.460002Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a86/r3tmp/tmpPwlinA/pdisk_1.dat 2025-06-30T09:18:56.516973Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:56.518856Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669436736581463:2080] 1751275136459571 != 1751275136459574 TServer::EnableGrpc on GrpcPort 2486, node 1 2025-06-30T09:18:56.544848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:56.544875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:56.544878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:56.544939Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:56.564613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:56.564655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:56.565708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12881 TClient is connected to server localhost:12881 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:56.618654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:56.623942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:56.689847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:56.714098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:56.729131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:56.959100Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669436736583064:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:56.959127Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.014494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.022822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.079347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.088993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.103297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.116994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.131475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.151917Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441031551012:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.151950Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.152051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441031551017:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.153082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:57.160061Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669441031551019:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:57.252160Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669441031551070:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:57.461515Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 64899, MsgBus: 24085 2025-06-30T09:18:57.808760Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669442356322153:2157];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a86/r3tmp/tmpwh43MY/pdisk_1.dat 2025-06-30T09:18:57.811879Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:57.824990Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:57.828104Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521669442356322009:2080] 1751275137805697 != 1751275137805700 TServer::EnableGrpc on GrpcPort 64899, node 2 2025-06-30T09:18:57.835415Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:57.835429Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:57.835431Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:57.835486Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24085 TClient is connected to server localhost:24085 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:57.911854Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:57.912196Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:57.913602Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting waiting... 2025-06-30T09:18:57.914308Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:57.915243Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:57.922766Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:18:57.936047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.967620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.980962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.238485Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669446651290895:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.238520Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.255824Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.265927Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.279466Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.293387Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.307368Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.320834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.335195Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.351956Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669446651291548:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.351992Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669446651291553:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.351993Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.352957Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:58.362104Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669446651291555:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:58.430033Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669446651291606:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> KqpQueryPerf::Upsert-QueryService+UseSink [GOOD] >> KqpQueryPerf::Update-QueryService+UseSink >> KqpPg::TableDeleteAllData+useSink [GOOD] >> KqpPg::TableDeleteAllData-useSink >> TopicAutoscaling::ControlPlane_BackCompatibility [GOOD] >> TopicAutoscaling::ControlPlane_AutoscalingWithStorageSizeRetention >> KqpQueryPerf::DeleteOn+QueryService-UseSink [GOOD] >> KqpQueryPerf::DeleteOn+QueryService+UseSink >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup+QueryService [GOOD] >> DataShardVolatile::DistributedWriteBrokenLock [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan+UseSink >> TSchemeShardUserAttrsTest::UserConditionsAtCreateDropOps [GOOD] >> DataShardVolatile::DistributedWriteThenSplit [GOOD] >> DataShardVolatile::DistributedWriteThenReadIterator >> TSchemeShardUserAttrsTest::VariousUse >> TSchemeShardUserAttrsTest::UserConditionsAtAlter >> KqpQueryPerf::IndexDeleteOn+QueryService+UseSink [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-DbAdmin-clusteradmin [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-NoDbAdmin-system ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Upsert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 11851, MsgBus: 1155 2025-06-30T09:18:58.082991Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669445652154593:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:58.083155Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a58/r3tmp/tmpcdeRQ7/pdisk_1.dat 2025-06-30T09:18:58.182040Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:58.182375Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669445652154401:2080] 1751275138071699 != 1751275138071702 TServer::EnableGrpc on GrpcPort 11851, node 1 2025-06-30T09:18:58.210981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:58.210998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:58.211001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:58.211060Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1155 2025-06-30T09:18:58.251328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:58.251360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:58.252477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1155 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:58.332817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:58.338425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.408684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.471917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.485976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:18:58.583648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669445652156015:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.583690Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.646822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.661108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.673683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.685040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.699652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.716323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.727552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.748003Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669445652156666:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.748036Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.748117Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669445652156671:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.749066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:58.754302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669445652156673:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:58.835147Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669445652156724:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:59.079540Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpQueryPerf::IndexInsert-QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexInsert-QueryService+UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::UserConditionsAtCreateDropOps [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:59.579862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:59.579894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:59.579899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:59.579902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:59.579907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:59.579910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:59.579917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:59.579930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:59.580023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:59.580079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:59.590928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:59.590961Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-30T09:18:59.594808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:59.594893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:59.594940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:59.596712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:59.596821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:59.596970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:59.597054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:59.597767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:59.597826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:59.598169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:59.598187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:59.598214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:59.598226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:59.598233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:59.598257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:59.599880Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:59.620584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:59.620682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:59.620758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:59.620768Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:59.620815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:59.620831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:59.622721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:59.622849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:59.622953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:59.622968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:59.622975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:59.622982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:59.624996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:59.625029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:59.625042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:59.625777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:59.625806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:59.625815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:59.625823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:59.704466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:59.707454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:59.707524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:59.707777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:59.707819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:59.707828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:59.707910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:59.707920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:59.707960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:59.707973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:59.711353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:59.711373Z node 1 :FLAT_TX_SCHEMESHARD ... 
DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-30T09:18:59.796757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:0 2025-06-30T09:18:59.796762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 105:0 2025-06-30T09:18:59.796774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:18:59.796781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 105, publications: 2, subscribers: 0 2025-06-30T09:18:59.796787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-30T09:18:59.796792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:18:59.797382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:18:59.797443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:18:59.797532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:59.797540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:59.797589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:18:59.797623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:59.797630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 105, path id: 1 2025-06-30T09:18:59.797635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 105, path id: 4 FAKE_COORDINATOR: Erasing txId 105 2025-06-30T09:18:59.797786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:18:59.797800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:18:59.797806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, 
count: 2, at schemeshard: 72057594046678944, txId: 105 2025-06-30T09:18:59.797811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-30T09:18:59.797818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:18:59.797896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:18:59.797907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:18:59.797912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-30T09:18:59.797917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:18:59.797922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:18:59.797954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-30T09:18:59.797994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:18:59.798001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:18:59.798013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:18:59.798511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:18:59.798889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:18:59.798920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-30T09:18:59.799008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-30T09:18:59.799018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 
2025-06-30T09:18:59.799146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-30T09:18:59.799171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-30T09:18:59.799178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:406:2395] TestWaitNotification: OK eventTxId 105 2025-06-30T09:18:59.799287Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirC" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:59.799345Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirC" took 48us result status StatusPathDoesNotExist 2025-06-30T09:18:59.799398Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/DirC\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/DirC" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:18:59.799480Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:18:59.799511Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 33us result status StatusSuccess 2025-06-30T09:18:59.799624Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardUserAttrsTest::UserConditionsAtAlter [GOOD] >> TSchemeShardUserAttrsTest::VariousUse [GOOD] >> KqpQueryPerf::Insert+QueryService+UseSink [GOOD] |79.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_user_attributes/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 28970, MsgBus: 19623 2025-06-30T09:18:57.150913Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669442214785420:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:57.151073Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a64/r3tmp/tmpULyxwh/pdisk_1.dat 2025-06-30T09:18:57.222510Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28970, node 1 2025-06-30T09:18:57.251158Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:57.251192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:57.252864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:57.255035Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:57.255045Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:57.255047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:57.255099Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:19623 TClient is connected to server localhost:19623 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:57.369861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:57.375225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:57.390296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.428798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.475955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.501809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:18:57.641238Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669442214786795:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.641275Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.695399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.712493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.742583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.759863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.777867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.797834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.814960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.835689Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669442214787447:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.835719Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.835843Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669442214787452:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.836953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:57.840565Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669442214787454:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:57.911900Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669442214787505:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:58.150087Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 23819, MsgBus: 19484 2025-06-30T09:18:58.471444Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669445062787561:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:58.471837Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a64/r3tmp/tmpx496vq/pdisk_1.dat 2025-06-30T09:18:58.488025Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23819, node 2 2025-06-30T09:18:58.500006Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:58.500025Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:58.500027Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:58.500086Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19484 TClient is connected to server localhost:19484 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:18:58.580519Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:58.580551Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:58.581455Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:58.583233Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:58.584565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:58.586310Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.601845Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:18:58.632883Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.695505Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.884924Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669445062789068:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.884949Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.894439Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.914795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.972137Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.979969Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.993303Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.009280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.068763Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.087293Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669449357757024:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:59.087322Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:59.087498Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669449357757029:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:59.088624Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:59.092429Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669449357757031:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:59.181463Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669449357757082:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:59.477599Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexDeleteOn+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 16276, MsgBus: 27330 2025-06-30T09:18:56.742521Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669437358925991:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:56.743814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a73/r3tmp/tmpJ2jPC9/pdisk_1.dat 2025-06-30T09:18:56.836718Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:56.836830Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669437358925934:2080] 1751275136741625 != 1751275136741628 TServer::EnableGrpc on GrpcPort 16276, node 1 2025-06-30T09:18:56.845136Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:56.845166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:56.846231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:56.848094Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:56.848105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:56.848107Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:56.848151Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27330 TClient is connected to server localhost:27330 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:56.924257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:56.935889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.002759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.026658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.039817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:57.252156Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441653894830:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.252213Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.305507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.316443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.327988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.387631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.453833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.473881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.489110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.522987Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441653895487:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.523025Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.523226Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441653895492:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.524380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:57.528611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:18:57.528727Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669441653895494:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:57.623979Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669441653895545:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:57.746930Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:57.894069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.954964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 720575940 ... bMiCXd/pdisk_1.dat 2025-06-30T09:18:58.486372Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:58.488986Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521669446410554922:2080] 1751275138470923 != 1751275138470926 TServer::EnableGrpc on GrpcPort 11445, node 2 2025-06-30T09:18:58.502764Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:58.502778Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:58.502781Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:58.502832Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9891 TClient is connected to server localhost:9891 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:18:58.575927Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:58.575972Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:58.576855Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:18:58.583774Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:58.585631Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:58.587107Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.605692Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.636511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:18:58.661680Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.913840Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669446410556513:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.913873Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.917338Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.928232Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.942084Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.953458Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.965161Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.021043Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.035266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.052486Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669450705524466:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:59.052533Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:59.052675Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669450705524471:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:59.053594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:59.063244Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669450705524473:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:59.115635Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669450705524524:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:59.462834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.476636Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:59.493858Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.521523Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::UserConditionsAtAlter [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:19:00.354059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:19:00.354088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:00.354094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:19:00.354103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:19:00.354109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:19:00.354113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: 
OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:19:00.354124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:00.354140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:19:00.354263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:19:00.354337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:19:00.371129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:19:00.371155Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:00.374823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:19:00.374902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:19:00.374943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:19:00.376928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:19:00.377010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:19:00.377153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:00.377234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:19:00.377967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:00.378015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:19:00.378302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:00.378314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:00.378346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:19:00.378355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:19:00.378362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:19:00.378383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at 
schemeshard: 72057594046678944 2025-06-30T09:19:00.381251Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:19:00.398857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:19:00.398942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.399008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:19:00.399017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:19:00.399064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:19:00.399079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:00.400039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:00.400089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:19:00.400169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.400181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:19:00.400187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:19:00.400194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:19:00.400806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.400823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:19:00.400829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:19:00.401299Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.401311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.401318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:00.401325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:19:00.402157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:19:00.402635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:19:00.402721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:19:00.403009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:00.403045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:00.403054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:00.403116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:19:00.403125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:00.403159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:19:00.403172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:19:00.403848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:00.403870Z node 1 :FLAT_TX_SCHEMESHARD ... 
f { PathId: 2 PathVersion: 4 } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:19:00.426162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_user_attrs.cpp:26: TAlterUserAttrs Propose, path: /MyRoot/DirA, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.426180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-30T09:19:00.426187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 103:0 type: TxAlterUserAttributes target path: [OwnerId: 72057594046678944, LocalPathId: 2] source path: 2025-06-30T09:19:00.426216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:19:00.426226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 103:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-30T09:19:00.427413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:00.427453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: ALTER USER ATTRIBUTES, path: /MyRoot/DirA 2025-06-30T09:19:00.427496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.427502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_user_attrs.cpp:97: TAlterUserAttrs ProgressState, opId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.427510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1 2025-06-30T09:19:00.427533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:19:00.427920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2025-06-30T09:19:00.427944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 2025-06-30T09:19:00.428014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 
72057594046678944 2025-06-30T09:19:00.428034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:00.428042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_user_attrs.cpp:114: TAlterUserAttrs HandleReply TEvOperationPlan, opId: 103:0, stepId:5000004, at schemeshard: 72057594046678944 2025-06-30T09:19:00.428078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:19:00.428083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:19:00.428088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:19:00.428093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:19:00.428102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:19:00.428113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-30T09:19:00.428123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:19:00.428128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:19:00.428133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-30T09:19:00.428153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:0 2025-06-30T09:19:00.428163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:19:00.428168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 103, publications: 1, subscribers: 0 2025-06-30T09:19:00.428173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-30T09:19:00.428556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:00.428566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:19:00.428593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:00.428599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at 
schemeshard: 72057594046678944, txId: 103, path id: 2 FAKE_COORDINATOR: Erasing txId 103 2025-06-30T09:19:00.428690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:19:00.428701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:19:00.428706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-30T09:19:00.428712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-30T09:19:00.428717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:19:00.428732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-30T09:19:00.429065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-30T09:19:00.429144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-30T09:19:00.429157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-30T09:19:00.429253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-30T09:19:00.429271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:19:00.429277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:354:2343] TestWaitNotification: OK eventTxId 103 2025-06-30T09:19:00.429361Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:19:00.429388Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 37us result status StatusSuccess 2025-06-30T09:19:00.429478Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 3 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrA2" Value: "ValA2" } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::VariousUse [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:19:00.351212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:19:00.351237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:00.351242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:19:00.351246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:19:00.351251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:19:00.351254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:19:00.351262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:00.351275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:19:00.351385Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:19:00.351446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:19:00.364495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:19:00.364523Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:00.368782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:19:00.368862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:19:00.368903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:19:00.370532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:19:00.370609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:19:00.370784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:00.370884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:19:00.371643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:00.371691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:19:00.371994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:00.372007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:00.372036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:19:00.372045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:19:00.372052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:19:00.372073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.373577Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:19:00.398800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-30T09:19:00.398905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.398988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:19:00.398997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:19:00.399053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:19:00.399069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:00.400039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:00.400089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:19:00.400169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.400180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:19:00.400187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:19:00.400194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:19:00.400768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.400784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:19:00.400792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:19:00.401202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.401214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.401220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:00.401230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:19:00.402086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:19:00.402571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:19:00.402616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:19:00.402871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:00.402906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:00.402914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:00.402981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:19:00.402990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:00.403029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:19:00.403043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:19:00.403577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:00.403589Z node 1 :FLAT_TX_SCHEMESHARD ... 
ishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-30T09:19:00.449731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:00.449741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:19:00.449772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:19:00.449780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:19:00.449807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:00.449813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 1 2025-06-30T09:19:00.449819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 3 2025-06-30T09:19:00.449824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 4 FAKE_COORDINATOR: Erasing txId 112 2025-06-30T09:19:00.449961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:19:00.449970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:19:00.449976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 112 2025-06-30T09:19:00.449980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-30T09:19:00.449984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:19:00.450038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:19:00.450043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 
PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:19:00.450046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 112 2025-06-30T09:19:00.450048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-30T09:19:00.450051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:19:00.450113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:19:00.450120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:19:00.450122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 112 2025-06-30T09:19:00.450125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:19:00.450127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:19:00.450135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 112, subscribers: 0 2025-06-30T09:19:00.450179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:19:00.450184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:19:00.450191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:19:00.450407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-30T09:19:00.450695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-30T09:19:00.450711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-30T09:19:00.450722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 
112, wait until txId: 112 TestWaitNotification wait txId: 112 2025-06-30T09:19:00.450811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2025-06-30T09:19:00.450820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2025-06-30T09:19:00.450923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2025-06-30T09:19:00.450941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2025-06-30T09:19:00.450946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:500:2489] TestWaitNotification: OK eventTxId 112 2025-06-30T09:19:00.451025Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:19:00.451048Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirB" took 31us result status StatusSuccess 2025-06-30T09:19:00.451111Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirB" PathDescription { Self { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000008 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrB1" Value: "ValB1" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 113 2025-06-30T09:19:00.451932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "DirB" } ApplyIf { PathId: 2 PathVersion: 8 } ApplyIf { PathId: 3 PathVersion: 7 } ApplyIf { PathId: 4 
PathVersion: 3 } } TxId: 113 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:19:00.451979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:28: TRmDir Propose, path: /MyRoot/DirB, pathId: 0, opId: 113:0, at schemeshard: 72057594046678944 2025-06-30T09:19:00.451998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 113:1, propose status:StatusPreconditionFailed, reason: fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:19:00.452520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 113, response: Status: StatusPreconditionFailed Reason: "fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4]" TxId: 113 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:00.452571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 113, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4], operation: DROP DIRECTORY, path: /MyRoot/DirB TestModificationResult got TxId: 113, wait until txId: 113 >> KqpQueryPerf::Update-QueryService+UseSink [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Insert+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 2417, MsgBus: 27556 2025-06-30T09:18:57.867022Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669441026869361:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:57.867055Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a5b/r3tmp/tmpO0eh90/pdisk_1.dat 2025-06-30T09:18:58.019196Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:58.019568Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669441026869339:2080] 1751275137866880 != 1751275137866883 TServer::EnableGrpc on GrpcPort 2417, node 1 2025-06-30T09:18:58.031979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:58.031997Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:58.031999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:58.032054Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27556 2025-06-30T09:18:58.176390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:58.176425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:58.179182Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is 
connected to server localhost:27556 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:58.211138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:58.214657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:18:58.219571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.290149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.321433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.336341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.540782Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669445321838251:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.540810Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.604856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.619734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.632296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.645631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.658707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.672780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.732244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.763343Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669445321838908:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.763375Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.763575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669445321838913:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.764665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:58.771193Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669445321838915:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:58.852724Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669445321838966:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:58.871706Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 28936, MsgBus: 25370 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a5b/r3tmp/tmpKuDoU7/pdisk_1.dat 2025-06-30T09:18:59.495410Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669449808159966:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:59.495782Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:59.529301Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28936, node 2 2025-06-30T09:18:59.536805Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:59.536821Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:59.536823Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:59.536883Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25370 TClient is connected to server localhost:25370 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:59.592390Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:59.592426Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:59.593104Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:18:59.603528Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:59.607359Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:18:59.620174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.641829Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:59.668033Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:59.684850Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:59.945041Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669449808161479:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:59.945066Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:59.957191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.970150Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.986154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.016998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.082326Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.095428Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.111092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.129593Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669454103129430:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.129623Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.129796Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669454103129435:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.130900Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:00.134793Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:19:00.134865Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669454103129437:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:19:00.227857Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669454103129488:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> KqpYql::BinaryJsonOffsetBound >> KqpScripting::ScriptingCreateAndAlterTableTest >> TSchemeShardUserAttrsTest::MkDir >> KqpQueryPerf::DeleteOn+QueryService+UseSink [GOOD] |79.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |79.8%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |79.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Update-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 31599, MsgBus: 20585 2025-06-30T09:18:59.540390Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669450592846388:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:59.540425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a4c/r3tmp/tmpGKQI6D/pdisk_1.dat 2025-06-30T09:18:59.603744Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31599, node 1 2025-06-30T09:18:59.626819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:59.626832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:59.626835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:59.626883Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:59.643260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:59.643289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:59.644728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20585 TClient is connected to server localhost:20585 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:59.859940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:59.867413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:59.883869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:59.947644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:19:00.024014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.091845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:00.220561Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669454887815279:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.220587Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.284914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.302000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.326960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.345265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.360221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.424688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.439663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.455667Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669454887815932:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.455713Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.455726Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669454887815937:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.456806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:00.461956Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669454887815939:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:19:00.516872Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669454887815990:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:00.542837Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |79.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage >> TSchemeShardUserAttrsTest::MkDir [GOOD] >> Cache::Test4 [GOOD] >> Cache::Test5 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::DeleteOn+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 3200, MsgBus: 10301 2025-06-30T09:18:57.901294Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669443377674619:2145];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a60/r3tmp/tmpSAe8n8/pdisk_1.dat 2025-06-30T09:18:58.035892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:58.086578Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:58.089740Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669443377674499:2080] 1751275137889113 != 1751275137889116 TServer::EnableGrpc on GrpcPort 3200, node 1 2025-06-30T09:18:58.111466Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:58.111485Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:58.111488Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:58.111531Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:58.128609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:58.128654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:58.133781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10301 TClient is connected to server localhost:10301 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:58.287731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:58.295372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:58.308131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.375441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:18:58.405713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.422313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.590860Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669447672643395:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.590893Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.643301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.667332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.722532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.737517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.763880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.775038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.836070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.871231Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669447672644056:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.871252Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.871387Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669447672644061:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.872128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:58.874432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:18:58.874532Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669447672644063:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:58.889637Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:58.954497Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669447672644114:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 13683, MsgBus: 63659 2025-06-30T09:18:59.842198Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669451572160494:2246];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a60/r3tmp/tmpYpnHCA/pdisk_1.dat 2025-06-30T09:18:59.851235Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:59.890455Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:59.894464Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521669451572160259:2080] 1751275139836328 != 1751275139836331 TServer::EnableGrpc on GrpcPort 13683, node 2 2025-06-30T09:18:59.910630Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:59.910647Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:59.910650Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:59.910694Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:59.945761Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:59.945801Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:59.946248Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63659 TClient is connected to server localhost:63659 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:00.086385Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:00.091258Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:00.097175Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:00.140244Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:00.200128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:00.229521Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:00.652861Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669455867129154:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.652886Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.668766Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.678686Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.688491Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.701950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.715513Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.729632Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.786979Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.802608Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669455867129808:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.802652Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669455867129813:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.802663Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:00.803671Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:00.812660Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669455867129815:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:19:00.835830Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:00.908927Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669455867129875:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> IcebergClusterProcessor::ValidateDdlCreationForHiveWithS3 [GOOD] >> IcebergClusterProcessor::ValidateRiseErrors [GOOD] >> EscapingBasics::HideSecretsOverEncloseSecretShouldWork [GOOD] >> EscapingBasics::EscapeStringShouldWork [GOOD] >> Cache::Test1 [GOOD] >> Cache::Test2 [GOOD] >> Cache::Test3 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::MkDir [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:19:01.424875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:19:01.424901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:01.424906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:19:01.424910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:19:01.424914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:19:01.424918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:19:01.424925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:01.424948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:19:01.425067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:19:01.425136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:19:01.443606Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:19:01.443637Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:01.448079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:19:01.448185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:19:01.448232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:19:01.454383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:19:01.454527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:19:01.454714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:01.454853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:19:01.456566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:01.456634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:19:01.456991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:01.457006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:01.457040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:19:01.457049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:19:01.457055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:19:01.457080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:19:01.462509Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:19:01.485680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:19:01.485787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:01.485865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: 
IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:19:01.485874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:19:01.485946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:19:01.485963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:01.489340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:01.489406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:19:01.489508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:01.489523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:19:01.489530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:19:01.489536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:19:01.490587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:01.490615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:19:01.490627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:19:01.491350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:01.491368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:01.491375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:01.491383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:19:01.492245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 
MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:19:01.492890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:19:01.492939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:19:01.493164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:01.493199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:01.493207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:01.493279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:19:01.493288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:01.493324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:19:01.493336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:19:01.493923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:01.493953Z node 1 :FLAT_TX_SCHEMESHARD ... 
Step: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:01.521435Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:19:01.521456Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 23us result status StatusSuccess 2025-06-30T09:19:01.521512Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 } ChildrenExist: true } Children { Name: "SubDirA" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 
ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrA1" Value: "ValA1" } UserAttributes { Key: "AttrA2" Value: "ValA2" } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:01.521579Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:19:01.521593Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirB" took 16us result status StatusSuccess 2025-06-30T09:19:01.521633Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirB" PathDescription { Self { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrB1" Value: "ValB1" } UserAttributes { Key: "AttrB2" Value: "ValB2" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:01.521703Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/SubDirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:19:01.521717Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/SubDirA" took 15us result status StatusSuccess 2025-06-30T09:19:01.521762Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/SubDirA" PathDescription { Self { Name: "SubDirA" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "DirB" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 103 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrAA1" Value: "ValAA1" } UserAttributes { Key: "AttrAA2" Value: "ValAA2" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:01.521807Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/SubDirA/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:19:01.521818Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/SubDirA/DirB" took 13us result status StatusSuccess 2025-06-30T09:19:01.521855Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/SubDirA/DirB" PathDescription { Self { Name: "DirB" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 103 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrAB1" Value: "ValAB1" } UserAttributes { Key: "AttrAB2" Value: "ValAB2" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DataShardVolatile::DistributedWriteShardRestartBeforePlan+UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan-UseSink |79.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> EscapingBasics::EscapeStringShouldWork [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> IcebergClusterProcessor::ValidateRiseErrors [GOOD] Test command err: test case: 1 test case: 2 test case: 3 test case: 4 test case: 5 test case: 6 test case: 7 test case: 8 test case: 9 >> KqpQueryPerf::IndexInsert-QueryService+UseSink [GOOD] >> DataShardVolatile::DistributedWriteThenReadIterator [GOOD] >> DataShardVolatile::DistributedWriteThenReadIteratorStream |79.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> Cache::Test3 [GOOD] |79.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Cache::Test5 [GOOD] >> EntityId::CheckId [GOOD] >> KqpYql::BinaryJsonOffsetBound [GOOD] >> KqpYql::AnsiIn >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_PQv1 >> Donor::SlayAfterWiping ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexInsert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 32695, MsgBus: 2890 2025-06-30T09:18:58.434717Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669444337868988:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:58.434936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a54/r3tmp/tmp1mqnXL/pdisk_1.dat 2025-06-30T09:18:58.495672Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669444337868773:2080] 
1751275138431545 != 1751275138431548 2025-06-30T09:18:58.495905Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32695, node 1 2025-06-30T09:18:58.512228Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:58.512242Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:58.512244Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:58.512297Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2890 TClient is connected to server localhost:2890 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:58.576830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:58.576855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:58.577512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:58.578330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:18:58.580235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:58.589064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:18:58.613047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.654151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.668683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:58.895635Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669444337870368:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.895671Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:58.951357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.966850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.979974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:58.994051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.050036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.066386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.077288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.103606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669448632838324:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:59.103643Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:59.103700Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669448632838329:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:59.104680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:59.107153Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669448632838331:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:18:59.192837Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669448632838382:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:59.399237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:59.434383Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:59.459282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 720575940466 ... 2G7WIU/pdisk_1.dat TServer::EnableGrpc on GrpcPort 27974, node 2 2025-06-30T09:19:00.511708Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:00.512017Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521669453106977501:2080] 1751275140448742 != 1751275140448745 2025-06-30T09:19:00.516570Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:00.516587Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:00.516591Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:00.516650Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2076 2025-06-30T09:19:00.578892Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:00.578933Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:00.580109Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2076 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:00.635427Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:00.659114Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:00.687776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:00.756353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:19:00.783229Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:00.795662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:01.013676Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669457401946389:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.013707Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.020662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.030011Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.044948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.058076Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.072514Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.086726Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.100694Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.117384Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669457401947042:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.117408Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.117426Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669457401947047:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.118655Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:01.127525Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669457401947049:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:19:01.214789Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669457401947100:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:01.376657Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.392326Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.449077Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:01.461417Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpScripting::ScriptingCreateAndAlterTableTest [GOOD] >> KqpScripting::SecondaryIndexes >> Donor::ConsistentWritesWhenSwitchingToDonorMode |79.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> EntityId::CheckId [GOOD] >> Donor::SlayAfterWiping [GOOD] |79.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |79.8%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id >> Donor::ContinueWithFaultyDonor >> Donor::CheckOnlineReadRequestToDonor ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> THiveTest::TestHiveBalancerOneTabletHighUsage [GOOD] Test command err: 2025-06-30T09:17:51.823154Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:51.831439Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 
ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:51.831539Z node 3 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 3 PDiskId# 1 Path# "SectorMap:2:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:51.831871Z node 3 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:51.831952Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:51.832065Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:74:2076] ControllerId# 72057594037932033 2025-06-30T09:17:51.832069Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:51.832094Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:51.832111Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:51.839370Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:51.839397Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:51.840031Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:81:2080] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.840073Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:82:2081] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.840099Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:83:2082] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.840131Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:84:2083] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.840164Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:85:2084] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.840193Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:86:2085] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.840227Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:73:2075] Create Queue# [3:87:2086] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.840235Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-30T09:17:51.840256Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [3:74:2076] 2025-06-30T09:17:51.840262Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [3:74:2076] 2025-06-30T09:17:51.840273Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-30T09:17:51.840284Z node 3 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:17:51.840426Z node 3 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:17:51.840463Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:51.841130Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true 
Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:51.841185Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:51.841350Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:51.841423Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:17:51.842003Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-30T09:17:51.842021Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:51.842190Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:98:2078] ControllerId# 72057594037932033 2025-06-30T09:17:51.842197Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:51.842218Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:51.842242Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:51.844480Z node 3 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:51.844536Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [3:74:2076] 2025-06-30T09:17:51.844551Z node 3 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:51.844557Z node 3 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:51.844597Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:67:2065] 2025-06-30T09:17:51.844604Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:67:2065] 2025-06-30T09:17:51.844614Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:17:51.909963Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:51.910007Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:51.910371Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:105:2083] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.910406Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# 
[1:97:2077] Create Queue# [1:106:2084] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.910435Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:107:2085] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.910464Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:108:2086] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.910494Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:109:2087] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.910524Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:110:2088] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.910553Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:97:2077] Create Queue# [1:111:2089] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:51.910560Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-30T09:17:51.910578Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:98:2078] 2025-06-30T09:17:51.910585Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:98:2078] 2025-06-30T09:17:51.910595Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-30T09:17:51.910605Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:17:51.910915Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:17:51.910937Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:51.970425Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:51.970497Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:51.970686Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:51.970837Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:51.971478Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:122:2077] ControllerId# 72057594037932033 2025-06-30T09:17:51.971491Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:51.971512Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:51.971539Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:51.973597Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:17:52.071228Z node 2 :BS_PROXY 
INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:52.071259Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:52.072382Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:121:2076] Create Queue# [2:129:2081] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:52.072421Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:121:2076] Create Queue# [2:130:2082] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:52.073297Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:121:2076] C ... alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-30T09:18:56.806010Z node 57 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:60} Tx{558, NKikimr::NHive::TTxUpdateTabletMetrics} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:18:56.806037Z node 57 :HIVE TRACE: hive_impl.cpp:769: HIVE#72057594037927937 THive::Handle::TEvTabletMetrics, NodeId 58 TotalNodeUsage: 0.05 2025-06-30T09:18:56.806042Z node 57 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:60} Tx{559, NKikimr::NHive::TTxUpdateTabletMetrics} queued, type NKikimr::NHive::TTxUpdateTabletMetrics 2025-06-30T09:18:56.806045Z node 57 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:60} Tx{559, NKikimr::NHive::TTxUpdateTabletMetrics} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:18:56.806049Z node 57 :HIVE TRACE: tx__update_tablet_metrics.cpp:66: HIVE#72057594037927937 THive::TTxUpdateTabletMetrics UpdateResourceTotalUsage node 58 value (0,0,0,0) accumulated to (0,0,0,0) 2025-06-30T09:18:56.806054Z node 57 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:60} Tx{559, NKikimr::NHive::TTxUpdateTabletMetrics} hope 1 -> done Change{496, redo 82b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-30T09:18:56.806058Z node 57 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:60} Tx{559, NKikimr::NHive::TTxUpdateTabletMetrics} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:18:56.816424Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:417: TClient[72057594037936129] client retry [59:93:2066] 2025-06-30T09:18:56.816452Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [59:93:2066] 2025-06-30T09:18:56.816491Z node 59 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:18:56.816572Z node 59 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:18:56.816745Z node 57 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-30T09:18:56.816759Z node 57 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-30T09:18:56.816766Z node 57 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 2} 2025-06-30T09:18:56.816878Z node 59 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:18:56.816921Z node 59 :STATESTORAGE DEBUG: 
statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:18:56.816953Z node 59 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:18:56.816997Z node 59 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037936129 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037936129 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[57:2199047599219:0] : 11}, {[57:24343667:0] : 5}, {[57:1099535971443:0] : 8}}}} 2025-06-30T09:18:56.817008Z node 59 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037936129 followers: 0 2025-06-30T09:18:56.817023Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037936129] forward result error, check reconnect [59:93:2066] 2025-06-30T09:18:56.817031Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037936129] schedule retry [59:93:2066] 2025-06-30T09:18:56.858824Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:417: TClient[72057594037936129] client retry [58:155:2066] 2025-06-30T09:18:56.858848Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [58:155:2066] 2025-06-30T09:18:56.858892Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:18:56.858950Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:18:56.859138Z node 57 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-30T09:18:56.859154Z node 57 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-30T09:18:56.859161Z node 57 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 2} 2025-06-30T09:18:56.859247Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:18:56.859292Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:18:56.859304Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:18:56.859328Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037936129 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037936129 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: 
{{[57:2199047599219:0] : 11}, {[57:24343667:0] : 5}, {[57:1099535971443:0] : 8}}}} 2025-06-30T09:18:56.859335Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037936129 followers: 0 2025-06-30T09:18:56.859349Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037936129] forward result error, check reconnect [58:155:2066] 2025-06-30T09:18:56.859355Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037936129] schedule retry [58:155:2066] 2025-06-30T09:18:56.869906Z node 57 :HIVE DEBUG: hive_impl.cpp:2398: HIVE#72057594037927937 ProcessTabletBalancer MaxUsage=1.004142857 on #57 MinUsage=0.050000000 on #58 Scatter=0.000000000 2025-06-30T09:18:56.869947Z node 57 :HIVE DEBUG: hive_impl.cpp:2431: HIVE#72057594037927937 Nodes [57] with usage over limit 0.9 - triggered balancer 2025-06-30T09:18:56.870007Z node 57 :HIVE TRACE: balancer.cpp:237: HIVE#72057594037927937 Balancer selected node 57 2025-06-30T09:18:56.870019Z node 57 :HIVE TRACE: balancer.cpp:253: HIVE#72057594037927937 Balancer on node 57: 1/1 tablets are suitable for balancing 2025-06-30T09:18:56.870033Z node 57 :HIVE TRACE: balancer.cpp:319: HIVE#72057594037927937 Balancer selected tablet Dummy.72075186224037888.Leader.2 2025-06-30T09:18:56.870038Z node 57 :HIVE DEBUG: hive_impl.cpp:1225: HIVE#72057594037927937 [FBN] Finding best node for tablet Dummy.72075186224037888.Leader.2 2025-06-30T09:18:56.870047Z node 57 :HIVE TRACE: hive_impl.cpp:1226: HIVE#72057594037927937 [FBN] Tablet Dummy.72075186224037888.Leader.2 family {Dummy.72075186224037888.Leader.2 Running on node 57} 2025-06-30T09:18:56.870059Z node 57 :HIVE TRACE: hive_impl.cpp:1333: HIVE#72057594037927937 [FBN] Tablet Dummy.72075186224037888.Leader.2 node 59 is not able to run the tablet 2025-06-30T09:18:56.870066Z node 57 :HIVE TRACE: hive_impl.cpp:1333: HIVE#72057594037927937 [FBN] Tablet Dummy.72075186224037888.Leader.2 node 58 is not able to run the tablet 2025-06-30T09:18:56.870075Z node 57 :HIVE TRACE: hive_impl.cpp:1333: HIVE#72057594037927937 [FBN] Tablet Dummy.72075186224037888.Leader.2 node 60 is not able to run the tablet 2025-06-30T09:18:56.870080Z node 57 :HIVE TRACE: hive_impl.cpp:1333: HIVE#72057594037927937 [FBN] Tablet Dummy.72075186224037888.Leader.2 node 57 is not able to run the tablet 2025-06-30T09:18:56.870085Z node 57 :HIVE TRACE: hive_impl.cpp:1357: HIVE#72057594037927937 [FBN] Tablet Dummy.72075186224037888.Leader.2 selected nodes count 0 2025-06-30T09:18:56.870090Z node 57 :HIVE TRACE: hive_impl.cpp:1389: HIVE#72057594037927937 [FBN] Tablet Dummy.72075186224037888.Leader.2 no node was selected 2025-06-30T09:18:56.870098Z node 57 :HIVE INFO: balancer.cpp:137: HIVE#72057594037927937 Balancer finished with 0 movements made 2025-06-30T09:18:56.870103Z node 57 :HIVE DEBUG: balancer.cpp:149: HIVE#72057594037927937 Balancer suggests scale-up 2025-06-30T09:18:56.870174Z node 57 :HIVE DEBUG: hive_impl.cpp:382: HIVE#72057594037927937 Handle BalancerOut 2025-06-30T09:18:56.871408Z node 57 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [57:3075:3194] 2025-06-30T09:18:56.871426Z node 57 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [57:3075:3194] 2025-06-30T09:18:56.871451Z node 57 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:18:56.871463Z node 57 
:TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 57 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [57:454:2171] 2025-06-30T09:18:56.871486Z node 57 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [57:3075:3194] 2025-06-30T09:18:56.871511Z node 57 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [57:3075:3194] 2025-06-30T09:18:56.871532Z node 57 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [57:3075:3194] 2025-06-30T09:18:56.871539Z node 57 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [57:3075:3194] 2025-06-30T09:18:56.871570Z node 57 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [57:3075:3194] 2025-06-30T09:18:56.871634Z node 57 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [57:3075:3194] 2025-06-30T09:18:56.871641Z node 57 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [57:3075:3194] 2025-06-30T09:18:56.871647Z node 57 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [57:3075:3194] 2025-06-30T09:18:56.871654Z node 57 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [57:3075:3194] 2025-06-30T09:18:56.871659Z node 57 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [57:3075:3194] 2025-06-30T09:18:56.871669Z node 57 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [57:451:2169] EventType# 268697616 2025-06-30T09:18:56.871777Z node 57 :HIVE TRACE: hive_impl.cpp:118: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([57:3075:3194]) [57:3076:3195] distribution: *1 5 5 5 Final distribution: 1 5 5 5 |79.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> KqpPg::TableInsert+useSink [GOOD] >> KqpPg::TableInsert-useSink >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI [GOOD] |79.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |79.8%| [LD] {RESULT} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SlayAfterWiping [GOOD] Test command err: RandomSeed# 17844180176396393303 2025-06-30T09:19:02.997002Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:02.997432Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 6483975194566914875] 2025-06-30T09:19:02.998727Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> KqpYql::AnsiIn [GOOD] >> Donor::MultipleEvicts >> Donor::CheckOnlineReadRequestToDonor [GOOD] |79.9%| [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ContinueWithFaultyDonor [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan-UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation+UseSink >> TSchemeShardTTLTestsWithReboots::AlterTable [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-NoDbAdmin-system [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-DbAdmin-system ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/yql/unittest >> KqpYql::AnsiIn [GOOD] Test command err: Trying to start YDB, gRPC: 4976, MsgBus: 27478 2025-06-30T09:19:01.280238Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669457503392284:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:01.280279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00381d/r3tmp/tmpju1J8L/pdisk_1.dat 2025-06-30T09:19:01.357755Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4976, node 1 2025-06-30T09:19:01.382976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:01.382994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:01.382996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:01.383045Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27478 2025-06-30T09:19:01.429250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:01.429284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:01.430379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27478 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:19:01.460784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:01.467250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:01.471755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:01.546376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:01.606964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:01.623315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:01.805940Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669457503393860:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.805969Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.858347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.867473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.925525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.936028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.947200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.960640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.969706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.987796Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669457503394513:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.987825Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.987926Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669457503394518:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.989007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:01.995130Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669457503394520:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:19:02.093151Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669461798361867:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:02.282580Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:02.308268Z node 1 :KQP_SESSION ERROR: kqp_session_actor.cpp:2908: SessionId: ydb://session/3?node_id=1&id=NWViZjc2OWUtODJkY2ExMDktNzMyMjZiNzUtZWExMWRhNTQ=, ActorId: [1:7521669461798362140:2470], ActorState: ExecuteState, TraceId: 01jz023z4g4wzy2ajcwr6hsgtj, Internal error, message: yql/essentials/types/binary_json/read.cpp:161: StringOffset must be inside buffer 2025-06-30T09:19:02.308288Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=NWViZjc2OWUtODJkY2ExMDktNzMyMjZiNzUtZWExMWRhNTQ=, ActorId: [1:7521669461798362140:2470], ActorState: ExecuteState, TraceId: 01jz023z4g4wzy2ajcwr6hsgtj, Create QueryResponse for error on request, msg: yql/essentials/types/binary_json/read.cpp:161: StringOffset must be inside buffer Trying to start YDB, gRPC: 24356, MsgBus: 10178 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00381d/r3tmp/tmp9JwK16/pdisk_1.dat 2025-06-30T09:19:02.708539Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:02.710044Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24356, node 2 2025-06-30T09:19:02.722193Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:02.722210Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:02.722213Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:02.722277Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10178 TClient is connected to server localhost:10178 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:19:02.791606Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:02.791645Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:02.792993Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:02.794973Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:02.802077Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:02.820061Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:02.843872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:19:02.903344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.173711Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669469320040500:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:03.173748Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:03.185983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.196220Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.206623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.221523Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.235386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.249438Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.263880Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.279694Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669469320041152:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:03.279731Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:03.279804Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669469320041157:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:03.280746Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:03.290859Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669469320041159:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:19:03.345302Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669469320041210:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:03.681585Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI [GOOD] Test command err: 2025-06-30T09:18:32.933171Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669333069601969:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:32.933188Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:33.062626Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669339725356677:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:33.064064Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00402f/r3tmp/tmp4CMa4R/pdisk_1.dat 2025-06-30T09:18:33.193611Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:33.211018Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:33.316297Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:33.322537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:33.322567Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:33.324309Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:33.324948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27899, node 1 2025-06-30T09:18:33.415001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:33.415032Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:33.416394Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:33.582954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/00402f/r3tmp/yandexKIs41z.tmp 
2025-06-30T09:18:33.582967Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/00402f/r3tmp/yandexKIs41z.tmp 2025-06-30T09:18:33.617502Z INFO: TTestServer started on Port 6065 GrpcPort 27899 2025-06-30T09:18:33.660330Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/00402f/r3tmp/yandexKIs41z.tmp 2025-06-30T09:18:33.660498Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6065 PQClient connected to localhost:27899 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:33.934917Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:33.940065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:18:34.052465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:34.063076Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... waiting... 2025-06-30T09:18:35.034279Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669345954504900:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:35.034305Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:35.035199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669345954504929:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:35.036345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:35.049187Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669345954504931:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:18:35.220702Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669345954505015:2795] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:35.230207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:35.247001Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669345954505026:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:35.247936Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YTdhM2U0YTAtNjJkNThhY2MtZGRkODYxM2MtZGRlMzBmZA==, ActorId: [1:7521669341659537600:2301], ActorState: ExecuteState, TraceId: 01jz0234g1d7z2zdkpth4m83xs, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:35.248501Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:35.324320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:35.362869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-30T09:18:35.471194Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz0234xka4739ep5tgs4262v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWUwMTQzNDQtMmE4YTEyZDQtOGVmZjFjNGItMmQ5YmE3ZmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7521669345954505393:3064] 2025-06-30T09:18:37.933425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669333069601969:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:37.933518Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:18:38.050819Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7521669339725356677:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:38.050978Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migra ... 
sage_size_bytes","bin":"20480"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"204800"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"2097152"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"5120"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"51200"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"524288"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"5242880"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"67108864"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"99999999"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.messages"},"value":4,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"0"},"value":4,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"1"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"10"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"100"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"1000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"10000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"20"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"2500"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"5"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"50"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"500"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"5000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"999999"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.uncompressed_bytes"},"value":56,"kind":"RATE"}]} ===Request counters with query: /counters/counters=datastreams/database=%2FRoot/cloud_id=somecloud/folder_id=somefolder/database_id=root/topic=account2%2Ftopic2/consumer=some@random@consumer/json counters: 
{"sensors":[{"labels":{"name":"api.grpc.topic.stream_read.bytes"},"value":490,"kind":"RATE"},{"labels":{"name":"api.grpc.topic.stream_read.messages"},"value":4,"kind":"RATE"},{"labels":{"name":"topic.read.bytes"},"value":490,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"100"},"value":4,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"1000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"10000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"180000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"200"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"2000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"30000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"500"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"5000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"60000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"999999"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.messages"},"value":4,"kind":"RATE"}]} ===Request counters with query: /counters/counters=pqproxy/subsystem=userAgents/json counters: {"sensors":[{"labels":{"sensor":"BytesReadByUserAgent","consumer":"some@random@consumer","sdk_build_info":"ydb-cpp-sdk\/dev","protocol":"topic","user_agent":"test-client\/v0.1","host":""},"value":511,"kind":"RATE"},{"labels":{"topic":"\/Root\/account2\/topic2","sensor":"BytesWrittenByUserAgent","sdk_build_info":"ydb-cpp-sdk\/dev","protocol":"topic","user_agent":"test-client\/v0.1","host":""},"value":460,"kind":"RATE"}]} ===Request counters with query: /counters/counters=pqproxy/subsystem=userAgents/json counters: {"sensors":[{"labels":{"sensor":"BytesReadByUserAgent","consumer":"some@random@consumer","sdk_build_info":"ydb-cpp-sdk\/dev","protocol":"topic","user_agent":"test-client\/v0.1","host":""},"value":511,"kind":"RATE"},{"labels":{"topic":"\/Root\/account2\/topic2","sensor":"BytesWrittenByUserAgent","sdk_build_info":"ydb-cpp-sdk\/dev","protocol":"topic","user_agent":"test-client\/v0.1","host":""},"value":460,"kind":"RATE"}]} 2025-06-30T09:19:03.341680Z :INFO: [/Root] [/Root] [b68e3e34-14f91b49-4eb105df-96970] Closing read session. Close timeout: 0.000000s 2025-06-30T09:19:03.341700Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:account2/topic2:1:5:0:0 -:account2/topic2:2:4:0:0 -:account2/topic2:4:3:0:0 -:account2/topic2:3:2:0:0 -:account2/topic2:0:1:3:0 2025-06-30T09:19:03.341709Z :INFO: [/Root] [/Root] [b68e3e34-14f91b49-4eb105df-96970] Counters: { Errors: 0 CurrentSessionLifetimeMs: 20 BytesRead: 40 MessagesRead: 4 BytesReadCompressed: 40 BytesInflightUncompressed: 30 BytesInflightCompressed: 0 BytesInflightTotal: 30 MessagesInflight: 3 } 2025-06-30T09:19:03.341727Z :NOTICE: [/Root] [/Root] [b68e3e34-14f91b49-4eb105df-96970] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:19:03.341740Z :DEBUG: [/Root] [/Root] [b68e3e34-14f91b49-4eb105df-96970] [] Returning serverBytesSize = 0 to budget 2025-06-30T09:19:03.341791Z :DEBUG: [/Root] [/Root] [b68e3e34-14f91b49-4eb105df-96970] [] Abort session to cluster 2025-06-30T09:19:03.341955Z :NOTICE: [/Root] [/Root] [b68e3e34-14f91b49-4eb105df-96970] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:19:03.342323Z :INFO: [/Root] TraceId [] SessionId [123|7648603e-1c5406e5-82962412-64c3bc2c_0] MessageGroupId [123] Write session: close. Timeout 0.000000s 2025-06-30T09:19:03.342330Z :INFO: [/Root] TraceId [] SessionId [123|7648603e-1c5406e5-82962412-64c3bc2c_0] MessageGroupId [123] Write session will now close 2025-06-30T09:19:03.342336Z :DEBUG: [/Root] TraceId [] SessionId [123|7648603e-1c5406e5-82962412-64c3bc2c_0] MessageGroupId [123] Write session: aborting 2025-06-30T09:19:03.342416Z :INFO: [/Root] TraceId [] SessionId [123|7648603e-1c5406e5-82962412-64c3bc2c_0] MessageGroupId [123] Write session: gracefully shut down, all writes complete 2025-06-30T09:19:03.342421Z :DEBUG: [/Root] TraceId [] SessionId [123|7648603e-1c5406e5-82962412-64c3bc2c_0] MessageGroupId [123] Write session: destroy 2025-06-30T09:19:03.342191Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer some@random@consumer session some@random@consumer_7_1_3158798044846553352_v1 grpc read done: success# 0, data# { } 2025-06-30T09:19:03.342202Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer some@random@consumer session some@random@consumer_7_1_3158798044846553352_v1 grpc read failed 2025-06-30T09:19:03.342208Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer some@random@consumer session some@random@consumer_7_1_3158798044846553352_v1 grpc closed 2025-06-30T09:19:03.342233Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer some@random@consumer session some@random@consumer_7_1_3158798044846553352_v1 is DEAD 2025-06-30T09:19:03.342484Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037896] Destroy direct read session some@random@consumer_7_1_3158798044846553352_v1 2025-06-30T09:19:03.342493Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [7:7521669468063957471:2490] destroyed 2025-06-30T09:19:03.342506Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037895] Destroy direct read session some@random@consumer_7_1_3158798044846553352_v1 2025-06-30T09:19:03.342507Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: some@random@consumer_7_1_3158798044846553352_v1 2025-06-30T09:19:03.342513Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: some@random@consumer_7_1_3158798044846553352_v1 2025-06-30T09:19:03.342513Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037895] server disconnected, pipe [7:7521669468063957464:2487] destroyed 2025-06-30T09:19:03.342519Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037895] Destroy direct read session some@random@consumer_7_1_3158798044846553352_v1 2025-06-30T09:19:03.342522Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037895] server disconnected, pipe [7:7521669468063957461:2486] destroyed 2025-06-30T09:19:03.342523Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: some@random@consumer_7_1_3158798044846553352_v1 2025-06-30T09:19:03.342671Z node 7 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: 123|7648603e-1c5406e5-82962412-64c3bc2c_0 grpc read done: success: 0 data: 2025-06-30T09:19:03.342675Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: 123|7648603e-1c5406e5-82962412-64c3bc2c_0 grpc read 
failed 2025-06-30T09:19:03.342683Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: 123|7648603e-1c5406e5-82962412-64c3bc2c_0 grpc closed 2025-06-30T09:19:03.342686Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: 123|7648603e-1c5406e5-82962412-64c3bc2c_0 is DEAD 2025-06-30T09:19:03.343010Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session some@random@consumer_7_1_3158798044846553352_v1 2025-06-30T09:19:03.343030Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [7:7521669468063957467:2489] destroyed 2025-06-30T09:19:03.343037Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session some@random@consumer_7_1_3158798044846553352_v1 2025-06-30T09:19:03.343040Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [7:7521669468063957465:2488] destroyed 2025-06-30T09:19:03.343052Z node 8 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037897][topic2] pipe [7:7521669468063957455:2483] disconnected; active server actors: 1 2025-06-30T09:19:03.343057Z node 8 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037897][topic2] pipe [7:7521669468063957455:2483] client some@random@consumer disconnected session some@random@consumer_7_1_3158798044846553352_v1 2025-06-30T09:19:03.343089Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: some@random@consumer_7_1_3158798044846553352_v1 2025-06-30T09:19:03.343093Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: some@random@consumer_7_1_3158798044846553352_v1 2025-06-30T09:19:03.342961Z node 7 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:19:03.343103Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [7:7521669468063957441:2476] destroyed 2025-06-30T09:19:03.343117Z node 7 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::CheckOnlineReadRequestToDonor [GOOD] Test command err: RandomSeed# 13753196975427374888 2025-06-30T09:19:03.879986Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:03.880424Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13845552952754150370] 2025-06-30T09:19:03.881496Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:0:0:0:2097152:1] 2025-06-30T09:19:03.881550Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 1 PartsResurrected# 1 >> KqpScripting::SecondaryIndexes [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ContinueWithFaultyDonor [GOOD] Test command err: RandomSeed# 13010684439843955223 2025-06-30T09:19:03.752759Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:03.753185Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13380358196946042619] 2025-06-30T09:19:03.754419Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> Donor::SkipBadDonor >> Donor::MultipleEvicts [GOOD] >> DataShardVolatile::DistributedWriteThenReadIteratorStream [GOOD] >> DataShardVolatile::DistributedWriteThenScanQuery >> TopicAutoscaling::ReadingAfterSplitTest_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::AlterTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:18:48.211178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:48.211204Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:48.211210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:48.211216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:48.211227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:48.211232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:48.211242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:48.211259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:48.211376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:48.211461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:48.229274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:18:48.229306Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:48.229458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:18:48.233239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:48.234363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:48.234408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:48.236986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:48.237042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:48.237195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:48.237258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:48.238165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:48.238227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] 
Stop 2025-06-30T09:18:48.238501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:48.238512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:48.238522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:48.238530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:48.238537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:48.238564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:18:48.240136Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:18:48.257649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:48.257717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.257765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:48.257771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:48.257810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:48.257820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:48.258468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:48.258501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:48.258545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.258553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:48.258558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:48.258562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:48.258959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.258969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:48.258973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:48.259392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.259410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:48.259418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:48.259426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:48.261585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:48.262192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:48.262241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:48.262456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:48.262479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { 
RawX1: 139 RawX2: 42949694 ... T09:19:04.004340Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:19:04.004350Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:19:04.004359Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:19:04.004365Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-30T09:19:04.004372Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:19:04.004391Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-30T09:19:04.004579Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6376: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 226 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-30T09:19:04.004587Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-30T09:19:04.004602Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 226 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-30T09:19:04.004612Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 226 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-30T09:19:04.004860Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 336 RawX2: 219043334417 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-30T09:19:04.004865Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-30T09:19:04.004877Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Source { RawX1: 336 RawX2: 219043334417 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-30T09:19:04.004882Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-30T09:19:04.004887Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 336 RawX2: 219043334417 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-30T09:19:04.004901Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1003:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:04.004904Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:19:04.004907Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:19:04.004911Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 129 -> 240 2025-06-30T09:19:04.005316Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:19:04.005336Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:19:04.005351Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:19:04.005405Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:19:04.005411Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:19:04.005420Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:19:04.005423Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:19:04.005426Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:19:04.005429Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:19:04.005434Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-30T09:19:04.005440Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 
2025-06-30T09:19:04.005446Z node 51 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:19:04.005451Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:19:04.005473Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:19:04.006130Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:19:04.006141Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:19:04.006204Z node 51 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:19:04.006237Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:19:04.006243Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [51:457:2428] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:19:04.006313Z node 51 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:19:04.006376Z node 51 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 71us result status StatusSuccess 2025-06-30T09:19:04.006521Z node 51 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { 
CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> Acceleration::TestAccelerationMirror3dcPutAsyncBlob1Slow >> TSchemeShardTTLTestsWithReboots::MoveTable [GOOD] >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests2Inflight2BlobSize1000 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/yql/unittest >> KqpScripting::SecondaryIndexes [GOOD] Test command err: Trying to start YDB, gRPC: 15260, MsgBus: 19638 2025-06-30T09:19:01.282928Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669457047188193:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:01.282980Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0037e5/r3tmp/tmp9OX4AZ/pdisk_1.dat 2025-06-30T09:19:01.340250Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15260, node 1 2025-06-30T09:19:01.359033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:01.359050Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:01.359054Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:01.359102Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19638 2025-06-30T09:19:01.385345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:01.385374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:01.386688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19638 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:01.431639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:01.435694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:01.447136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:01.516057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:01.540989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:01.553260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:01.692099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669457047189761:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.692129Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.768612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.781296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.793906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.808646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.822899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.837204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.850438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:01.867665Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669457047190414:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.867696Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.867864Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669457047190419:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:01.868657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:01.875880Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669457047190421:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:19:01.966065Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669457047190472:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:02.130794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:02.260729Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275142302, txId: 281474976715674] shutting down 2025-06-30T09:19:02.272353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:17 ... ion=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0037e5/r3tmp/tmpqxBUY0/pdisk_1.dat 2025-06-30T09:19:02.887005Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28242, node 2 2025-06-30T09:19:02.904335Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:02.904349Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:02.904353Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:02.904406Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7924 2025-06-30T09:19:02.968522Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:02.968548Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:02.969576Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7924 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:19:02.986667Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:02.991292Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:02.996499Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.028342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:03.057950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:03.074604Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:03.257160Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669468855955674:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:03.257193Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:03.266348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.276727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.286895Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.301367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.312464Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.328073Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.341179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.359277Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669468855956327:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:03.359313Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:03.359422Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669468855956332:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:03.360427Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:03.368362Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669468855956334:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:19:03.451598Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669468855956385:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:03.654882Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.673301Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.733274Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:03.865888Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::MultipleEvicts [GOOD] Test command err: RandomSeed# 4569171474802910904 0 donors: 2025-06-30T09:19:04.323463Z 4 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:04.324532Z 4 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4168640546056003645] 2025-06-30T09:19:04.325628Z 4 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 2:1000 2025-06-30T09:19:04.339704Z 2 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:04.340854Z 2 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4168640546056003645] 2025-06-30T09:19:04.341649Z 2 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 4:1000 2025-06-30T09:19:04.355293Z 4 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 
2025-06-30T09:19:04.356413Z 4 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4168640546056003645] 2025-06-30T09:19:04.357306Z 4 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 2:1000 2025-06-30T09:19:04.371251Z 2 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:04.372286Z 2 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4168640546056003645] 2025-06-30T09:19:04.373173Z 2 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 4:1000 2025-06-30T09:19:04.387654Z 4 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:04.388836Z 4 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4168640546056003645] 2025-06-30T09:19:04.389793Z 4 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 2:1000 2025-06-30T09:19:04.404398Z 2 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:04.405240Z 2 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4168640546056003645] 2025-06-30T09:19:04.405905Z 2 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 4:1000 2025-06-30T09:19:04.417647Z 4 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:04.418758Z 4 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4168640546056003645] 2025-06-30T09:19:04.419455Z 4 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 2:1000 2025-06-30T09:19:04.431543Z 2 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:04.432373Z 2 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4168640546056003645] 2025-06-30T09:19:04.433014Z 2 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 4:1000 2025-06-30T09:19:04.443952Z 4 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 
2025-06-30T09:19:04.444679Z 4 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4168640546056003645] 2025-06-30T09:19:04.445253Z 4 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 2:1000 >> Acceleration::TestThresholdPutMirror3dc1Slow >> Donor::SkipBadDonor [GOOD] >> IncorrectQueries::VeryBigBlob >> IncorrectQueries::VeryBigBlob [GOOD] >> IncorrectQueries::WrongDataSize ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::MoveTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:18:46.286833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:46.286859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:46.286865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:46.286871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:46.286884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:46.286888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:46.286898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:46.286919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:46.287035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# 2025-06-30T09:18:46.287118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:46.303465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:18:46.303493Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:46.303633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:18:46.308171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:46.309343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:46.309387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:46.311695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:46.311767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:46.311912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:46.311981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:46.314778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:46.314867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:46.315226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:46.315242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:46.315253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:46.315264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:46.315270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:46.315308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:18:46.320263Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, 
primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:18:46.344267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:46.344378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.344473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:46.344483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:46.344535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:46.344555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:46.345534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:46.345584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:46.345660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.345671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:46.345677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:46.345683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:46.346192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.346205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:46.346211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:46.346658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-30T09:18:46.346672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.346680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:46.346689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:46.347472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:46.347977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:46.348027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:46.348273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:46.348305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
: 3] 2025-06-30T09:19:04.685160Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:04.685164Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [62:210:2210], at schemeshard: 72057594046678944, txId: 1003, path id: 1 2025-06-30T09:19:04.685169Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [62:210:2210], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-30T09:19:04.685178Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:19:04.685183Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 1003:0 ProgressState at tablet: 72057594046678944 2025-06-30T09:19:04.685195Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:19:04.685199Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:19:04.685203Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 129 -> 240 2025-06-30T09:19:04.685504Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:19:04.685517Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:19:04.685522Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:19:04.685528Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-30T09:19:04.685533Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:19:04.685819Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:19:04.685833Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:19:04.685838Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:19:04.685843Z node 62 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-30T09:19:04.685849Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:19:04.685863Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-30T09:19:04.686314Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:19:04.686326Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:19:04.686333Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 1003:0 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 3], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:19:04.686346Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:19:04.686353Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:19:04.686359Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:19:04.686363Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:19:04.686369Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-30T09:19:04.686375Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:19:04.686382Z node 62 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:19:04.686387Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:19:04.686410Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:19:04.686415Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:19:04.686487Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:19:04.686511Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:19:04.686517Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 
2025-06-30T09:19:04.686529Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:19:04.686929Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:19:04.687429Z node 62 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:19:04.687535Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:19:04.687543Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:19:04.687616Z node 62 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:19:04.687638Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:19:04.687644Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [62:474:2445] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:19:04.687721Z node 62 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTableMoved" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:19:04.687776Z node 62 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTableMoved" took 67us result status StatusSuccess 2025-06-30T09:19:04.687889Z node 62 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTableMoved" PathDescription { Self { Name: "TTLEnabledTableMoved" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TTLEnabledTableMoved" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SkipBadDonor [GOOD] Test command err: RandomSeed# 6969110400994199229 2025-06-30T09:19:04.764257Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:04.764720Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 12979586886072706348] 2025-06-30T09:19:04.765964Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> IncorrectQueries::WrongDataSize [GOOD] >> IncorrectQueries::WrongVDiskID >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests1Inflight1BlobSize1000 >> TestDSProxyAndVDiskEqualByteCounters::MultiPut >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests1Inflight1BlobSize1000 >> BlobStorageBlockRace::Test >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation+UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation-UseSink >> BlobStorageBlockRace::Test [GOOD] >> BlobStorageSync::TestSyncLogCuttingMirror3dc >> TopicAutoscaling::ControlPlane_AutoscalingWithStorageSizeRetention [GOOD] >> TopicAutoscaling::CDC_PartitionSplit_AutosplitByLoad >> IncorrectQueries::WrongVDiskID [GOOD] >> IncorrectQueries::ProtoQueryGet [GOOD] >> IncorrectQueries::WrongPartId >> Viewer::Cluster10000Tablets >> GroupLayoutSanitizer::Test3dc >> Acceleration::TestAccelerationMirror3dcPutAsyncBlob1Slow [GOOD] >> Acceleration::TestAcceleration4Plus2BlockPutAsyncBlob1Slow >> TopicAutoscaling::PartitionMerge_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionMerge_PreferedPartition_AutoscaleAwareSDK >> CommitOffset::PartitionSplit_OffsetCommit [GOOD] >> CommitOffset::DistributedTxCommit >> IncorrectQueries::WrongPartId [GOOD] >> IncorrectQueries::ProtobufBlob >> Viewer::JsonAutocompleteStartOfDatabaseName >> IncorrectQueries::ProtobufBlob [GOOD] >> 
IncorrectQueries::SameBlob >> CommitOffset::Commit_WithoutSession_TopPast [GOOD] >> CommitOffset::Commit_WithWrongSession_ToParent >> IncorrectQueries::SameBlob [GOOD] >> IncorrectQueries::WrongCrc |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests2Inflight2BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests1Inflight1BlobSize1000 >> KqpPg::TableDeleteAllData-useSink [GOOD] >> KqpPg::PgUpdateCompoundKey+useSink >> Acceleration::TestThresholdPutMirror3dc1Slow [GOOD] >> Acceleration::TestThresholdPut4Plus2Block1Slow |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> Viewer::SelectStringWithNoBase64Encoding >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests1Inflight1BlobSize1000 [GOOD] >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10Inflight1BlobSize1000 |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests1Inflight1BlobSize1000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10Inflight1BlobSize1000 |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> DataShardVolatile::DistributedWriteThenScanQuery [GOOD] >> DataShardVolatile::DistributedWriteWithAsyncIndex >> TestDSProxyAndVDiskEqualByteCounters::MultiPut [GOOD] >> TestDSProxyAndVDiskEqualByteCounters::SinglePut >> KqpPg::ValuesInsert-useSink [GOOD] >> PgCatalog::PgType >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation-UseSink [GOOD] >> DataShardVolatile::DistributedWriteEarlierSnapshotNotBlocked >> KqpPg::PgUpdateCompoundKey+useSink [GOOD] >> KqpPg::PgUpdateCompoundKey-useSink |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> Viewer::JsonAutocompleteStartOfDatabaseName [GOOD] >> Viewer::JsonStorageListingV1 |79.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |79.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |79.9%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery [GOOD] >> BasicUsage::TWriteSession_WriteEncoded >> TSchemeShardTTLTests::ShouldSkipDroppedColumn [GOOD] >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10Inflight1BlobSize1000 [GOOD] >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests2Inflight2BlobSize1000 >> TSchemeShardSysNames::ESchemeOpCreateTable-Protect-DbAdmin-system [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-NoProtect-NoDbAdmin-anonymous |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests1Inflight1BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10Inflight1BlobSize1000 >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10Inflight1BlobSize1000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10000Inflight1BlobSize1000 >> PgCatalog::PgType [GOOD] >> PgCatalog::InformationSchema >> Viewer::PDiskMerging 
>> KqpPg::PgUpdateCompoundKey-useSink [GOOD] >> THiveTest::TestLockTabletExecutionRebootTimeout [GOOD] >> THiveTest::TestLockTabletExecutionReconnect >> Viewer::PDiskMerging [GOOD] >> Viewer::SelectStringWithBase64Encoding ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ShouldSkipDroppedColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:46.679923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:46.679958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:46.679965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:46.679972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:46.679988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:46.679994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:46.680007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:46.680028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:46.680147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:46.680234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:46.698613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:46.698642Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:46.703019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:46.703091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:46.703129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:46.704756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:46.704833Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:46.704982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:46.705055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:46.705743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:46.705804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:46.706149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:46.706162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:46.706192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:46.706201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:46.706209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:46.706230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.708007Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:46.735075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:46.735195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.735275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:46.735286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:46.735341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:46.735361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, 
first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:46.736408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:46.736461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:46.736538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.736551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:46.736559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:46.736566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:46.737078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.737093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:46.737099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:46.737517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.737532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:46.737540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:46.737548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:46.738422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:46.739022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:46.739077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:46.739339Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:46.739377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:46.739388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:46.739475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:46.739485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:46.739529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:46.739545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:46.740082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:46.740093Z node 1 :FLAT_TX_SCHEMESHARD ... 25-06-30T09:18:46.960821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:510:2469] TestWaitNotification: OK eventTxId 103 2025-06-30T09:18:52.464328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs 2025-06-30T09:18:52.464360Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:54.320346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0068 2025-06-30T09:18:54.330729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0195 2025-06-30T09:18:54.371506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-30T09:18:54.371611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-30T09:18:54.371646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-30T09:18:54.371692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-30T09:18:54.371706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-30T09:18:54.371716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-30T09:18:54.371728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-30T09:18:54.381963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-30T09:18:57.927122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0023 2025-06-30T09:18:57.937536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0054 2025-06-30T09:18:57.979086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-30T09:18:57.979184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-30T09:18:57.979215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-30T09:18:57.979261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-30T09:18:57.979271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-30T09:18:57.979278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-30T09:18:57.979285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-30T09:18:57.990993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-30T09:19:01.587005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 
0.0023 2025-06-30T09:19:01.597342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0054 2025-06-30T09:19:01.638009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-30T09:19:01.638079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-30T09:19:01.638105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-30T09:19:01.638150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-30T09:19:01.638163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-30T09:19:01.638171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-30T09:19:01.638180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-30T09:19:01.650876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-30T09:19:04.978181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0014 2025-06-30T09:19:04.991185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0021 2025-06-30T09:19:05.037967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-30T09:19:05.038099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-30T09:19:05.038137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-30T09:19:05.038190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-30T09:19:05.038203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-30T09:19:05.038213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-30T09:19:05.038223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-30T09:19:05.048558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-30T09:19:08.560643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6723: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-30T09:19:08.560713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-30T09:19:08.560800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-30T09:19:08.560858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409547, request: TableId: 2 Expiration { ColumnId: 2 WallClockTimestamp: 60025000 ColumnUnit: UNIT_AUTO } SchemaVersion: 3 Indexes { OwnerId: 72057594046678944 PathId: 4 SchemaVersion: 1 KeyMap { IndexColumnId: 1 MainColumnId: 3 } KeyMap { IndexColumnId: 2 MainColumnId: 1 } } Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-06-30T09:19:08.561038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6751: Conditional erase accepted: tabletId: 72075186233409547, at schemeshard: 72057594046678944 2025-06-30T09:19:08.561316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-30T09:19:08.561328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409547, at schemeshard: 72057594046678944 2025-06-30T09:19:08.571643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-30T09:19:08.571717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-30T09:19:08.571731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:1, run at: 1970-01-01T01:01:00.025000Z, at schemeshard: 72057594046678944 2025-06-30T09:19:08.571752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> IncorrectQueries::WrongCrc [GOOD] >> IncorrectQueries::ProtoHasOnlyVDiskId [GOOD] >> IncorrectQueries::ProtoHasVDiskAndExtQueue [GOOD] >> IndexRestoreGet::BlobRecovery >> Viewer::Cluster10000Tablets [GOOD] >> Viewer::FuzzySearcherLimit1OutOf4 [GOOD] >> Viewer::FuzzySearcherLimit2OutOf4 [GOOD] >> 
Viewer::ExecuteQueryDoesntExecuteSchemeOperationsInsideTransation >> Acceleration::TestAcceleration4Plus2BlockPutAsyncBlob1Slow [GOOD] >> Acceleration::TestAccelerationMirror3dcPutAsyncBlob2Slow >> TestDSProxyAndVDiskEqualByteCounters::SinglePut [GOOD] >> RequestValidation::TestVPutBlobSize0 >> DataShardVolatile::DistributedWriteWithAsyncIndex [GOOD] >> DataShardVolatile::DistributedWriteThenLateWriteReadCommit ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::PgUpdateCompoundKey-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 11257, MsgBus: 27116 2025-06-30T09:18:30.209427Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669325426758414:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:30.209454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003306/r3tmp/tmpPUCr0q/pdisk_1.dat 2025-06-30T09:18:30.232745Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11257, node 1 2025-06-30T09:18:30.253855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:30.253868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:30.253871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:30.253923Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27116 TClient is connected to server localhost:27116 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:18:30.311843Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:30.311881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:30.312875Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:30.318759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 16 2025-06-30T09:18:30.576142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '0'::int2, ARRAY ['false'::bool, 'false'::bool] ); 2025-06-30T09:18:30.619668Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:18:30.624154Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669325426759114:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:30.624178Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669325426759102:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:30.624244Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:30.625117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:30.635368Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669325426759118:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:18:30.719533Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669325426759169:2394] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '1'::int2, ARRAY ['true'::bool, 'true'::bool] ); 18 2025-06-30T09:18:30.833647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:30.859401Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::"char", '0'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::"char", '1'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::"char", '2'::"char"] ); 21 2025-06-30T09:18:30.965146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:30.980101Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int2, '0'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int2, '1'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int2, '2'::int2] ); 23 2025-06-30T09:18:31.053361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:31.070400Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int4, '0'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int4, '1'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int4, '2'::int4] ); 20 2025-06-30T09:18:31.160361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:31.172934Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int8, '0'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int8, '1'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int8, '2'::int8] ); 2025-06-30T09:18:31.211151Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 700 2025-06-30T09:18:31.248868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715689:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:31.257281Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '0'::int2, ARRAY ['0.5'::float4, '0.5'::float4] ); --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '1'::int2, ARRAY ['1.5'::float4, '1.5'::float4] ); --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '2'::int2, ARRAY ['2.5'::float4, '2.5'::float4] ); 701 2025-06-30T09:18:31.344904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:31.366813Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1022_b (key, value) VALUES ( '0'::int2, ARRAY ['0.5'::float8, '0.5'::float8] ); --!syntax ... e access permissions } 2025-06-30T09:19:07.468598Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:07.471575Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:07.529686Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7521669483137815352:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:07.529706Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7521669483137815357:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:07.529715Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:07.530586Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:07.539191Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [9:7521669483137815359:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:07.593645Z node 9 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [9:7521669483137815410:2384] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:07.619050Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7521669483137815454:2317], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2025-06-30T09:19:07.619140Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=9&id=NTVkMDliZDgtOWI2MzZhZC0xZjNmOTFjYi05MjRiM2EzOQ==, ActorId: [9:7521669483137815447:2313], ActorState: ExecuteState, TraceId: 01jz0244ay8n00wyqryqt6hc6z, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:19:07.629598Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 16365, MsgBus: 19029 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003306/r3tmp/tmpgr95o6/pdisk_1.dat 2025-06-30T09:19:08.082570Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7521669488387131062:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:08.084251Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:19:08.099322Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16365, node 10 2025-06-30T09:19:08.134096Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:08.134114Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:08.134117Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:08.134176Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19029 2025-06-30T09:19:08.179108Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:08.179139Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:08.179872Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19029 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:08.212302Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:08.214927Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:08.538119Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521669488387131589:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:08.538144Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:08.541004Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:08.572966Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521669488387131693:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:08.572990Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:08.573046Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521669488387131698:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:08.573970Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:08.576659Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7521669488387131700:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:08.649784Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7521669488387131751:2384] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:08.714341Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7521669488387131816:2324], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2025-06-30T09:19:08.714999Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=10&id=N2ZlNDFhNDMtZDUxY2Q2MTYtOTg5Y2E1YTAtOTNkZTkzZjc=, ActorId: [10:7521669488387131809:2320], ActorState: ExecuteState, TraceId: 01jz0245d3fqhe8zg41155d4e7, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:19:08.851435Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TSchemeShardSysNames::ESchemeOpCreateView-NoProtect-NoDbAdmin-anonymous [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-NoDbAdmin-anonymous >> IndexRestoreGet::BlobRecovery [GOOD] >> Mirror3dc::GcQuorum >> RequestValidation::TestVPutBlobSize0 [GOOD] >> RequestValidation::TestVMultiPutBlobSize0 >> DataShardVolatile::DistributedWriteEarlierSnapshotNotBlocked [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit+UseSink >> RequestValidation::TestVMultiPutBlobSize0 [GOOD] >> ScrubFast::SingleBlob >> PgCatalog::InformationSchema [GOOD] >> PgCatalog::CheckSetConfig >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests2Inflight2BlobSize1000 [GOOD] >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests1Inflight1BlobSize2000000 >> Acceleration::TestThresholdPut4Plus2Block1Slow [GOOD] >> Acceleration::TestThresholdPutMirror3dc2Slow |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest |79.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |79.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |79.9%| [LD] {RESULT} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> Viewer::ExecuteQueryDoesntExecuteSchemeOperationsInsideTransation [GOOD] >> Viewer::FloatPointJsonQuery >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-NoDbAdmin-anonymous [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-DbAdmin-anonymous |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> Acceleration::TestAccelerationMirror3dcPutAsyncBlob2Slow [GOOD] >> Acceleration::TestAcceleration4Plus2BlockPutAsyncBlob2Slow >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10Inflight1BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests100Inflight1BlobSize1000 >> THiveTest::TestLockTabletExecutionReconnect [GOOD] >> THiveTest::TestLockTabletExecutionRebootReconnect |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> BlobStorageSync::TestSyncLogCuttingMirror3dc [GOOD] >> BlobStorageSync::TestSyncLogCuttingMirror3of4 |79.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |79.9%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |79.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_BeforeAutoscaleAwareSDK >> Viewer::SelectStringWithNoBase64Encoding [GOOD] >> Viewer::ServerlessNodesPage |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> Viewer::FloatPointJsonQuery [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-DbAdmin-anonymous [GOOD] >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes >> TSchemeShardSysNames::ESchemeOpCreateView-NoProtect-NoDbAdmin-ordinaryuser >> DataShardVolatile::DistributedWriteThenLateWriteReadCommit [GOOD] >> DataShardVolatile::TwoAppendsMustBeVolatile+UseSink >> Acceleration::TestThresholdPutMirror3dc2Slow [GOOD] >> Acceleration::TestThresholdPut4Plus2Block2Slow |79.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest |79.9%| [LD] {RESULT} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |79.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit+UseSink [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit-UseSink >> THiveTest::TestLockTabletExecutionRebootReconnect [GOOD] >> THiveTest::TestLockTabletExecutionReconnectExpire |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests1Inflight1BlobSize2000000 [GOOD] >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests10Inflight1BlobSize2000000 >> TSchemeShardSysNames::ESchemeOpCreateView-NoProtect-NoDbAdmin-ordinaryuser [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-NoDbAdmin-ordinaryuser |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> TopicAutoscaling::CDC_PartitionSplit_AutosplitByLoad [GOOD] >> TopicAutoscaling::ControlPlane_CDC >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes [GOOD] |79.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests100Inflight1BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests2Inflight2BlobSize1000 >> Viewer::ServerlessNodesPage [GOOD] >> Viewer::ServerlessWithExclusiveNodes >> TxUsage::Sinks_Oltp_WriteToTopic_1_Table >> TxUsage::WriteToTopic_Demo_12_Table >> LocalPartition::WithoutPartitionWithSplit >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-NoDbAdmin-ordinaryuser [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-DbAdmin-ordinaryuser ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes [GOOD] Test command err: 2025-06-30T09:19:08.220726Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:657:2390], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:08.220854Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:08.220880Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:654:2333], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:08.220893Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:08.220999Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:08.221006Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:08.319465Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:08.451609Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:19:08.479095Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-30T09:19:08.586433Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 8231, node 1 TClient is connected to server localhost:3460 2025-06-30T09:19:08.664453Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:08.664478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:08.664483Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:08.664557Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration Request timer = 0.00337553395 BASE_PERF = 1.532520096 test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:09.635480Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:09.637217Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521669491356445795:2080] 1751275149596089 != 1751275149596092 2025-06-30T09:19:09.663943Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9952, node 3 2025-06-30T09:19:09.703146Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:09.703163Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:09.703168Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:09.703234Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:09.707697Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:09.707733Z node 3 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:09.708539Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9531 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:09.771995Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:09.787231Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:09.807622Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:09.808938Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-30T09:19:09.811264Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-30T09:19:10.416417Z node 3 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:10.416442Z node 3 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:10.428826Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521669495651413753:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:10.428881Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:10.429063Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521669495651413765:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:10.430198Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:10.434274Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:19:10.434361Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521669495651413767:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:19:10.526841Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521669495651413818:2351] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:10.579964Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=3&id=OGQ3YzI0YTctZjY2NDNmZTItMWNiMzdkMTUtODg2NjQ4MDI=, ActorId: [3:7521669495651413751:2299], ActorState: ExecuteState, TraceId: 01jz02472wdcsz1xgegkk1w8rk, Create QueryResponse for error on request, msg: Scheme operations cannot be executed inside transaction 2025-06-30T09:19:10.615851Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:11.244776Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669503479816177:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:11.244793Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:11.282636Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:11.282973Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521669503479816153:2080] 1751275151244467 != 1751275151244470 TServer::EnableGrpc on GrpcPort 4472, node 4 2025-06-30T09:19:11.307060Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:11.307076Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:11.307079Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:11.307141Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17930 2025-06-30T09:19:11.355337Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Di ... 
4e00] received request Name# RemoveDatabase ok# false data# peer# 2025-06-30T09:19:13.083518Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be38ce00] received request Name# ListDatabases ok# false data# peer# 2025-06-30T09:19:13.083541Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be34ea00] received request Name# DescribeDatabaseOptions ok# false data# peer# 2025-06-30T09:19:13.083548Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be354000] received request Name# GetScaleRecommendation ok# false data# peer# 2025-06-30T09:19:13.083564Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3ed500] received request Name# ListEndpoints ok# false data# peer# 2025-06-30T09:19:13.083574Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3ec000] received request Name# WhoAmI ok# false data# peer# 2025-06-30T09:19:13.083587Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3e5c00] received request Name# NodeRegistration ok# false data# peer# 2025-06-30T09:19:13.083597Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be373800] received request Name# Scan ok# false data# peer# 2025-06-30T09:19:13.083610Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be38c700] received request Name# GetShardLocations ok# false data# peer# 2025-06-30T09:19:13.083631Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be363100] received request Name# DescribeTable ok# false data# peer# 2025-06-30T09:19:13.083644Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be362300] received request Name# CreateSnapshot ok# false data# peer# 2025-06-30T09:19:13.083659Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be36f100] received request Name# RefreshSnapshot ok# false data# peer# 2025-06-30T09:19:13.083683Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be371c00] received request Name# DiscardSnapshot ok# false data# peer# 2025-06-30T09:19:13.083685Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be36f800] received request Name# List ok# false data# peer# 2025-06-30T09:19:13.083712Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be370000] received request Name# RateLimiter/CreateResource ok# false data# peer# 2025-06-30T09:19:13.083718Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be344000] received request Name# RateLimiter/AlterResource ok# false data# peer# 2025-06-30T09:19:13.083734Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be341500] received request Name# RateLimiter/DropResource ok# false data# peer# 2025-06-30T09:19:13.083749Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3b1500] received request Name# RateLimiter/ListResources ok# false data# peer# 2025-06-30T09:19:13.083759Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be361c00] received request Name# RateLimiter/DescribeResource ok# false data# peer# 2025-06-30T09:19:13.083783Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be354700] received request Name# RateLimiter/AcquireResource ok# false data# peer# 2025-06-30T09:19:13.083784Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be34d500] received request Name# CreateStream ok# false data# peer# 2025-06-30T09:19:13.083809Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be372300] received request Name# ListStreams ok# false data# peer# 2025-06-30T09:19:13.083822Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be372a00] received request Name# DeleteStream ok# false data# peer# 2025-06-30T09:19:13.083833Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be34dc00] received request Name# DescribeStream ok# false data# peer# 2025-06-30T09:19:13.083847Z node 5 :GRPC_SERVER 
DEBUG: logger.cpp:36: [0x31e3be364000] received request Name# ListShards ok# false data# peer# 2025-06-30T09:19:13.083858Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be35a300] received request Name# SetWriteQuota ok# false data# peer# 2025-06-30T09:19:13.083872Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be347800] received request Name# UpdateStream ok# false data# peer# 2025-06-30T09:19:13.083882Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be363800] received request Name# PutRecord ok# false data# peer# 2025-06-30T09:19:13.083905Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be362a00] received request Name# PutRecords ok# false data# peer# 2025-06-30T09:19:13.083908Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be361500] received request Name# GetRecords ok# false data# peer# 2025-06-30T09:19:13.083929Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3b3800] received request Name# GetShardIterator ok# false data# peer# 2025-06-30T09:19:13.083937Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be357100] received request Name# SubscribeToShard ok# false data# peer# 2025-06-30T09:19:13.083952Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be24b100] received request Name# DescribeLimits ok# false data# peer# 2025-06-30T09:19:13.083971Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be345500] received request Name# DescribeStreamSummary ok# false data# peer# 2025-06-30T09:19:13.083977Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be249c00] received request Name# DecreaseStreamRetentionPeriod ok# false data# peer# 2025-06-30T09:19:13.084001Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be24e300] received request Name# IncreaseStreamRetentionPeriod ok# false data# peer# 2025-06-30T09:19:13.084003Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be34c700] received request Name# UpdateShardCount ok# false data# peer# 2025-06-30T09:19:13.084030Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be359c00] received request Name# UpdateStreamMode ok# false data# peer# 2025-06-30T09:19:13.084033Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be358e00] received request Name# RegisterStreamConsumer ok# false data# peer# 2025-06-30T09:19:13.084061Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be356a00] received request Name# DescribeStreamConsumer ok# false data# peer# 2025-06-30T09:19:13.084063Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be358700] received request Name# DeregisterStreamConsumer ok# false data# peer# 2025-06-30T09:19:13.084084Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be24ce00] received request Name# ListStreamConsumers ok# false data# peer# 2025-06-30T09:19:13.084093Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be24b800] received request Name# AddTagsToStream ok# false data# peer# 2025-06-30T09:19:13.084108Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be1d4a00] received request Name# DisableEnhancedMonitoring ok# false data# peer# 2025-06-30T09:19:13.084121Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be364e00] received request Name# EnableEnhancedMonitoring ok# false data# peer# 2025-06-30T09:19:13.084128Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be365500] received request Name# ListTagsForStream ok# false data# peer# 2025-06-30T09:19:13.084147Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be364700] received request Name# MergeShards ok# false data# peer# 2025-06-30T09:19:13.084151Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be36ce00] received request Name# RemoveTagsFromStream ok# false 
data# peer# 2025-06-30T09:19:13.084167Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3ece00] received request Name# SplitShard ok# false data# peer# 2025-06-30T09:19:13.084174Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3e7800] received request Name# StartStreamEncryption ok# false data# peer# 2025-06-30T09:19:13.084191Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be34c000] received request Name# StopStreamEncryption ok# false data# peer# 2025-06-30T09:19:13.084197Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be340700] received request Name# SelfCheck ok# false data# peer# 2025-06-30T09:19:13.084217Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3ee300] received request Name# NodeCheck ok# false data# peer# 2025-06-30T09:19:13.084221Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be248e00] received request Name# CreateSession ok# false data# peer# 2025-06-30T09:19:13.084240Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3ef100] received request Name# DeleteSession ok# false data# peer# 2025-06-30T09:19:13.084247Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be34ce00] received request Name# AttachSession ok# false data# peer# 2025-06-30T09:19:13.084266Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be1d2e00] received request Name# BeginTransaction ok# false data# peer# 2025-06-30T09:19:13.084272Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3bfdfd500] received request Name# CommitTransaction ok# false data# peer# 2025-06-30T09:19:13.084289Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be38dc00] received request Name# RollbackTransaction ok# false data# peer# 2025-06-30T09:19:13.084295Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be356300] received request Name# ExecuteQuery ok# false data# peer# 2025-06-30T09:19:13.084312Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be355500] received request Name# ExecuteScript ok# false data# peer# 2025-06-30T09:19:13.084318Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be365c00] received request Name# FetchScriptResults ok# false data# peer# 2025-06-30T09:19:13.084335Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be343800] received request Name# ExecuteTabletMiniKQL ok# false data# peer# 2025-06-30T09:19:13.084340Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be344700] received request Name# ChangeTabletSchema ok# false data# peer# 2025-06-30T09:19:13.084357Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3c2300] received request Name# RestartTablet ok# false data# peer# 2025-06-30T09:19:13.084363Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3eea00] received request Name# CreateLogStore ok# false data# peer# 2025-06-30T09:19:13.084378Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3ef800] received request Name# DescribeLogStore ok# false data# peer# 2025-06-30T09:19:13.084386Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be345c00] received request Name# DropLogStore ok# false data# peer# 2025-06-30T09:19:13.084401Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be3edc00] received request Name# AlterLogStore ok# false data# peer# 2025-06-30T09:19:13.084409Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be343100] received request Name# CreateLogTable ok# false data# peer# 2025-06-30T09:19:13.084425Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e39d1f0e00] received request Name# DescribeLogTable ok# false data# peer# 2025-06-30T09:19:13.084451Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be344e00] received request Name# DropLogTable ok# false data# peer# 
2025-06-30T09:19:13.084458Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be346a00] received request Name# AlterLogTable ok# false data# peer# 2025-06-30T09:19:13.084474Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3b4ff7800] received request Name# Login ok# false data# peer# 2025-06-30T09:19:13.084483Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be24dc00] received request Name# DescribeReplication ok# false data# peer# 2025-06-30T09:19:13.084499Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x31e3be1d3c00] received request Name# DescribeView ok# false data# peer# >> Acceleration::TestAcceleration4Plus2BlockPutAsyncBlob2Slow [GOOD] >> Acceleration::TestAccelerationMirror3dcGetAsyncRead1Slow >> THiveTest::TestLockTabletExecutionReconnectExpire [GOOD] >> THiveTest::TestLockTabletExecutionStealLock >> TxUsage::WriteToTopic_Demo_22_RestartNo_Table >> KqpWorkload::STOCK [GOOD] >> Viewer::SelectStringWithBase64Encoding [GOOD] >> Viewer::QueryExecuteScript >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10000Inflight1BlobSize1000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests2Inflight2BlobSize1000 |79.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |80.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |80.0%| [LD] {RESULT} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut >> BasicUsage::ReadWithoutConsumerWithRestarts [GOOD] >> BasicUsage::ReadWithRestarts >> PgCatalog::CheckSetConfig [GOOD] >> PgCatalog::PgDatabase+useSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/perf/unittest >> KqpWorkload::STOCK [GOOD] Test command err: Trying to start YDB, gRPC: 61864, MsgBus: 10588 2025-06-30T09:18:56.902514Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669437620960808:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:56.902583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a72/r3tmp/tmpnnNyem/pdisk_1.dat 2025-06-30T09:18:56.965543Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:56.965795Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669437620960785:2080] 1751275136902343 != 1751275136902346 TServer::EnableGrpc on GrpcPort 61864, node 1 2025-06-30T09:18:56.994956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:56.994971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:56.994973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:56.995017Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:57.005422Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:57.005466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:57.006697Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10588 TClient is connected to server localhost:10588 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:57.061153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:57.395995Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441915928706:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.396023Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.464805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.492645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.646315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:57.858489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441915932554:2590], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.858512Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.858661Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669441915932559:2593], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:57.859726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:57.864767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:18:57.864870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669441915932561:2594], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:18:57.904906Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:57.940167Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669441915932621:4745] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:01.902678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669437620960808:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:01.902721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; took: 0.371731s took: 0.371925s took: 0.372139s took: 0.380928s took: 0.382848s took: 0.382935s took: 0.383226s took: 0.383621s took: 0.384629s took: 0.387929s 2025-06-30T09:19:08.698527Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:105;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976716232; 2025-06-30T09:19:08.705872Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [1:7521669489160585287:5001], Table: `/Root/stock` ([72057594046644480:2:1]), SessionActorId: [1:7521669480570647549:5001]Got LOCKS BROKEN for table `/Root/stock`. ShardID=72075186224037888, Sink=[1:7521669489160585287:5001].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-30T09:19:08.706207Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7521669489160584407:5001], SessionActorId: [1:7521669480570647549:5001], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/stock`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[1:7521669480570647549:5001]. isRollback=0 2025-06-30T09:19:08.706309Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1938: SessionId: ydb://session/3?node_id=1&id=YTkxYzQ5NzktMWI4ZDgzMmQtNDQ4ZjFlNjgtZTI4N2Q5OGM=, ActorId: [1:7521669480570647549:5001], ActorState: ExecuteState, TraceId: 01jz0244ny8mpteaxd20ht2ne5, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7521669489160585223:5001] from: [1:7521669489160584407:5001] 2025-06-30T09:19:08.706352Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [1:7521669489160585223:5001] TxId: 281474976716232. Ctx: { TraceId: 01jz0244ny8mpteaxd20ht2ne5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTkxYzQ5NzktMWI4ZDgzMmQtNDQ4ZjFlNjgtZTI4N2Q5OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/stock`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-30T09:19:08.706420Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=YTkxYzQ5NzktMWI4ZDgzMmQtNDQ4ZjFlNjgtZTI4N2Q5OGM=, ActorId: [1:7521669480570647549:5001], ActorState: ExecuteState, TraceId: 01jz0244ny8mpteaxd20ht2ne5, Create QueryResponse for error on request, msg: 2025-06-30T09:19:08.709641Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:105;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=281474976716232; 2025-06-30T09:19:08.712025Z node 1 :TX_DATASHARD ERROR: datashard.cpp:751: Complete volatile write [1751275148744 : 281474976716232] from 72075186224037929 at tablet 72075186224037929, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } 2025-06-30T09:19:08.714406Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:105;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=281474976716232; 2025-06-30T09:19:08.714469Z node 1 :TX_DATASHARD ERROR: datashard.cpp:751: Complete volatile write [1751275148744 : 281474976716232] from 72075186224037908 at tablet 72075186224037908, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } took: 2.968006s took: 2.967930s 2025-06-30T09:19:09.853373Z node 1 :GLOBAL WARN: l ... abletStatus from node 1, TabletId: 72075186224037909 not found 2025-06-30T09:19:14.087852Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found 2025-06-30T09:19:14.087855Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037921 not found 2025-06-30T09:19:14.087857Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037901 not found 2025-06-30T09:19:14.087859Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037922 not found 2025-06-30T09:19:14.087862Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037927 not found 2025-06-30T09:19:14.087865Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037920 not found 2025-06-30T09:19:14.087867Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037895 not found 2025-06-30T09:19:14.087869Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037926 not found 2025-06-30T09:19:14.087872Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-30T09:19:14.087875Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-30T09:19:14.087877Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037928 not found 2025-06-30T09:19:14.087880Z node 1 :HIVE 
WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found 2025-06-30T09:19:14.087883Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037902 not found 2025-06-30T09:19:14.087886Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037905 not found 2025-06-30T09:19:14.087888Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037914 not found 2025-06-30T09:19:14.087892Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037912 not found 2025-06-30T09:19:14.087895Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037896 not found 2025-06-30T09:19:14.087897Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037929 not found 2025-06-30T09:19:14.087899Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037919 not found 2025-06-30T09:19:14.087901Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 2025-06-30T09:19:14.087904Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037894 not found 2025-06-30T09:19:14.087907Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037916 not found 2025-06-30T09:19:14.087910Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037915 not found 2025-06-30T09:19:14.087912Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found 2025-06-30T09:19:14.087914Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037903 not found 2025-06-30T09:19:14.087918Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037918 not found 2025-06-30T09:19:14.087920Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037904 not found 2025-06-30T09:19:14.087923Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037908 not found 2025-06-30T09:19:14.094948Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037917 not found 2025-06-30T09:19:14.150905Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037968 not found 2025-06-30T09:19:14.160245Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037932 not found 2025-06-30T09:19:14.160266Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, 
TabletId: 72075186224037943 not found 2025-06-30T09:19:14.160269Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037955 not found 2025-06-30T09:19:14.160272Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037963 not found 2025-06-30T09:19:14.160275Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037936 not found 2025-06-30T09:19:14.160278Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037930 not found 2025-06-30T09:19:14.160282Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037947 not found 2025-06-30T09:19:14.160285Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037959 not found 2025-06-30T09:19:14.160288Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037954 not found 2025-06-30T09:19:14.160291Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037967 not found 2025-06-30T09:19:14.160294Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037961 not found 2025-06-30T09:19:14.160775Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037944 not found 2025-06-30T09:19:14.160779Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037945 not found 2025-06-30T09:19:14.160784Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037942 not found 2025-06-30T09:19:14.160787Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037933 not found 2025-06-30T09:19:14.160790Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037935 not found 2025-06-30T09:19:14.160793Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037969 not found 2025-06-30T09:19:14.160796Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037946 not found 2025-06-30T09:19:14.160799Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037965 not found 2025-06-30T09:19:14.160803Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037953 not found 2025-06-30T09:19:14.160806Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037950 not found 2025-06-30T09:19:14.160808Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037931 not found 2025-06-30T09:19:14.160811Z node 1 :HIVE WARN: 
hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037951 not found 2025-06-30T09:19:14.160814Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037956 not found 2025-06-30T09:19:14.160817Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037966 not found 2025-06-30T09:19:14.160820Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037941 not found 2025-06-30T09:19:14.160823Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037940 not found 2025-06-30T09:19:14.160826Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037937 not found 2025-06-30T09:19:14.160829Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037939 not found 2025-06-30T09:19:14.160832Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037957 not found 2025-06-30T09:19:14.160835Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037938 not found 2025-06-30T09:19:14.160838Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037958 not found 2025-06-30T09:19:14.160842Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037948 not found 2025-06-30T09:19:14.160845Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037964 not found 2025-06-30T09:19:14.160848Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037952 not found 2025-06-30T09:19:14.160851Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037962 not found 2025-06-30T09:19:14.160855Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037960 not found 2025-06-30T09:19:14.160858Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037949 not found 2025-06-30T09:19:14.160861Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037934 not found >> TxUsage::WriteToTopic_Demo_20_RestartNo_Table >> THiveTest::TestLockTabletExecutionStealLock [GOOD] >> THiveTest::TestLockTabletExecutionLocalGone >> Acceleration::TestThresholdPut4Plus2Block2Slow [GOOD] >> Acceleration::TestThresholdGetMirror3dc1Slow >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit-UseSink [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-DbAdmin-ordinaryuser [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-NoProtect-NoDbAdmin-dbadmin >> TAsyncIndexTests::MergeIndexWithReboots[PipeResets] [GOOD] >> 
CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests2Inflight2BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10Inflight10BlobSize1000 >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests10Inflight1BlobSize2000000 [GOOD] >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests100Inflight1BlobSize2000000 >> TxUsage::WriteToTopic_Demo_11_Table >> DataShardVolatile::TwoAppendsMustBeVolatile+UseSink [GOOD] >> DataShardVolatile::TwoAppendsMustBeVolatile-UseSink >> Acceleration::TestAccelerationMirror3dcGetAsyncRead1Slow [GOOD] >> Acceleration::TestAcceleration4Plus2BlockGetAsyncRead1Slow >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK_AutoCommit >> PgCatalog::PgDatabase+useSink [GOOD] >> PgCatalog::PgDatabase-useSink >> Viewer::ServerlessWithExclusiveNodes [GOOD] >> Viewer::SharedDoesntShowExclusiveNodes ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeIndexWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:18:39.769974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:39.770004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:39.770011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:39.770018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:39.770025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:39.770030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:39.770039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:39.770057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: 
Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:39.770182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:39.770254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:39.793281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:18:39.793324Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:39.793503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:18:39.803834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:39.805237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:39.805305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:39.812664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:39.812771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:39.812935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:39.813029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:39.814344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:39.814419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:39.814807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:39.814821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:39.814835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:39.814845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:39.814852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:39.814889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: 
[1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:18:39.822620Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:18:39.848071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:39.848174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:39.848247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:39.848254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:39.848302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:39.848314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:39.849333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:39.849383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:39.849456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:39.849464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:39.849469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:39.849474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:39.850011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:39.850040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 
72057594046678944 2025-06-30T09:18:39.850048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:39.850578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:39.850596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:39.850604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:39.850614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:39.851473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:39.852375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:39.852433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:39.852697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:39.852849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... 
rceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 
8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:15.893623Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:19:15.893760Z node 26 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 160us result status StatusSuccess 2025-06-30T09:19:15.894022Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess 
Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> BlobStorageSync::TestSyncLogCuttingMirror3of4 [GOOD] >> BlobStorageSync::TestSyncLogCuttingBlock4Plus2 >> Acceleration::TestThresholdGetMirror3dc1Slow [GOOD] >> Acceleration::TestThresholdGet4Plus2Block1Slow >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests2Inflight2BlobSize1000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10Inflight10BlobSize1000 >> TSchemeShardSysNames::ESchemeOpCreateView-NoProtect-NoDbAdmin-dbadmin [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-NoDbAdmin-dbadmin >> THiveTest::TestLockTabletExecutionLocalGone [GOOD] >> THiveTest::TestProgressWithMaxTabletsScheduled |80.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> KqpPg::TableInsert-useSink [GOOD] >> KqpPg::TempTablesSessionsIsolation >> Acceleration::TestAcceleration4Plus2BlockGetAsyncRead1Slow [GOOD] >> Acceleration::TestAccelerationMirror3dcGetAsyncRead2Slow >> PgCatalog::PgDatabase-useSink [GOOD] >> PgCatalog::PgRoles >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-NoDbAdmin-dbadmin [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-DbAdmin-dbadmin >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10Inflight10BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests100Inflight10BlobSize1000 |80.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |80.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |80.0%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage >> PgCatalog::PgRoles [GOOD] >> PgCatalog::PgTables >> KqpPg::TempTablesSessionsIsolation [GOOD] >> KqpPg::TempTablesDrop >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort [GOOD] >> DataShardVolatile::DistributedWriteAsymmetricExecute |80.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |80.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |80.0%| [LD] {RESULT} 
$(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |80.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> Viewer::SharedDoesntShowExclusiveNodes [GOOD] >> Viewer::ServerlessWithExclusiveNodesCheckTable >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-DbAdmin-dbadmin [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-NoProtect-NoDbAdmin-clusteradmin >> Acceleration::TestAccelerationMirror3dcGetAsyncRead2Slow [GOOD] >> Acceleration::TestAcceleration4Plus2BlockGetAsyncRead2Slow |80.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> THiveTest::TestProgressWithMaxTabletsScheduled [GOOD] >> Viewer::QueryExecuteScript [GOOD] >> Viewer::Plan2SvgOK >> DataShardVolatile::TwoAppendsMustBeVolatile-UseSink [GOOD] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure+UseSink >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10Inflight10BlobSize1000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests100Inflight10BlobSize1000 >> Acceleration::TestThresholdGet4Plus2Block1Slow [GOOD] >> Acceleration::TestThresholdGetMirror3dc2Slow >> KqpPg::TempTablesDrop [GOOD] >> KqpPg::TempTablesWithCache >> TopicAutoscaling::PartitionMerge_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ControlPlane_CreateAlterDescribe >> TSchemeShardSysNames::ESchemeOpCreateView-NoProtect-NoDbAdmin-clusteradmin [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-NoDbAdmin-clusteradmin >> TopicAutoscaling::ControlPlane_CDC [GOOD] >> TopicAutoscaling::ControlPlane_CDC_Disable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> THiveTest::TestProgressWithMaxTabletsScheduled [GOOD] Test command err: 2025-06-30T09:17:53.258378Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:53.265404Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:53.265480Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:53.265768Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:53.265841Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:17:53.266267Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-30T09:17:53.266280Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:53.266450Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:50:2076] 
ControllerId# 72057594037932033 2025-06-30T09:17:53.266457Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:53.266478Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:53.266499Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:53.275535Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:53.275551Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:53.275846Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:58:2081] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.275874Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:59:2082] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.275902Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:60:2083] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.276149Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:61:2084] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.276174Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:62:2085] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.276198Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:63:2086] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.276223Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:64:2087] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.276238Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-30T09:17:53.276251Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:50:2076] 2025-06-30T09:17:53.276257Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:50:2076] 2025-06-30T09:17:53.276265Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-30T09:17:53.276274Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:17:53.276696Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:17:53.276710Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:53.277428Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:53.277470Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:53.277603Z node 2 :BS_NODE WARN: 
{NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:53.277648Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:53.277788Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:75:2076] ControllerId# 72057594037932033 2025-06-30T09:17:53.277792Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:53.277805Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:53.277823Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:53.279499Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:17:53.300125Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:53.300150Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:53.300491Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:82:2080] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.300527Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:83:2081] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.300555Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:84:2082] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.300580Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:85:2083] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.300868Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:86:2084] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.300910Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:87:2085] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.300954Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:88:2086] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.300959Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-30T09:17:53.300976Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:75:2076] 2025-06-30T09:17:53.300983Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:75:2076] 2025-06-30T09:17:53.300992Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-30T09:17:53.301003Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:17:53.301076Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:17:53.301167Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.301175Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:53.301201Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:17:53.301217Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward 
TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:53.301261Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:75:2076] 2025-06-30T09:17:53.301267Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.301270Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:53.301276Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:17:53.301291Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:53.419405Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:50:2076] 2025-06-30T09:17:53.419444Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.419455Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-30T09:17:53.420219Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-30T09:17:53.421111Z node 2 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-30T09:17:53.421137Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-30T09:17:53.421148Z node 2 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:17:53.421154Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:17:53.421181Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:96:2090] 2025-06-30T09:17:53.421235Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.421242Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-30T09:17:53.421256Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-30T09:17:53.422296Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-30T09:17:53.422311Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:17:53.422801Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.422810Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [2:96:2090] 2025-06-30T09:17:53.422815Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] loo ... 
6:687:2256] CurrentLeaderTablet: [26:693:2260] CurrentGeneration: 1 CurrentStep: 0} 2025-06-30T09:19:18.465074Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037894 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037894 Cookie: 0 CurrentLeader: [26:687:2256] CurrentLeaderTablet: [26:693:2260] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[25:1099535971443:0] : 6}, {[25:24343667:0] : 3}}}} 2025-06-30T09:19:18.465079Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037894 followers: 0 2025-06-30T09:19:18.465084Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 26 selfDC 2 leaderDC 2 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037894 followers: 0 countLeader 1 allowFollowers 0 winner: [26:687:2256] 2025-06-30T09:19:18.465092Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037894] forward result local node, try to connect [26:1091:2472] 2025-06-30T09:19:18.465097Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037894]::SendEvent [26:1091:2472] 2025-06-30T09:19:18.465109Z node 26 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037894] Accept Connect Originator# [26:1091:2472] 2025-06-30T09:19:18.465137Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037894] connected with status OK role: Leader [26:1091:2472] 2025-06-30T09:19:18.465142Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037894] send queued [26:1091:2472] 2025-06-30T09:19:18.465194Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037895] ::Bootstrap [26:1095:2475] 2025-06-30T09:19:18.465199Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037895] lookup [26:1095:2475] 2025-06-30T09:19:18.465211Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037895 entry.State: StInit ev: {EvForward TabletID: 72075186224037895 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:19:18.465235Z node 26 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037895 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:19:18.465284Z node 25 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037895 Cookie: 0} 2025-06-30T09:19:18.465291Z node 25 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037895 Cookie: 1} 2025-06-30T09:19:18.465297Z node 25 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037895 Cookie: 2} 2025-06-30T09:19:18.465335Z node 26 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037895 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [26:941:2368] CurrentLeaderTablet: [26:943:2369] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:19:18.465349Z node 26 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037895 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [26:941:2368] CurrentLeaderTablet: [26:943:2369] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:19:18.465362Z node 
26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037895 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037895 Cookie: 0 CurrentLeader: [26:941:2368] CurrentLeaderTablet: [26:943:2369] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[25:1099535971443:0] : 6}, {[25:24343667:0] : 3}}}} 2025-06-30T09:19:18.465366Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037895 followers: 0 2025-06-30T09:19:18.465372Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 26 selfDC 2 leaderDC 2 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037895 followers: 0 countLeader 1 allowFollowers 0 winner: [26:941:2368] 2025-06-30T09:19:18.465379Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037895] forward result local node, try to connect [26:1095:2475] 2025-06-30T09:19:18.465384Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037895]::SendEvent [26:1095:2475] 2025-06-30T09:19:18.465401Z node 26 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037895] Accept Connect Originator# [26:1095:2475] 2025-06-30T09:19:18.465425Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037895] connected with status OK role: Leader [26:1095:2475] 2025-06-30T09:19:18.465430Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037895] send queued [26:1095:2475] 2025-06-30T09:19:18.465470Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037896] ::Bootstrap [26:1099:2478] 2025-06-30T09:19:18.465474Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037896] lookup [26:1099:2478] 2025-06-30T09:19:18.465488Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037896 entry.State: StInit ev: {EvForward TabletID: 72075186224037896 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:19:18.465543Z node 26 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037896 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:19:18.465598Z node 25 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037896 Cookie: 0} 2025-06-30T09:19:18.465605Z node 25 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037896 Cookie: 1} 2025-06-30T09:19:18.465612Z node 25 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037896 Cookie: 2} 2025-06-30T09:19:18.465650Z node 26 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037896 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [26:769:2282] CurrentLeaderTablet: [26:775:2286] CurrentGeneration: 1 CurrentStep: 0} 2025-06-30T09:19:18.465664Z node 26 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037896 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [26:769:2282] CurrentLeaderTablet: [26:775:2286] CurrentGeneration: 1 CurrentStep: 0} 2025-06-30T09:19:18.465676Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037896 entry.State: 
StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037896 Cookie: 0 CurrentLeader: [26:769:2282] CurrentLeaderTablet: [26:775:2286] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[25:1099535971443:0] : 6}, {[25:24343667:0] : 3}}}} 2025-06-30T09:19:18.465681Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037896 followers: 0 2025-06-30T09:19:18.465687Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 26 selfDC 2 leaderDC 2 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037896 followers: 0 countLeader 1 allowFollowers 0 winner: [26:769:2282] 2025-06-30T09:19:18.465694Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037896] forward result local node, try to connect [26:1099:2478] 2025-06-30T09:19:18.465699Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037896]::SendEvent [26:1099:2478] 2025-06-30T09:19:18.465716Z node 26 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037896] Accept Connect Originator# [26:1099:2478] 2025-06-30T09:19:18.465737Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037896] connected with status OK role: Leader [26:1099:2478] 2025-06-30T09:19:18.465742Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037896] send queued [26:1099:2478] 2025-06-30T09:19:18.465792Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037897] ::Bootstrap [26:1103:2481] 2025-06-30T09:19:18.465797Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037897] lookup [26:1103:2481] 2025-06-30T09:19:18.465811Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037897 entry.State: StInit ev: {EvForward TabletID: 72075186224037897 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:19:18.465831Z node 26 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037897 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:19:18.465883Z node 25 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037897 Cookie: 0} 2025-06-30T09:19:18.465895Z node 25 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037897 Cookie: 1} 2025-06-30T09:19:18.465902Z node 25 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037897 Cookie: 2} 2025-06-30T09:19:18.465967Z node 26 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037897 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [26:853:2310] CurrentLeaderTablet: [26:855:2311] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:19:18.465983Z node 26 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037897 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [26:853:2310] CurrentLeaderTablet: [26:855:2311] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:19:18.465995Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037897 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037897 Cookie: 0 CurrentLeader: [26:853:2310] 
CurrentLeaderTablet: [26:855:2311] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[25:1099535971443:0] : 6}, {[25:24343667:0] : 3}}}} 2025-06-30T09:19:18.466000Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037897 followers: 0 2025-06-30T09:19:18.466006Z node 26 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 26 selfDC 2 leaderDC 2 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037897 followers: 0 countLeader 1 allowFollowers 0 winner: [26:853:2310] 2025-06-30T09:19:18.466014Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037897] forward result local node, try to connect [26:1103:2481] 2025-06-30T09:19:18.466019Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037897]::SendEvent [26:1103:2481] 2025-06-30T09:19:18.466036Z node 26 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037897] Accept Connect Originator# [26:1103:2481] 2025-06-30T09:19:18.466057Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037897] connected with status OK role: Leader [26:1103:2481] 2025-06-30T09:19:18.466062Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037897] send queued [26:1103:2481] >> BasicUsage::WaitEventBlocksBeforeDiscovery [GOOD] >> BasicUsage::SimpleHandlers >> PgCatalog::PgTables [GOOD] >> Acceleration::TestAcceleration4Plus2BlockGetAsyncRead2Slow [GOOD] >> Acceleration::TestDelayMultiplierPutMirror3dc1Slow >> Viewer::Plan2SvgOK [GOOD] >> Viewer::Plan2SvgBad >> GenericFederatedQuery::PostgreSQLOnPremSelectAll >> Acceleration::TestThresholdGetMirror3dc2Slow [GOOD] >> Acceleration::TestThresholdGet4Plus2Block2Slow >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests100Inflight10BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10000Inflight100BlobSize1000 >> CommitOffset::Commit_WithWrongSession_ToParent [GOOD] >> CommitOffset::Commit_WithoutSession_ParentNotFinished >> GenericFederatedQuery::ClickHouseManagedSelectAll >> TxUsage::WriteToTopic_Demo_12_Table [GOOD] >> KqpPg::TempTablesWithCache [GOOD] >> KqpPg::TableDeleteWhere+useSink >> DataShardVolatile::DistributedWriteAsymmetricExecute [GOOD] >> DataShardVolatile::DistributedWriteThenDropTable >> TxUsage::WriteToTopic_Demo_13_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> PgCatalog::PgTables [GOOD] Test command err: Trying to start YDB, gRPC: 21254, MsgBus: 28187 2025-06-30T09:18:25.296769Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669304667481193:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:25.296803Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00332e/r3tmp/tmpLCRMsU/pdisk_1.dat 2025-06-30T09:18:25.364384Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21254, node 1 2025-06-30T09:18:25.382054Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:25.382080Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: (empty maybe) 2025-06-30T09:18:25.382083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:25.382129Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28187 TClient is connected to server localhost:28187 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:18:25.443460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:25.443494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:25.444973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:25.453865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:25.463304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 1042 2025-06-30T09:18:25.771801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_17472595041006102391_17823623939509273229' Typemod mismatch, got type pgbpchar for column value, type mod , but expected 2 --!syntax_pg INSERT INTO Coerce_pgbpchar_17472595041006102391_17823623939509273229 (key, value) VALUES ( '0'::int2, 'abcd'::bpchar ) 2025-06-30T09:18:25.823200Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669304667481883:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.823233Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.823352Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669304667481895:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:25.824360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:25.828127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:18:25.828420Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669304667481897:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:18:25.883804Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669304667481948:2388] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:25.974197Z node 1 :TX_DATASHARD CRIT: execute_kqp_data_tx_unit.cpp:449: Exception while executing KQP transaction [0:281474976715663] at 72075186224037888: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-30T09:18:25.976078Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715663 at tablet 72075186224037888 status: EXEC_ERROR errors: UNKNOWN (Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ) | 2025-06-30T09:18:25.976164Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [1:7521669304667481999:2298] TxId: 281474976715663. Ctx: { TraceId: 01jz022vgy74wzx5x86sdy8qgv, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM2MTA3MzktNDFlYzEyYjEtNGNjNzkwODAtOTQ0MzllZmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ; 2025-06-30T09:18:25.977946Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=YWM2MTA3MzktNDFlYzEyYjEtNGNjNzkwODAtOTQ0MzllZmM=, ActorId: [1:7521669304667481880:2298], ActorState: ExecuteState, TraceId: 01jz022vgy74wzx5x86sdy8qgv, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-30T09:18:25.990632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce__pgbpchar_17472595041006102391_5352544928909966465' Typemod mismatch, got type _pgbpchar for column value, type mod , but expected 2 --!syntax_pg INSERT INTO Coerce__pgbpchar_17472595041006102391_5352544928909966465 (key, value) VALUES ( '0'::int2, '{abcd,abcd}'::_bpchar ) 2025-06-30T09:18:26.062223Z node 1 :TX_DATASHARD CRIT: execute_kqp_data_tx_unit.cpp:449: Exception while executing KQP transaction [0:281474976715668] at 72075186224037889: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-30T09:18:26.063700Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715668 at tablet 72075186224037889 status: EXEC_ERROR errors: UNKNOWN (Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ) | 2025-06-30T09:18:26.063778Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [1:7521669308962449425:2331] TxId: 281474976715668. Ctx: { TraceId: 01jz022vpt792901gktxq2qrqs, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWQwMTdkM2QtZjcxODk5NDgtNDFkZmNjYTktNzZiZGU0ZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ; 2025-06-30T09:18:26.063845Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=YWQwMTdkM2QtZjcxODk5NDgtNDFkZmNjYTktNzZiZGU0ZWM=, ActorId: [1:7521669308962449385:2331], ActorState: ExecuteState, TraceId: 01jz022vpt792901gktxq2qrqs, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 1042 2025-06-30T09:18:26.068958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo ... Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:17.621660Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:17.621696Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:17.622900Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:17.625284Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:17.933143Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7521669527443724835:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:17.933160Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7521669527443724818:2288], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:17.933183Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:17.933897Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:17.936014Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7521669527443724856:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:19:17.990706Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7521669527443724907:2326] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 29246, MsgBus: 2412 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00332e/r3tmp/tmp9FhzY6/pdisk_1.dat 2025-06-30T09:19:18.449164Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:18.450864Z node 14 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29246, node 14 2025-06-30T09:19:18.490981Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:18.490996Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:18.490999Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:18.491053Z node 14 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2412 2025-06-30T09:19:18.526488Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:18.526518Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:18.530437Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2412 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:18.604915Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:19:18.607955Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:18.874391Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7521669531000369485:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:18.874421Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7521669531000369474:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:18.874445Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:18.875304Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:18.877271Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [14:7521669531000369488:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:19:18.963379Z node 14 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [14:7521669531000369539:2329] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:18.974384Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:19.040482Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:19.427012Z node 14 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:19.568140Z node 14 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 14, TabletId: 72075186224037888 not found 2025-06-30T09:19:19.572217Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:19.667407Z node 14 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [14:7521669535295337367:2393], TxId: 281474976715672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=14&id=ZTQ1MjQ0NmMtYjAxZTg0YzAtZGY5ZTVmOWEtZTcwNjQ2NzM=. CustomerSuppliedId : . TraceId : 01jz024g2k4d8gmxjwszt6c1vk. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED DEFAULT_ERROR: {
: Error: Terminate was called, reason(57): ERROR: invalid input syntax for type boolean: "pg_proc" }. 2025-06-30T09:19:19.667607Z node 14 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [14:7521669535295337369:2394], TxId: 281474976715672, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jz024g2k4d8gmxjwszt6c1vk. SessionId : ydb://session/3?node_id=14&id=ZTQ1MjQ0NmMtYjAxZTg0YzAtZGY5ZTVmOWEtZTcwNjQ2NzM=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [14:7521669535295337364:2390], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-30T09:19:19.667835Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=14&id=ZTQ1MjQ0NmMtYjAxZTg0YzAtZGY5ZTVmOWEtZTcwNjQ2NzM=, ActorId: [14:7521669535295337358:2390], ActorState: ExecuteState, TraceId: 01jz024g2k4d8gmxjwszt6c1vk, Create QueryResponse for error on request, msg: >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests100Inflight10BlobSize1000 [GOOD] >> CountingEvents::Put_Mirror3of4 >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-NoDbAdmin-clusteradmin [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-DbAdmin-clusteradmin >> DataShardVolatile::VolatileCommitOnBlobStorageFailure+UseSink [GOOD] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure-UseSink >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests100Inflight1BlobSize2000000 [GOOD] >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests2Inflight2BlobSize2000000 >> BlobStorageSync::TestSyncLogCuttingBlock4Plus2 [GOOD] >> BlobStorageSync::SyncWhenDiskGetsDown [GOOD] >> BurstDetection::TestPutEvenly >> CountingEvents::Put_Mirror3of4 [GOOD] >> CountingEvents::Put_Mirror3dc >> Viewer::Plan2SvgBad [GOOD] >> Viewer::ServerlessWithExclusiveNodesCheckTable [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectAll >> CountingEvents::Put_Mirror3dc [GOOD] >> CountingEvents::Put_Block42 >> Acceleration::TestDelayMultiplierPutMirror3dc1Slow [GOOD] >> Acceleration::TestDelayMultiplierPut4Plus2Block1Slow >> CountingEvents::Put_Block42 [GOOD] >> CountingEvents::Put_None ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::Plan2SvgBad [GOOD] Test command err: Data has built Merge = 0.05809857622 Data has merged 2025-06-30T09:19:09.363161Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669494171815770:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:09.365120Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:09.450007Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:09.453655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:09.453700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:09.460060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26374, node 1 2025-06-30T09:19:09.518978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:09.518994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:09.518997Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:09.519053Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17038 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:09.568671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:09.575200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:09.581699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:09.582661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:19:09.583955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-30T09:19:10.313751Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669498466783658:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:10.313801Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:10.314042Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669498466783670:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:10.315284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:10.318720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:19:10.318836Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669498466783672:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:19:10.332602Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:10.395544Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669498466783732:2351] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:10.916024Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:10.939984Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:10.941196Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521669496676224081:2080] 1751275150884136 != 1751275150884139 TServer::EnableGrpc on GrpcPort 30982, node 2 2025-06-30T09:19:10.971671Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:10.971683Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:10.971687Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:10.971741Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17436 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:19:11.003294Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:11.003340Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:11.005161Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:19:11.014975Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:11.025538Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:11.035333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:11.036360Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-30T09:19:11.037810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-30T09:19:11.808354Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669500971192037:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:11.808396Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, is ... do unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:15.723020Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:15.723037Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:15.803351Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:15.803375Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:15.822988Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:15.883258Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:15.883284Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:15.926694Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:15.926712Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:15.987946Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:15.987962Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:16.052310Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:16.052333Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:16.136660Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:16.136680Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:16.203111Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:16.203137Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:16.279724Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:16.279747Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:16.347131Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:16.347157Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:16.403116Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:16.403138Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 
2025-06-30T09:19:16.467090Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:16.467111Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:16.523114Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:16.523138Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:16.587499Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:16.587523Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:16.655108Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:16.655131Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:16.725053Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:16.725076Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:16.727149Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:16.727648Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:16.727879Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:17.867315Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:17.867345Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:18.006257Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:18.006280Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:18.069769Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-30T09:19:18.069787Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-30T09:19:18.283548Z node 5 :RPC_REQUEST WARN: rpc_stream_execute_scan_query.cpp:410: Client lost 2025-06-30T09:19:18.283623Z node 5 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [5:7521669530188550223:2539] TxId: 281474976715694. 
Ctx: { TraceId: 01jz024emxe1dn64cb1aen03p7, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=MzA0ZjI2ZGEtZWZkOGFmYzYtYTY3M2YzZTYtNjNkNDhmYjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-30T09:19:18.283685Z node 5 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=5&id=MzA0ZjI2ZGEtZWZkOGFmYzYtYTY3M2YzZTYtNjNkNDhmYjc=, ActorId: [5:7521669530188550203:2539], ActorState: ExecuteState, TraceId: 01jz024emxe1dn64cb1aen03p7, Create QueryResponse for error on request, msg: 2025-06-30T09:19:18.283820Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275158325, txId: 281474976715693] shutting down 2025-06-30T09:19:18.283942Z node 5 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [5:7521669530188550228:2544], TxId: 281474976715694, task: 2. Ctx: { TraceId : 01jz024emxe1dn64cb1aen03p7. SessionId : ydb://session/3?node_id=5&id=MzA0ZjI2ZGEtZWZkOGFmYzYtYTY3M2YzZTYtNjNkNDhmYjc=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [5:7521669530188550223:2539], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-30T09:19:18.918076Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521669532638598294:2242];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:18.922898Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:19:18.979774Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:18.986908Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7521669532638598066:2080] 1751275158899989 != 1751275158899992 TServer::EnableGrpc on GrpcPort 24688, node 6 2025-06-30T09:19:19.006021Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:19.006039Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:19.006041Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:19.006125Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:19.019344Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:19.019380Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:19.019793Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20975 2025-06-30T09:19:19.582858Z node 6 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (8C3E2D8D): Could not find correct token validator 2025-06-30T09:19:20.005240Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521669540571275762:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:20.005267Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:20.025979Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25868, node 7 2025-06-30T09:19:20.037423Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:20.037438Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:20.037440Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:20.037504Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2301 2025-06-30T09:19:20.111005Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:20.111043Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:20.112000Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:20.571116Z node 7 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (8C3E2D8D): Could not find correct token validator |80.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |80.0%| [LD] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |80.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut >> CountingEvents::Put_None [GOOD] >> CountingEvents::Get_Mirror3of4 [GOOD] >> CountingEvents::Get_Mirror3dc |80.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |80.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |80.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::ServerlessWithExclusiveNodesCheckTable [GOOD] Test command err: 2025-06-30T09:19:07.124044Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669484197925135:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:07.124256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 25360, node 1 2025-06-30T09:19:07.187932Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:07.188234Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669484197925108:2080] 1751275147123770 != 1751275147123773 2025-06-30T09:19:07.189170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:07.189181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:07.189183Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:07.189233Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11481 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:07.226963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:07.227010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:07.227986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:07.261848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:07.264263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:07.271643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:07.273018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:19:07.274766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-30T09:19:07.709108Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669484197925779:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:07.709112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669484197925771:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:07.709133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:07.710126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:07.714166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:19:07.714307Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669484197925785:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:19:07.796201Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669484197925836:2348] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:08.394491Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669489623617822:2260];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:08.402927Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:19:08.411301Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8242, node 2 2025-06-30T09:19:08.434440Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:08.434453Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:08.434457Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:08.434527Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5937 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:19:08.498606Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:08.498643Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:08.499426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:08.500119Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:19:08.517776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:08.535909Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:08.543385Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:19:08.545388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-30T09:19:08.911137Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669489623618239:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:08.911164Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669489623618228:2300], DatabaseId: /Root, PoolId: defaul ... ion.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:11.609304Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:19:11.609402Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521669504016870037:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:19:11.708106Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669504016870088:2347] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:13.074694Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:276:2235], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:13.074764Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:13.074806Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:13.193181Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:13.279112Z node 5 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:19:13.285755Z node 5 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-30T09:19:13.330010Z node 5 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 10517, node 5 TClient is connected to server localhost:11593 2025-06-30T09:19:13.361908Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:13.361948Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:13.361952Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:13.362055Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration json result: {"Version":16,"TotalNodes":"1","FoundNodes":"1","FieldsAvailable":"0000000010000110111111100100111","FieldsRequired":"0000000000000000000000000100101","Problems":["no-database-board-info"],"Nodes":[{"NodeId":6,"Disconnected":true,"SystemState":{"StartTime":"0","ChangeTime":"1","LoadAverage":[101.97021484375,76.75439453125,65.24169921875],"NumberOfCpus":64,"SystemState":"Green","Host":"ghrun-x4qzxsyz2q.auto.internal","Version":".274c048","Location":{"DataCenter":"2","Module":"2","Rack":"2","Unit":"2"},"CoresUsed":0,"CoresTotal":0,"RealNumberOfCpus":64}}]} 2025-06-30T09:19:15.320099Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:511:2391], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:15.320183Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:15.320202Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:15.522278Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:15.668062Z node 7 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:19:15.711013Z node 7 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-30T09:19:15.908327Z node 7 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 64419, node 7 TClient is connected to server localhost:14461 2025-06-30T09:19:15.980042Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:15.980065Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:15.980069Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:15.980212Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration json result: {"Version":16,"TotalNodes":"1","FoundNodes":"1","FieldsAvailable":"0000000010000110111111100000111","FieldsRequired":"0000000000000000000000000000101","Nodes":[{"NodeId":9,"Disconnected":true,"SystemState":{"StartTime":"0","ChangeTime":"1","LoadAverage":[101.97021484375,76.75439453125,65.24169921875],"NumberOfCpus":64,"SystemState":"Green","Host":"ghrun-x4qzxsyz2q.auto.internal","Version":".274c048","Location":{"DataCenter":"3","Module":"3","Rack":"3","Unit":"3"},"CoresUsed":0,"CoresTotal":0,"RealNumberOfCpus":64}}]} 2025-06-30T09:19:17.493620Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:511:2391], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:17.493678Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:17.493700Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:17.605834Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:17.709593Z node 10 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:19:17.719038Z node 10 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-30T09:19:17.783428Z node 10 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 15545, node 10 TClient is connected to server localhost:13722 2025-06-30T09:19:17.823926Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:17.823953Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:17.823958Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:17.824069Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration json result: {"Version":16,"TotalNodes":"1","FoundNodes":"1","FieldsAvailable":"0000000010000110111111100000111","FieldsRequired":"0000000000000000000000000000101","Nodes":[{"NodeId":11,"Disconnected":true,"SystemState":{"StartTime":"0","ChangeTime":"2","LoadAverage":[106.857421875,78.1865234375,65.76708984375],"NumberOfCpus":64,"SystemState":"Green","Host":"ghrun-x4qzxsyz2q.auto.internal","Version":".274c048","Location":{"DataCenter":"2","Module":"2","Rack":"2","Unit":"2"},"CoresUsed":0,"CoresTotal":0,"RealNumberOfCpus":64}}]} 2025-06-30T09:19:19.919725Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:593:2392], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:19.919829Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:19.919865Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:20.032472Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:20.143119Z node 13 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:19:20.148554Z node 13 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-30T09:19:20.192189Z node 13 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 13202, node 13 TClient is connected to server localhost:18387 2025-06-30T09:19:20.234440Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:20.234465Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:20.234470Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:20.234610Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration json result: {"Version":16,"TotalNodes":"2","FoundNodes":"2","FieldsAvailable":"0000000010100110111111100100111","FieldsRequired":"0000000000000000000000000100101","Nodes":[{"NodeId":15,"Disconnected":true,"SystemState":{"StartTime":"0","ChangeTime":"1","LoadAverage":[106.857421875,78.1865234375,65.76708984375],"NumberOfCpus":64,"SystemState":"Green","Host":"ghrun-x4qzxsyz2q.auto.internal","Version":".274c048","Location":{"DataCenter":"3","Module":"3","Rack":"3","Unit":"3"},"CoresUsed":0,"CoresTotal":0,"RealNumberOfCpus":64}},{"NodeId":16,"Disconnected":true,"SystemState":{"StartTime":"0","ChangeTime":"1","LoadAverage":[106.857421875,78.1865234375,65.76708984375],"NumberOfCpus":64,"SystemState":"Green","Host":"ghrun-x4qzxsyz2q.auto.internal","Version":".274c048","Location":{"DataCenter":"4","Module":"4","Rack":"4","Unit":"4"},"CoresUsed":0,"CoresTotal":0,"RealNumberOfCpus":64},"Tablets":[{"Type":"DataShard","State":"Green","Count":1}]}]} >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-DbAdmin-clusteradmin [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-NoProtect-NoDbAdmin-system >> GenericFederatedQuery::IcebergHadoopSaSelectAll >> BurstDetection::TestPutEvenly [GOOD] >> BurstDetection::TestPutBurst >> CountingEvents::Get_Mirror3dc [GOOD] >> CountingEvents::Get_Block42 >> CountingEvents::Get_Block42 [GOOD] >> CountingEvents::Get_None >> BasicUsage::TWriteSession_WriteEncoded [GOOD] >> CompressExecutor::TestExecutorMemUsage >> TxUsage::WriteToTopic_Demo_22_RestartNo_Table [GOOD] >> Viewer::JsonStorageListingV1 [GOOD] >> Viewer::JsonStorageListingV1GroupIdFilter >> CountingEvents::Get_None [GOOD] >> CountingEvents::Collect_Mirror3of4 >> 
DataShardVolatile::DistributedWriteThenDropTable [GOOD] >> DataShardVolatile::DistributedWriteThenCopyTable >> BasicUsage::PreferredDatabaseNoFallback [GOOD] >> Acceleration::TestThresholdGet4Plus2Block2Slow [GOOD] >> BlobPatching::Mirror3of4 >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests2Inflight2BlobSize2000000 [GOOD] >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests10Inflight10BlobSize2000000 >> TxUsage::WriteToTopic_Demo_22_RestartNo_Query >> CountingEvents::Collect_Mirror3of4 [GOOD] >> CountingEvents::Collect_Mirror3dc >> DataShardVolatile::VolatileCommitOnBlobStorageFailure-UseSink [GOOD] >> DataShardVolatile::VolatileTxAbortedOnSplit >> BurstDetection::TestPutBurst [GOOD] >> BurstDetection::TestOverlySensitive >> BlobPatching::Mirror3of4 [GOOD] >> BlobPatching::Mirror3dc >> GenericFederatedQuery::YdbManagedSelectAll >> GenericFederatedQuery::PostgreSQLOnPremSelectAll [GOOD] >> GenericFederatedQuery::PostgreSQLOnPremSelectConstant >> CountingEvents::Collect_Mirror3dc [GOOD] >> CountingEvents::Collect_Block42 >> GenericFederatedQuery::IcebergHiveTokenSelectAll >> TSchemeShardSysNames::ESchemeOpCreateView-NoProtect-NoDbAdmin-system [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-NoDbAdmin-system >> BlobPatching::Mirror3dc [GOOD] >> BlobPatching::Mirror3 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::PreferredDatabaseNoFallback [GOOD] Test command err: 2025-06-30T09:18:35.306673Z :GetAllStartPartitionSessions INFO: Random seed for debugging is 1751275115306664 2025-06-30T09:18:35.796371Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669347180568517:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:35.796504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:36.029926Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d89/r3tmp/tmpamCCLI/pdisk_1.dat 2025-06-30T09:18:36.043580Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:36.047565Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:18:36.095217Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28022, node 1 2025-06-30T09:18:36.137083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:36.137104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:36.139520Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:36.139935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:36.140689Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: 
/home/runner/.ya/build/build_root/cl3w/002d89/r3tmp/yandexCqB1bL.tmp 2025-06-30T09:18:36.140713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002d89/r3tmp/yandexCqB1bL.tmp 2025-06-30T09:18:36.140802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002d89/r3tmp/yandexCqB1bL.tmp 2025-06-30T09:18:36.140859Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:36.148658Z INFO: TTestServer started on Port 10179 GrpcPort 28022 2025-06-30T09:18:36.259076Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:36.259123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:36.279263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10179 PQClient connected to localhost:28022 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:36.352039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-30T09:18:36.627467Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669351475536645:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.627511Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.627810Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669351475536665:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:36.628820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:36.639026Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669351475536667:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:18:36.695344Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669351475536754:2665] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:36.705717Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669351475536764:2307], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:36.706875Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669350367644584:2273], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:36.707826Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=YTA5NWYwYjktNmYyMTU0MWItOWUyMjIxYTMtY2UyZjI2YTE=, ActorId: [2:7521669350367644551:2267], ActorState: ExecuteState, TraceId: 01jz02363c3j3a92c47xg1nnds, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:36.708460Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:36.710128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:36.719176Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=Y2VhZWFmNjQtZWRhN2E0MzEtMzM0ZWRlOTgtMTkwNjY4NDk=, ActorId: [1:7521669351475536641:2294], ActorState: ExecuteState, TraceId: 01jz02362gfqex3kge3799wmrn, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:36.719342Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." 
end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:36.750805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:36.795110Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:36.831220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:28022", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-06-30T09:18:36.871755Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:36.875066Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jz02369ncpgdn09c29dw4zmq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2E3Y2M2OWItZmVlNzNmZjQtZmVkNzAyYTYtM2NlMWVkMWE=, CurrentExecutionId: , Cu ... ooser_actor.h:174: TPartitionChooser [3:7521669422366123628:2447] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-30T09:18:52.158617Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669422366123666:2447] disconnected; active server actors: 1 2025-06-30T09:18:52.158629Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669422366123666:2447] disconnected no session 2025-06-30T09:18:52.175000Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7521669422366123628:2447] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-30T09:18:52.175019Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7521669422366123628:2447] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-30T09:18:52.175025Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7521669422366123628:2447] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-30T09:18:52.175034Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-30T09:18:52.175736Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:821: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-06-30T09:18:52.177007Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [3:7521669422366123688:2447], now have 1 active actors on pipe 2025-06-30T09:18:52.177062Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:52.177069Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:52.177112Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-30T09:18:52.177155Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-30T09:18:52.177179Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:52.178943Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:18:52.178959Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:18:52.178991Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:18:52.183181Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0 2025-06-30T09:18:52.183749Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1751275132183 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:18:52.183798Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-30T09:18:52.183915Z :INFO: [] MessageGroupId [src] SessionId [src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0] Write session: close. 
Timeout = 0 ms 2025-06-30T09:18:52.183921Z :INFO: [] MessageGroupId [src] SessionId [src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0] Write session will now close 2025-06-30T09:18:52.183925Z :DEBUG: [] MessageGroupId [src] SessionId [src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0] Write session: aborting 2025-06-30T09:18:52.184038Z :INFO: [] MessageGroupId [src] SessionId [src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:18:52.184044Z :DEBUG: [] MessageGroupId [src] SessionId [src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0] Write session: destroy 2025-06-30T09:18:52.184500Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0 grpc read done: success: 0 data: 2025-06-30T09:18:52.184509Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0 grpc read failed 2025-06-30T09:18:52.184515Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0 grpc closed 2025-06-30T09:18:52.184522Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|3a5a8f72-6b194b7b-b6df2cd5-7bd77ec2_0 is DEAD 2025-06-30T09:18:52.184763Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:18:52.188215Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7521669422366123688:2447] destroyed 2025-06-30T09:18:52.188245Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
====TYdbPqTestRetryPolicy() ====ExpectBreakDown === Session was created, waiting for retries >>> Ready to answer: ok ====CreateRetryState ====CreateRetryState Initialized Test retry state: get retry delay 2025-06-30T09:18:52.266937Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-30T09:18:54.269997Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-30T09:18:56.086648Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-30T09:18:56.270937Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s === In the next federation discovery response dc2 will be available Test retry state: get retry delay 2025-06-30T09:18:58.271983Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-30T09:19:00.090313Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs 2025-06-30T09:19:00.090340Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Test retry state: get retry delay 2025-06-30T09:19:00.278902Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-30T09:19:01.086945Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-30T09:19:02.282868Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-30T09:19:04.283985Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-30T09:19:06.089062Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-30T09:19:06.285955Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-30T09:19:08.290865Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-30T09:19:10.294884Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-30T09:19:11.093232Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-30T09:19:12.298912Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-30T09:19:14.302941Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-30T09:19:16.094928Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-30T09:19:16.303902Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-30T09:19:18.306908Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-30T09:19:20.309573Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-30T09:19:21.063050Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186224037893][rt3.dc1--test-topic] TPersQueueReadBalancer::HandleWakeup 2025-06-30T09:19:21.063085Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][rt3.dc1--test-topic] Send 
TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 1 2025-06-30T09:19:21.068228Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][rt3.dc1--test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 1 DataSize: 0 UsedReserveSize: 0 2025-06-30T09:19:21.068450Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][rt3.dc1--test-topic] ProcessPendingStats. PendingUpdates size 1 2025-06-30T09:19:21.067326Z node 4 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 TotalPartitions: 1 SourceIdMaxCounts: 6000000 } 2025-06-30T09:19:21.094926Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction === Waiting for repair >>> Ready to answer: ok 2025-06-30T09:19:22.310955Z :INFO: [/Root] [] [] Start federated write session to database 'dc2' (previous was ) FederationState: { Status: SUCCESS SelfLocation: "fancy_datacenter" DbInfos: [ { name: "dc1" path: "/Root" id: "account-dc1" endpoint: "localhost:8101" location: "dc1" status: AVAILABLE weight: 1000 } { name: "dc2" path: "/Root" id: "account-dc2" endpoint: "localhost:8101" location: "dc2" status: AVAILABLE weight: 500 } { name: "dc3" path: "/Root" id: "account-dc3" endpoint: "localhost:8101" location: "dc3" status: AVAILABLE weight: 500 } ] ControlPlaneEndpoint: cp.logbroker-federation:2135 } === Closing the session 2025-06-30T09:19:22.321278Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: try to update token 2025-06-30T09:19:22.322216Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Start write session. Will connect to nodeId: 0 2025-06-30T09:19:22.324134Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: close. 
Timeout 0.000000s 2025-06-30T09:19:22.324146Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session will now close 2025-06-30T09:19:22.324161Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: aborting 2025-06-30T09:19:22.324208Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: gracefully shut down, all writes complete 2025-06-30T09:19:22.324233Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: destroy >> TStorageBalanceTest::TestScenario2 [GOOD] >> TStorageBalanceTest::TestScenario3 >> GenericFederatedQuery::ClickHouseManagedSelectAll [GOOD] >> GenericFederatedQuery::ClickHouseManagedSelectConstant >> BlobPatching::Mirror3 [GOOD] >> BlobPatching::Block42 >> CountingEvents::Collect_Block42 [GOOD] >> CountingEvents::Collect_None >> LocalPartition::WithoutPartitionWithSplit [GOOD] >> TxUsage::ReadRuleGeneration >> CountingEvents::Collect_None [GOOD] >> Deadlines::TestPut4Plus2Block >> BlobPatching::Block42 [GOOD] >> BlobPatching::None >> TxUsage::WriteToTopic_Demo_20_RestartNo_Table [GOOD] >> BlobPatching::None [GOOD] >> BlobPatching::StressMirror3of4 >> Deadlines::TestPut4Plus2Block [GOOD] >> Deadlines::TestGetMirror3dc >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table [GOOD] >> BurstDetection::TestOverlySensitive [GOOD] >> CompatibilityInfo::VDiskCompatible [GOOD] >> CompatibilityInfo::VDiskIncompatible >> TxUsage::Sinks_Oltp_WriteToTopic_1_Table [GOOD] >> CompatibilityInfo::VDiskIncompatible [GOOD] >> CompatibilityInfo::VDiskIncompatibleWithDefault [GOOD] >> CompatibilityInfo::VDiskSuppressCompatibilityCheck [GOOD] >> CompatibilityInfo::BSControllerCompatible >> GenericFederatedQuery::IcebergHadoopTokenSelectAll [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartNo_Query >> GenericFederatedQuery::IcebergHadoopTokenSelectConstant >> CompatibilityInfo::BSControllerCompatible [GOOD] >> CompatibilityInfo::BSControllerIncompatible >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-NoDbAdmin-system [GOOD] >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-DbAdmin-system >> CompatibilityInfo::BSControllerIncompatible [GOOD] >> CompatibilityInfo::BSControllerIncompatibleWithDefault >> Deadlines::TestGetMirror3dc [GOOD] >> Deadlines::TestGet4Plus2Block >> CompatibilityInfo::BSControllerIncompatibleWithDefault [GOOD] >> CompatibilityInfo::BSControllerSuppressCompatibilityCheck [GOOD] >> CompatibilityInfo::VDiskMigration [GOOD] >> CompatibilityInfo::BSControllerMigration >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query >> TxUsage::Sinks_Oltp_WriteToTopics_3_Query >> Acceleration::TestDelayMultiplierPut4Plus2Block1Slow [GOOD] >> Acceleration::TestDelayMultiplierPutMirror3dc2Slow >> CompatibilityInfo::BSControllerMigration [GOOD] >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10000Inflight1BlobSize1000 >> GenericFederatedQuery::IcebergHadoopBasicSelectAll >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests10Inflight10BlobSize2000000 [GOOD] >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests100Inflight10BlobSize2000000 >> CommitOffset::DistributedTxCommit [GOOD] >> CommitOffset::DistributedTxCommit_ChildFirst >> DataShardVolatile::VolatileTxAbortedOnSplit [GOOD] >> DataShardVolatile::VolatileTxAbortedOnDrop >> Deadlines::TestGet4Plus2Block [GOOD] >> Deadlines::TestGetMirror3of4 >> DataShardVolatile::DistributedWriteThenCopyTable [GOOD] >> Deadlines::TestGetMirror3of4 [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectAll [GOOD] >> 
GenericFederatedQuery::IcebergHadoopSaSelectConstant >> DataShardVolatile::DistributedWriteThenBulkUpsert >> BlobPatching::StressMirror3of4 [GOOD] >> BlobPatching::StressMirror3dc >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-DbAdmin-system [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> Deadlines::TestGetMirror3of4 [GOOD] Test command err: RandomSeed# 2559447193431948771 2025-06-30T09:19:24.208274Z 9 00h01m40.010512s :BS_PROXY_PUT ERROR: [0a40d0bac6785ac6] Result# TEvPutResult {Id# [1:1:1:1:123:1000:0] Status# DEADLINE StatusFlags# { } ErrorReason# "Deadline timer hit" ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12 2025-06-30T09:19:24.613275Z 10 00h01m40.010512s :BS_PROXY_GET ERROR: [7aef808e49a49b1b] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:1:123:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "Deadline timer hit"} Marker# BPG29 2025-06-30T09:19:24.962074Z 9 00h01m40.010512s :BS_PROXY_GET ERROR: [6416e444bc29c809] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:1:123:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "Deadline timer hit"} Marker# BPG29 2025-06-30T09:19:25.293734Z 9 00h01m40.010512s :BS_PROXY_GET ERROR: [1c4154e9ecf21d4a] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:1:123:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "Deadline timer hit"} Marker# BPG29 >> GenericFederatedQuery::PostgreSQLOnPremSelectConstant [GOOD] >> GenericFederatedQuery::PostgreSQLSelectCount >> Acceleration::TestDelayMultiplierPutMirror3dc2Slow [GOOD] >> Acceleration::TestDelayMultiplierPut4Plus2Block2Slow >> TopicAutoscaling::ControlPlane_CreateAlterDescribe [GOOD] >> TopicAutoscaling::ControlPlane_DisableAutoPartitioning >> TopicAutoscaling::ControlPlane_CDC_Disable [GOOD] >> TopicAutoscaling::BalancingAfterSplit_sessionsWithPartition >> GenericFederatedQuery::ClickHouseManagedSelectConstant [GOOD] >> GenericFederatedQuery::ClickHouseSelectCount >> GenericFederatedQuery::YdbManagedSelectAll [GOOD] >> GenericFederatedQuery::YdbManagedSelectConstant >> KqpPg::TableDeleteWhere+useSink [GOOD] >> KqpPg::TableDeleteWhere-useSink >> GenericFederatedQuery::IcebergHiveTokenSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectConstant >> BlobPatching::StressMirror3dc [GOOD] >> BlobPatching::StressMirror3 >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_PQv1 >> TxUsage::WriteToTopic_Demo_13_Table [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectCount >> DataShardVolatile::VolatileTxAbortedOnDrop [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter+UseSink >> BasicUsage::ReadWithRestarts [GOOD] >> BasicUsage::ConflictingWrites >> TxUsage::WriteToTopic_Demo_13_Query >> DataShardVolatile::DistributedWriteThenBulkUpsert [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsertWithCdc |80.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest >> BlobPatching::StressMirror3 [GOOD] >> BlobPatching::StressBlock42 >> GenericFederatedQuery::IcebergHadoopSaSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectCount >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK_AutoCommit [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PQv1 >> BasicUsage::SimpleHandlers [GOOD] >> 
GenericFederatedQuery::IcebergHadoopBasicSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectConstant >> GenericFederatedQuery::PostgreSQLSelectCount [GOOD] >> GenericFederatedQuery::PostgreSQLFilterPushdown >> Acceleration::TestDelayMultiplierPut4Plus2Block2Slow [GOOD] >> Acceleration::TestDelayMultiplierGetMirror3dc1Slow ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::SimpleHandlers [GOOD] Test command err: 2025-06-30T09:18:36.736466Z :WaitEventBlocksBeforeDiscovery INFO: Random seed for debugging is 1751275116736456 2025-06-30T09:18:36.868808Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669351227510242:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:36.868901Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:36.878600Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669352466011267:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:36.878623Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:36.922074Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d70/r3tmp/tmpc3Bf3k/pdisk_1.dat 2025-06-30T09:18:36.936453Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:36.961311Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:36.967248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:36.967277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:36.969846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19324, node 1 2025-06-30T09:18:36.990223Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002d70/r3tmp/yandex3Ak7YX.tmp 2025-06-30T09:18:36.990239Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002d70/r3tmp/yandex3Ak7YX.tmp 2025-06-30T09:18:36.990318Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002d70/r3tmp/yandex3Ak7YX.tmp 2025-06-30T09:18:36.990366Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:36.999429Z INFO: TTestServer started on Port 23673 GrpcPort 19324 TClient is connected to server localhost:23673 PQClient connected to localhost:19324 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:18:37.037746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:37.037773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:37.039583Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:37.040879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:37.043325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... waiting... waiting... 2025-06-30T09:18:37.361857Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669355522478334:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.361887Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669355522478346:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.361896Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.362786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:37.366609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:18:37.366709Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669355522478348:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:18:37.418982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.419531Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669356760978850:2273], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.420182Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=YWQ3ZTdiNjgtZjMxNmExZWEtMjYyOWI2NGQtM2RjZGVlNTU=, ActorId: [2:7521669356760978786:2266], ActorState: ExecuteState, TraceId: 01jz0236sw3r151dhrwxwwbyp1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.420902Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.456649Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669355522478502:2697] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:37.461686Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669355522478512:2312], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.462276Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=Nzc0NDRkNWMtNzU5NzZjN2UtOTlkMDM2MDYtN2RjODNiZGU=, ActorId: [1:7521669355522478331:2295], ActorState: ExecuteState, TraceId: 01jz0236sd08kvdpgh7qa94rh8, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.462433Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.502711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.542526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:19324", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-06-30T09:18:37.603497Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jz02370a6c5tmhqnr8t8xr5k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id= ... ssion cookie 2 consumer shared/user session shared/user_3_2_15991933565188968692_v1 grpc read failed 2025-06-30T09:19:27.335254Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_3_2_15991933565188968692_v1 grpc closed 2025-06-30T09:19:27.335259Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_3_2_15991933565188968692_v1 is DEAD 2025-06-30T09:19:27.335291Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:299:0 2025-06-30T09:19:27.335299Z :INFO: [/Root] [/Root] [6756fbc9-e2045882-ce60dec-59059ea0] Counters: { Errors: 0 CurrentSessionLifetimeMs: 192 BytesRead: 4936800 MessagesRead: 300 BytesReadCompressed: 4936800 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:19:27.335395Z :INFO: [/Root] [/Root] [eff0c1a0-e000bc71-7416358d-aa30174d] Closing read session. 
Close timeout: 18446744073709.551615s 2025-06-30T09:19:27.335402Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:19:27.335407Z :INFO: [/Root] [/Root] [eff0c1a0-e000bc71-7416358d-aa30174d] Counters: { Errors: 0 CurrentSessionLifetimeMs: 192 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:19:27.335477Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|5532a8c-3675ca7-89efaddf-f00fbe73_0] Write session: close. Timeout = 0 ms 2025-06-30T09:19:27.335481Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|5532a8c-3675ca7-89efaddf-f00fbe73_0] Write session will now close 2025-06-30T09:19:27.335487Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|5532a8c-3675ca7-89efaddf-f00fbe73_0] Write session: aborting 2025-06-30T09:19:27.335551Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|5532a8c-3675ca7-89efaddf-f00fbe73_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:19:27.335556Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|5532a8c-3675ca7-89efaddf-f00fbe73_0] Write session: destroy 2025-06-30T09:19:27.335517Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669572676214394:2465] disconnected; active server actors: 1 2025-06-30T09:19:27.335525Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669572676214394:2465] client user disconnected session shared/user_3_2_15991933565188968692_v1 2025-06-30T09:19:27.335533Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-06-30T09:19:27.335541Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037893][rt3.dc1--test-topic] consumer user balancing. Sessions=2, Families=1, UnradableFamilies=0 [], RequireBalancing=0 [] 2025-06-30T09:19:27.335545Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037893][rt3.dc1--test-topic] consumer user start rebalancing. familyCount=1, sessionCount=2, desiredFamilyCount=0, allowPlusOne=1 2025-06-30T09:19:27.335549Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037893][rt3.dc1--test-topic] consumer user balancing duration: 0.000006s 2025-06-30T09:19:27.336043Z :INFO: [/Root] [/Root] [854dca10-64b4f529-2ac38d16-3bb644a0] Closing read session. Close timeout: 0.000000s 2025-06-30T09:19:27.336053Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:19:27.336059Z :INFO: [/Root] [/Root] [854dca10-64b4f529-2ac38d16-3bb644a0] Counters: { Errors: 0 CurrentSessionLifetimeMs: 194 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:19:27.336065Z :INFO: [/Root] [/Root] [6756fbc9-e2045882-ce60dec-59059ea0] Closing read session. 
Close timeout: 0.000000s 2025-06-30T09:19:27.336071Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:299:0 2025-06-30T09:19:27.336075Z :INFO: [/Root] [/Root] [6756fbc9-e2045882-ce60dec-59059ea0] Counters: { Errors: 0 CurrentSessionLifetimeMs: 193 BytesRead: 4936800 MessagesRead: 300 BytesReadCompressed: 4936800 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:19:27.336079Z :INFO: [/Root] [/Root] [eff0c1a0-e000bc71-7416358d-aa30174d] Closing read session. Close timeout: 0.000000s 2025-06-30T09:19:27.336083Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:19:27.336087Z :INFO: [/Root] [/Root] [eff0c1a0-e000bc71-7416358d-aa30174d] Counters: { Errors: 0 CurrentSessionLifetimeMs: 192 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:19:27.336092Z :INFO: [/Root] [/Root] [eff0c1a0-e000bc71-7416358d-aa30174d] Closing read session. Close timeout: 0.000000s 2025-06-30T09:19:27.336095Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:19:27.336098Z :INFO: [/Root] [/Root] [eff0c1a0-e000bc71-7416358d-aa30174d] Counters: { Errors: 0 CurrentSessionLifetimeMs: 192 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:19:27.336116Z :NOTICE: [/Root] [/Root] [eff0c1a0-e000bc71-7416358d-aa30174d] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:19:27.336144Z :INFO: [/Root] [/Root] [6756fbc9-e2045882-ce60dec-59059ea0] Closing read session. Close timeout: 0.000000s 2025-06-30T09:19:27.336148Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:299:0 2025-06-30T09:19:27.336152Z :INFO: [/Root] [/Root] [6756fbc9-e2045882-ce60dec-59059ea0] Counters: { Errors: 0 CurrentSessionLifetimeMs: 193 BytesRead: 4936800 MessagesRead: 300 BytesReadCompressed: 4936800 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:19:27.336157Z :NOTICE: [/Root] [/Root] [6756fbc9-e2045882-ce60dec-59059ea0] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:19:27.336170Z :INFO: [/Root] [/Root] [854dca10-64b4f529-2ac38d16-3bb644a0] Closing read session. Close timeout: 0.000000s 2025-06-30T09:19:27.336175Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:19:27.336179Z :INFO: [/Root] [/Root] [854dca10-64b4f529-2ac38d16-3bb644a0] Counters: { Errors: 0 CurrentSessionLifetimeMs: 194 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:19:27.336184Z :NOTICE: [/Root] [/Root] [854dca10-64b4f529-2ac38d16-3bb644a0] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:19:27.336956Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_6602872471532220205_v1 grpc read done: success# 0, data# { } 2025-06-30T09:19:27.336969Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer shared/user session shared/user_3_3_1984408563648952320_v1 grpc read done: success# 0, data# { } 2025-06-30T09:19:27.336970Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_3_1_6602872471532220205_v1 grpc read failed 2025-06-30T09:19:27.336974Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer shared/user session shared/user_3_3_1984408563648952320_v1 grpc read failed 2025-06-30T09:19:27.336978Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_3_1_6602872471532220205_v1 grpc closed 2025-06-30T09:19:27.336981Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer shared/user session shared/user_3_3_1984408563648952320_v1 grpc closed 2025-06-30T09:19:27.336987Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer shared/user session shared/user_3_3_1984408563648952320_v1 is DEAD 2025-06-30T09:19:27.336989Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_3_1_6602872471532220205_v1 is DEAD 2025-06-30T09:19:27.337138Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|5532a8c-3675ca7-89efaddf-f00fbe73_0 grpc read done: success: 0 data: 2025-06-30T09:19:27.337144Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: src_id|5532a8c-3675ca7-89efaddf-f00fbe73_0 grpc read failed 2025-06-30T09:19:27.337151Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: src_id|5532a8c-3675ca7-89efaddf-f00fbe73_0 grpc closed 2025-06-30T09:19:27.337154Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: src_id|5532a8c-3675ca7-89efaddf-f00fbe73_0 is DEAD 2025-06-30T09:19:27.337265Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:19:27.337270Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669572676214392:2464] disconnected; active server actors: 1 2025-06-30T09:19:27.337274Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669572676214392:2464] client user disconnected session shared/user_3_1_6602872471532220205_v1 2025-06-30T09:19:27.337288Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-06-30T09:19:27.337294Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669572676214398:2466] disconnected; active server actors: 1 2025-06-30T09:19:27.337296Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669572676214398:2466] client user disconnected session shared/user_3_3_1984408563648952320_v1 2025-06-30T09:19:27.337473Z node 4 
:PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_3_1_6602872471532220205_v1 2025-06-30T09:19:27.337500Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7521669572676214397:2475] destroyed 2025-06-30T09:19:27.337508Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7521669572676214441:2476] destroyed 2025-06-30T09:19:27.337525Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_6602872471532220205_v1 2025-06-30T09:19:27.337539Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. >> DataShardVolatile::UpsertNoLocksArbiter+UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter-UseSink >> GenericFederatedQuery::YdbManagedSelectConstant [GOOD] >> GenericFederatedQuery::YdbSelectCount >> GenericFederatedQuery::ClickHouseSelectCount [GOOD] >> GenericFederatedQuery::ClickHouseFilterPushdown >> GenericFederatedQuery::IcebergHiveTokenSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectCount |80.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest >> CostMetricsGetHugeMirror3dc::TestGetMirror3dcRequests100Inflight10BlobSize2000000 [GOOD] >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests1Inflight1BlobSize1000 >> Acceleration::TestDelayMultiplierGetMirror3dc1Slow [GOOD] >> Acceleration::TestDelayMultiplierGet4Plus2Block1Slow >> GenericFederatedQuery::IcebergHadoopTokenSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown >> BlobPatching::StressBlock42 [GOOD] >> BlobPatching::StressNone >> DataShardVolatile::UpsertNoLocksArbiter-UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter+UseSink |80.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest >> BlobPatching::StressNone [GOOD] >> BlobPatching::DiffsWithIncorectPatchedBlobPartId >> GenericFederatedQuery::IcebergHadoopSaSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown >> BlobPatching::DiffsWithIncorectPatchedBlobPartId [GOOD] >> BlobPatching::PatchBlock42 >> GroupLayoutSanitizer::Test3dc [GOOD] >> GroupLayoutSanitizer::TestBlock4Plus2 >> GenericFederatedQuery::IcebergHadoopBasicSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectCount |80.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest >> Acceleration::TestDelayMultiplierGet4Plus2Block1Slow [GOOD] >> Acceleration::TestDelayMultiplierGetMirror3dc2Slow ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_system_names/unittest >> TSchemeShardSysNames::ESchemeOpCreateView-Protect-DbAdmin-system [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:18:41.403697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:41.403737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:41.403744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:41.403752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:41.403771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:41.403777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:41.403792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:41.403814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:41.403995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:41.404114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:41.427424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:18:41.427482Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:41.435494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:41.435596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:41.435657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:41.437904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:41.438026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:41.438227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:41.438320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:41.439161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:41.439219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:41.439559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:41.439575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-30T09:18:41.439621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:41.439636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:41.439644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:41.439671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:18:41.441479Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:18:41.464371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:41.464464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:41.464534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:41.464542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:41.464592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:41.464602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:41.465696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:41.465747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:41.465824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:41.465835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:41.465842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts 
opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:41.465849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:41.466424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:41.466440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:41.466447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:41.466865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:41.466881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:41.466887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:41.466895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:41.467444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:41.469803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:41.469860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:41.470106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:41.470142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:18:41.470163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:41.470245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:18:41.470255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:41.470300Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:18:41.470316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:18:41.470907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:41.470918Z node 1 :FLAT_TX_SCHEMESHARD ... 30] was 2 2025-06-30T09:19:25.478081Z node 29 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 202, publications: 4, subscribers: 0 2025-06-30T09:19:25.478086Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 202, [OwnerId: 72057594046678944, LocalPathId: 127], 6 2025-06-30T09:19:25.478090Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 202, [OwnerId: 72057594046678944, LocalPathId: 128], 6 2025-06-30T09:19:25.478093Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 202, [OwnerId: 72057594046678944, LocalPathId: 129], 4 2025-06-30T09:19:25.478097Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 202, [OwnerId: 72057594046678944, LocalPathId: 130], 2 2025-06-30T09:19:25.478942Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 127 Version: 6 PathOwnerId: 72057594046678944, cookie: 202 2025-06-30T09:19:25.478970Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 127 Version: 6 PathOwnerId: 72057594046678944, cookie: 202 2025-06-30T09:19:25.478976Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 4, at schemeshard: 72057594046678944, txId: 202 2025-06-30T09:19:25.478983Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 202, pathId: [OwnerId: 72057594046678944, LocalPathId: 127], version: 6 2025-06-30T09:19:25.478990Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 127] was 2 2025-06-30T09:19:25.479245Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 128 Version: 6 PathOwnerId: 72057594046678944, cookie: 202 2025-06-30T09:19:25.479262Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 128 Version: 6 PathOwnerId: 72057594046678944, cookie: 202 2025-06-30T09:19:25.479271Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 202 
2025-06-30T09:19:25.479276Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 202, pathId: [OwnerId: 72057594046678944, LocalPathId: 128], version: 6 2025-06-30T09:19:25.479282Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 128] was 2 2025-06-30T09:19:25.479963Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 129 Version: 4 PathOwnerId: 72057594046678944, cookie: 202 2025-06-30T09:19:25.479985Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 129 Version: 4 PathOwnerId: 72057594046678944, cookie: 202 2025-06-30T09:19:25.479992Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 202 2025-06-30T09:19:25.479998Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 202, pathId: [OwnerId: 72057594046678944, LocalPathId: 129], version: 4 2025-06-30T09:19:25.480004Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 129] was 2 2025-06-30T09:19:25.480153Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 130 Version: 2 PathOwnerId: 72057594046678944, cookie: 202 2025-06-30T09:19:25.480167Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 130 Version: 2 PathOwnerId: 72057594046678944, cookie: 202 2025-06-30T09:19:25.480173Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 202 2025-06-30T09:19:25.480178Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 202, pathId: [OwnerId: 72057594046678944, LocalPathId: 130], version: 2 2025-06-30T09:19:25.480183Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 130] was 1 2025-06-30T09:19:25.480195Z node 29 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 202, subscribers: 0 2025-06-30T09:19:25.480750Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 202 2025-06-30T09:19:25.480785Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 202 2025-06-30T09:19:25.481225Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at 
schemeshard: 72057594046678944, cookie: 202 2025-06-30T09:19:25.481256Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 202 TestModificationResult got TxId: 202, wait until txId: 202 TestWaitNotification wait txId: 202 2025-06-30T09:19:25.481638Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 202: send EvNotifyTxCompletion 2025-06-30T09:19:25.481649Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 202 2025-06-30T09:19:25.481872Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 202, at schemeshard: 72057594046678944 2025-06-30T09:19:25.481899Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 202: got EvNotifyTxCompletionResult 2025-06-30T09:19:25.481921Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 202: satisfy waiter [29:2689:4677] TestWaitNotification: OK eventTxId 202 TestWaitNotification wait txId: 197 2025-06-30T09:19:25.482083Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 197: send EvNotifyTxCompletion 2025-06-30T09:19:25.482088Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 197 TestWaitNotification wait txId: 198 2025-06-30T09:19:25.482099Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 198: send EvNotifyTxCompletion 2025-06-30T09:19:25.482118Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 198 TestWaitNotification wait txId: 199 2025-06-30T09:19:25.482128Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 199: send EvNotifyTxCompletion 2025-06-30T09:19:25.482132Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 199 TestWaitNotification wait txId: 200 2025-06-30T09:19:25.482142Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 200: send EvNotifyTxCompletion 2025-06-30T09:19:25.482145Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 200 TestWaitNotification wait txId: 201 2025-06-30T09:19:25.482156Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 201: send EvNotifyTxCompletion 2025-06-30T09:19:25.482160Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 201 2025-06-30T09:19:25.482352Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 197, at schemeshard: 72057594046678944 2025-06-30T09:19:25.482385Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 197: got EvNotifyTxCompletionResult 2025-06-30T09:19:25.482390Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 197: satisfy waiter [29:2692:4680] 2025-06-30T09:19:25.482422Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 198, at schemeshard: 72057594046678944 
2025-06-30T09:19:25.482458Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 199, at schemeshard: 72057594046678944 2025-06-30T09:19:25.482467Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 198: got EvNotifyTxCompletionResult 2025-06-30T09:19:25.482471Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 198: satisfy waiter [29:2692:4680] 2025-06-30T09:19:25.482493Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 200, at schemeshard: 72057594046678944 2025-06-30T09:19:25.482512Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 199: got EvNotifyTxCompletionResult 2025-06-30T09:19:25.482515Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 199: satisfy waiter [29:2692:4680] 2025-06-30T09:19:25.482530Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 201, at schemeshard: 72057594046678944 2025-06-30T09:19:25.482546Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 200: got EvNotifyTxCompletionResult 2025-06-30T09:19:25.482550Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 200: satisfy waiter [29:2692:4680] 2025-06-30T09:19:25.482572Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 201: got EvNotifyTxCompletionResult 2025-06-30T09:19:25.482575Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 201: satisfy waiter [29:2692:4680] TestWaitNotification: OK eventTxId 197 TestWaitNotification: OK eventTxId 198 TestWaitNotification: OK eventTxId 199 TestWaitNotification: OK eventTxId 200 TestWaitNotification: OK eventTxId 201 >> GenericFederatedQuery::PostgreSQLFilterPushdown [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsertWithCdc [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenDrop >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10000Inflight100BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests1Inflight1BlobSize1000 >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests1Inflight1BlobSize1000 [GOOD] >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests10Inflight1BlobSize1000 ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::PostgreSQLFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 14867, MsgBus: 31913 2025-06-30T09:19:20.211103Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669542138150890:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:20.213003Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042f1/r3tmp/tmpDhKji3/pdisk_1.dat TServer::EnableGrpc on GrpcPort 14867, node 1 2025-06-30T09:19:20.301958Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:20.309308Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:20.309329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:20.309331Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:20.309388Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:20.316955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:20.316996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:20.318441Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31913 TClient is connected to server localhost:31913 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:20.412571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:20.415685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:20.746274Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669542138151458:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:20.746304Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:21.215298Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:21.224390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:21.313440Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669546433118886:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:21.313477Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:21.313643Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669546433118891:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:21.314636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:21.318945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:19:21.319056Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669546433118893:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:21.417060Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669546433118933:2392] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:21.572522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:21.673351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:21.873465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:21.993098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:22.091761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:22.195121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:22.226505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:22.585760Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:19:22.592708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:22.593285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715694:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:22.593579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol ... kloadService] [TPoolFetcherActor] ActorId: [4:7521669580100965418:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:29.142505Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:29.143443Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:29.147816Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521669580100965420:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:29.239605Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669580100965460:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:29.302382Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:29.373031Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:29.438178Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:29.506055Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:29.576856Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:29.632355Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:29.645951Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:30.007088Z node 4 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) Call DescribeTable. data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> GenericFederatedQuery::YdbSelectCount [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartNo_Query [GOOD] >> TTopicReaderTests::TestRun_ReadOneMessage >> GenericFederatedQuery::IcebergHiveTokenSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown >> DataShardVolatile::UpsertBrokenLockArbiter+UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter-UseSink >> GenericFederatedQuery::ClickHouseFilterPushdown [GOOD] >> Acceleration::TestDelayMultiplierGetMirror3dc2Slow [GOOD] >> Acceleration::TestDelayMultiplierGet4Plus2Block2Slow >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Table >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown [GOOD] >> Mirror3dc::GcQuorum [GOOD] >> Mirror3dcRestore::TestRestore ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::YdbSelectCount [GOOD] Test command err: Trying to start YDB, gRPC: 24226, MsgBus: 13806 2025-06-30T09:19:23.454583Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669553344701454:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:23.454610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042e7/r3tmp/tmp8ncc5G/pdisk_1.dat 2025-06-30T09:19:23.520161Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:23.520389Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669553344701431:2080] 1751275163454349 != 1751275163454352 TServer::EnableGrpc on GrpcPort 24226, node 1 2025-06-30T09:19:23.539758Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:23.539771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:23.539773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:23.539817Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13806 TClient is connected to server localhost:13806 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:19:23.596799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:23.596838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:23.597959Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:23.618246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:23.621458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:23.927561Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669553344702054:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:23.927600Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:24.457173Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:24.459961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:24.525452Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669557639669482:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:24.525481Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:24.525598Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669557639669487:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:24.526387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:24.528179Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669557639669489:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:24.626578Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669557639669529:2392] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:24.716711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:24.785736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:24.871110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:24.950426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:25.019528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:25.107291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:25.122700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:25.478206Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715689:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:19:25.485357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:25.486083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:25.486660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true p ... 9:28.750319Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:28.750356Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:28.750845Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:28.753227Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:19:28.756119Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:29.097404Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521669579817056094:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:29.097426Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:29.648367Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:29.649550Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:29.657939Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521669579817056222:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:29.657965Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521669579817056227:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:29.657971Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:29.658593Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:29.665481Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521669579817056229:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:29.737302Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521669579817056269:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:29.834305Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:29.929239Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.039086Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.158026Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:30.257816Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:30.375854Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.395411Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:30.691865Z node 3 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:19:30.704393Z node 3 :KQP_GATEWAY WARN: kqp_federated_query_helpers.cpp:320: DescribePath failed,
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2136: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:2136
: Error: Endpoint list is empty for database pgdb, cluster endpoint localhost:2136. Call DescribeTable. data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } columns { name: "col2" type { type_id: DOUBLE } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> TTopicWriterTests::TestEnterMessage_1KiB_No_Delimiter [GOOD] >> TTopicWriterTests::TestEnterMessage_Custom_Delimiter_Delimited [GOOD] >> KqpPg::TableDeleteWhere-useSink [GOOD] >> TTopicWriterTests::TestEnterMessage_ZeroSymbol_Delimited [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_NewlineDelimited [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::ClickHouseFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 18879, MsgBus: 10972 2025-06-30T09:19:20.336969Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669542005939332:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:20.337223Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004348/r3tmp/tmpeCaFVN/pdisk_1.dat 2025-06-30T09:19:20.406467Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:20.406972Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669542005939300:2080] 1751275160336733 != 1751275160336736 TServer::EnableGrpc on GrpcPort 18879, node 1 2025-06-30T09:19:20.431290Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:20.431301Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:20.431302Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:20.431338Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10972 TClient is connected to server localhost:10972 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:19:20.486650Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:20.486675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:20.487915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:19:20.506892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:20.511668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:20.855144Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669542005939922:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:20.855179Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:21.341513Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:21.345935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:21.426679Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669546300907349:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:21.426726Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:21.427095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669546300907355:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:21.428076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:21.431036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:19:21.431152Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669546300907357:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:21.500511Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669546300907397:2392] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:21.628664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:21.725385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:21.866387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:21.995969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:22.109117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:22.270812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:22.287011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:22.816463Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715699:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:19:22.832205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715702:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:22.832817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715700:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:22.833178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715701:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind ... NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:29.747931Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:29.748019Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521669580047196893:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:29.748828Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:29.751053Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521669580047196895:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:29.803282Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669580047196935:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:29.897654Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:29.997452Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.113412Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.246057Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:30.345446Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:30.436876Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.452098Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:30.917047Z node 4 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715699:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) Call DescribeTable. data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 23883, MsgBus: 26847 2025-06-30T09:19:21.345453Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669545658208095:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:21.357579Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004322/r3tmp/tmpKkc8rQ/pdisk_1.dat 2025-06-30T09:19:21.451471Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:21.454851Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669545658207887:2080] 1751275161339839 != 1751275161339842 TServer::EnableGrpc on GrpcPort 23883, node 1 2025-06-30T09:19:21.474645Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:21.474656Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:21.474658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:21.474695Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26847 2025-06-30T09:19:21.508466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:21.508500Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:21.510775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26847 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:19:21.588709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:21.592271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:22.055663Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669549953175809:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:22.055726Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:22.338923Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:22.359281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:22.392295Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669549953175937:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:22.392320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669549953175942:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:22.392334Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:22.393216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:22.395609Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669549953175944:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:22.470353Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669549953175985:2392] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:22.565894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:22.632621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:22.719282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:22.824934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:22.921354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:23.015889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:23.037326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:23.393293Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:19:23.400716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:23.401230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:23.402346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715694:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { ... or] ActorId: [4:7521669582347838332:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:30.373388Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669582347838372:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:30.464952Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:30.535014Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.621260Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.698392Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:30.762978Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:30.830039Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.844163Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:31.158089Z node 4 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 |80.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_Custom_Delimiter_Delimited [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests1Inflight1BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10Inflight1BlobSize1000 |80.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_NewlineDelimited [GOOD] >> TopicAutoscaling::ControlPlane_DisableAutoPartitioning [GOOD] >> TopicAutoscaling::ControlPlane_PauseAutoPartitioning >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10000Inflight1BlobSize1000 [GOOD] >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests10Inflight1BlobSize1000 [GOOD] >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10Inflight10BlobSize1000 >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests10000Inflight1BlobSize1000 >> GenericFederatedQuery::IcebergHadoopBasicSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::TableDeleteWhere-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 29529, MsgBus: 25859 2025-06-30T09:18:29.864517Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669323745925179:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:29.864586Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00331b/r3tmp/tmpp2bRab/pdisk_1.dat 2025-06-30T09:18:29.923431Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29529, node 1 2025-06-30T09:18:29.945551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:18:29.945583Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:18:29.945585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:18:29.945634Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25859 2025-06-30T09:18:29.967120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:29.967153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:29.968243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25859 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:30.016213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:30.303016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_17472595041006102391_17823623939509273229' Unable to coerce value for pgbpchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-30T09:18:30.379101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce__pgbpchar_17472595041006102391_5352544928909966465' Unable to coerce value for _pgbpchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-30T09:18:30.394930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) abcd 2025-06-30T09:18:30.416495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {abcd,abcd} 2025-06-30T09:18:30.442512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) abcd 2025-06-30T09:18:30.470555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {"abcd ","abcd "} 2025-06-30T09:18:30.516168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce_pgvarchar_17472595041006102391_17823623939509273229' Unable to coerce value for pgvarchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character varying(2) 2025-06-30T09:18:30.550619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce__pgvarchar_17472595041006102391_5352544928909966465' Unable to coerce value for _pgvarchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character varying(2) 2025-06-30T09:18:30.595688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) abcd 2025-06-30T09:18:30.640041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {abcd,abcd} 2025-06-30T09:18:30.679615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) abcd 2025-06-30T09:18:30.755956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {abcd,abcd} 2025-06-30T09:18:30.788351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715686:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce_pgbit_17472595041006102391_5866627432374416336' Unable to coerce value for pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(2) 2025-06-30T09:18:30.842875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:30.869152Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
: Error: Bulk upsert to table '/Root/Coerce__pgbit_17472595041006102391_11087201080355820517' Unable to coerce value for _pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(2) 2025-06-30T09:18:30.924780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 1111 2025-06-30T09:18:31.005159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {1111,1111} 2025-06-30T09:18:31.047520Z node 1 :FLAT_TX_SCHEMESHARD W ... is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715854:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 650 2025-06-30T09:19:30.664410Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.667184Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715856:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.679368Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.681888Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715858:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 829 2025-06-30T09:19:30.695149Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.698305Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715860:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.711368Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.716684Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715862:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 774 2025-06-30T09:19:30.777544Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.779996Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715864:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.793659Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.796526Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715866:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2950 2025-06-30T09:19:30.811167Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.813935Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715868:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.831248Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.835441Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715870:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 114 2025-06-30T09:19:30.851299Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.853673Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715872:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.866241Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.868984Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715874:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.880670Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 3802 2025-06-30T09:19:30.884656Z node 11 :FLAT_TX_SCHEMESHARD 
WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715876:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.898105Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.902257Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715878:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 4072 2025-06-30T09:19:30.927070Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.933412Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715880:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:30.951705Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:30.954834Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715882:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 142 2025-06-30T09:19:30.983771Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715883:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:31.012515Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715884:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 3615 2025-06-30T09:19:31.041398Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715885:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:31.067145Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:31.069156Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976715887:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 3614 2025-06-30T09:19:31.093078Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:31.097005Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715889:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:31.110779Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:31.113441Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715891:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 22 2025-06-30T09:19:31.127188Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:31.129885Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715893:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:31.152321Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-30T09:19:31.154891Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715895:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:31.168015Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill |80.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_Invalid_Encode [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform [GOOD] >> Acceleration::TestDelayMultiplierGet4Plus2Block2Slow [GOOD] >> Acceleration::TestMaxNumOfSlowDisksGetMirror3dc1Slow >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenDrop [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenSplit |80.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |80.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs >> TxUsage::WriteToTopic_Demo_20_RestartNo_Query [GOOD] >> TTopicWriterTests::TestTopicWriterParams_No_Delimiter [GOOD] >> TTopicWriterTests::TestTopicWriterParams_InvalidDelimiter [GOOD] >> 
DataShardVolatile::UpsertBrokenLockArbiter-UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiterRestart+UseSink |80.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 11717, MsgBus: 10334 2025-06-30T09:19:22.350663Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669549504183786:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:22.350861Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00430e/r3tmp/tmp7cwIm3/pdisk_1.dat 2025-06-30T09:19:22.444814Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:22.445409Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669549504183603:2080] 1751275162346395 != 1751275162346398 TServer::EnableGrpc on GrpcPort 11717, node 1 2025-06-30T09:19:22.462697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:22.462713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:22.462715Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:22.462787Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10334 TClient is connected to server localhost:10334 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:19:22.519199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:22.519248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:22.520542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:19:22.537217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:22.855426Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669549504184226:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:22.855467Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:23.344208Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:23.346768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:23.417113Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669553799151652:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:23.417172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:23.417284Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669553799151658:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:23.418275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:23.430299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:19:23.431463Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669553799151660:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:23.532229Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669553799151700:2392] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:23.689757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:23.767114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:23.871900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:23.970171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:24.041193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:24.127176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:24.140698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:24.532505Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:19:24.537271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715694:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:24.537631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:24.537892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { ... or] ActorId: [4:7521669582903309023:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:31.030416Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669587198276359:2389] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:31.114442Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:31.196009Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:31.287615Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:31.370896Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:31.496908Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:31.593137Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:31.606332Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:31.893699Z node 4 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table >> DataShardReadIteratorSysTables::ShouldRead >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey+EvWrite |80.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestTopicWriterParams_InvalidDelimiter [GOOD] >> TxUsage::WriteToTopic_Demo_13_Query [GOOD] >> CommitOffset::Commit_WithoutSession_ParentNotFinished [GOOD] >> CommitOffset::Commit_WithoutSession_ToPastParentPartition >> DataShardReadIterator::ShouldStopWhenNodeDisconnected >> Viewer::JsonStorageListingV1GroupIdFilter [GOOD] >> Viewer::JsonStorageListingV1NodeIdFilter >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10Inflight10BlobSize1000 [GOOD] >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests100Inflight10BlobSize1000 >> Acceleration::TestMaxNumOfSlowDisksGetMirror3dc1Slow [GOOD] >> Acceleration::TestMaxNumOfSlowDisksGet4Plus2Block1Slow >> DataShardReadIterator::ShouldHandleReadAck >> DataShardReadIterator::ShouldReadRangeInclusiveEndsCellVec >> TxUsage::WriteToTopic_Demo_14_Table >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10Inflight1BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10000Inflight1BlobSize1000 >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown [GOOD] |80.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |80.1%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut >> TxUsage::ReadRuleGeneration [GOOD] >> DataShardReadIterator::ShouldReadKeyCellVec >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query >> DataShardReadIterator::ShouldReverseReadMultipleKeys ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 2983, MsgBus: 18693 2025-06-30T09:19:23.547656Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669552769299486:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:23.547906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042fc/r3tmp/tmpGG11Jl/pdisk_1.dat 2025-06-30T09:19:23.633949Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:23.638322Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669552769299303:2080] 1751275163545470 != 1751275163545473 TServer::EnableGrpc on GrpcPort 2983, node 1 2025-06-30T09:19:23.650037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:23.650070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:23.657363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-30T09:19:23.659999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:23.660016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:23.660019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:23.660069Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18693 TClient is connected to server localhost:18693 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:23.755870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:23.758372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:24.037103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669557064267224:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:24.037134Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:24.546721Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:24.548878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:24.573077Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669557064267353:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:24.573103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669557064267358:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:24.573109Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:24.573884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:24.576246Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669557064267360:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:24.634934Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669557064267400:2392] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:24.729765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:24.811056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:24.887375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:24.988127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:25.056411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:25.124871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:25.138517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:25.486354Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:19:25.497076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:25.497332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:25.497508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { ca ... 
t\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:32.415974Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:32.516304Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:32.615609Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:32.712264Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:32.800834Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:32.887020Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:32.943698Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:33.307171Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> DataShardVolatile::DistributedWriteLostPlanThenSplit [GOOD] >> DataShardVolatile::DistributedOutOfOrderFollowerConsistency >> DataShardReadIterator::ShouldRangeReadReverseLeftInclusive >> Acceleration::TestMaxNumOfSlowDisksGet4Plus2Block1Slow [GOOD] >> Acceleration::TestMaxNumOfSlowDisksPutMirror3dc1Slow >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests100Inflight10BlobSize1000 [GOOD] >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10000Inflight1000BlobSize1000 >> DataShardVolatile::UpsertNoLocksArbiterRestart+UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiterRestart-UseSink >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query [GOOD] >> DataShardReadIteratorSysTables::ShouldRead [GOOD] >> DataShardReadIteratorSysTables::ShouldNotReadUserTableUsingLocalTid >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table >> Mirror3dcRestore::TestRestore [GOOD] >> Mirror3of4::Compaction ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 22413, MsgBus: 4055 2025-06-30T09:19:24.993967Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669558487635357:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:24.993999Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042de/r3tmp/tmp8CG2dp/pdisk_1.dat 2025-06-30T09:19:25.082770Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22413, node 1 2025-06-30T09:19:25.096648Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:25.096709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:25.097711Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:25.100058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:25.100073Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:25.100076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:25.100128Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4055 TClient is connected to server localhost:4055 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:25.179173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:25.182011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:25.424701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669562782603255:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:25.424729Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:25.996504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:25.998328Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:26.059802Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669567077570681:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:26.059835Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:26.059860Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669567077570687:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:26.060631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:26.062266Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669567077570689:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:26.157989Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669567077570729:2392] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:26.282533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:26.370368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:26.451803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:26.552214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:26.657221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:26.740927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:26.771129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:27.104582Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:19:27.119537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715694:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:27.119828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:27.120032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mappin ... fault, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:19:33.407510Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521669597613221959:2391] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:33.513603Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:33.617641Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:33.691766Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:33.809015Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:33.870209Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:33.940932Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:33.952067Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:19:34.220078Z node 4 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey-EvWrite >> DataShardReadIteratorConsistency::LocalSnapshotReadWithPlanQueueRace >> Acceleration::TestMaxNumOfSlowDisksPutMirror3dc1Slow [GOOD] >> Acceleration::TestMaxNumOfSlowDisksPut4Plus2Block1Slow >> DataShardReadIterator::ShouldReadRangeInclusiveEndsCellVec [GOOD] >> DataShardReadIterator::ShouldReadRangeInclusiveEndsArrow >> DataShardReadIterator::ShouldHandleReadAck [GOOD] >> DataShardReadIterator::ShouldHandleOutOfOrderReadAck >> DataShardReadIterator::ShouldReverseReadMultipleKeys [GOOD] >> DataShardReadIterator::ShouldReverseReadMultipleKeysOneByOne >> TopicAutoscaling::BalancingAfterSplit_sessionsWithPartition [GOOD] >> TPersQueueMirrorer::ValidStartStream >> DataShardReadIterator::ShouldReadKeyCellVec [GOOD] >> DataShardReadIterator::ShouldReadKeyArrow >> DataShardReadIteratorSysTables::ShouldNotReadUserTableUsingLocalTid [GOOD] >> DataShardReadIteratorSysTables::ShouldForbidSchemaVersion >> DataShardReadIterator::ShouldReadRangeCellVec >> CompressExecutor::TestExecutorMemUsage [GOOD] >> DataShardReadIterator::ShouldRangeReadReverseLeftInclusive [GOOD] >> DataShardReadIterator::ShouldRangeReadReverseLeftNonInclusive >> DataShardVolatile::UpsertNoLocksArbiterRestart-UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiterRestart+UseSink >> DataShardVolatile::DistributedOutOfOrderFollowerConsistency [GOOD] >> DataShardVolatile::DistributedWriteRSNotAckedBeforeCommit >> DataShardReadIteratorConsistency::LocalSnapshotReadWithPlanQueueRace [GOOD] >> DataShardReadIteratorConsistency::LocalSnapshotReadHasRequiredDependencies ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> CompressExecutor::TestExecutorMemUsage [GOOD] Test command err: 2025-06-30T09:18:19.870940Z :WriteAndReadSomeMessagesWithAsyncCompression INFO: Random seed for debugging is 1751275099870933 2025-06-30T09:18:23.896182Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669293902983919:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:23.941789Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669294858474285:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:23.941935Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000f06/r3tmp/tmpBZCnD3/pdisk_1.dat 2025-06-30T09:18:23.944302Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:23.961485Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:23.968327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:24.005326Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:24.007547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:24.007566Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:24.012046Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:24.047662Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 12423, node 1 2025-06-30T09:18:24.052356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:24.052396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:24.054414Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:24.056295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:24.065378Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/000f06/r3tmp/yandextiGMl4.tmp 2025-06-30T09:18:24.065392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/000f06/r3tmp/yandextiGMl4.tmp 2025-06-30T09:18:24.065457Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/000f06/r3tmp/yandextiGMl4.tmp 2025-06-30T09:18:24.065512Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:24.072161Z INFO: TTestServer started on Port 23156 GrpcPort 12423 TClient is connected to server localhost:23156 PQClient connected to localhost:12423 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:24.113985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 
2025-06-30T09:18:24.440997Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669298197951971:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:24.440998Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669298197951963:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:24.441053Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:24.441844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:24.446468Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669298197951978:2300], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720661 completed, doublechecking } 2025-06-30T09:18:24.483487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:24.524409Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669298197952127:2661] txid# 281474976720663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:24.545832Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669298197952144:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:24.545848Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669299153441724:2274], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:24.545990Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=OTZiY2JkZC02NTkzNWRhNy1jMTg3MWMxZS1kMzhiM2EwZg==, ActorId: [2:7521669299153441684:2268], ActorState: ExecuteState, TraceId: 01jz022t8e8dxwrwtd7z8mvfg7, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:24.545990Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NDhkODIzMzctZjZkZjg5MTAtOWYwMzU2NjctZTUyNmUyMDk=, ActorId: [1:7521669298197951961:2294], ActorState: ExecuteState, TraceId: 01jz022t5r1xje9850ynhja1fk, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:24.546667Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:24.546656Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:24.889040Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:24.907482Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:25.555841Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669302492919535:2318], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:25.556548Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=MjNiYzU1MzMtNDQwY2MyN2QtNTVjMzkzMWYtYjM0NTYyZWM=, ActorId: [1:7521669302492919533:2317], ActorState: ExecuteState, TraceId: 01jz022v8b50rs4w0zwbnxvbrh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:25.556679Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response ... server" ip=ipv6:[::1]:43500 proto=v1 topic=test-topic durationSec=0 2025-06-30T09:19:34.943469Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:19:34.943927Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 3 sessionId: describe result for acl check 2025-06-30T09:19:34.943964Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-30T09:19:34.943970Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-30T09:19:34.943973Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-30T09:19:34.943979Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [13:7521669601225074113:2518] (SourceId=test-message-group-id, PreferedPartition=(NULL)) StartKqpSession 2025-06-30T09:19:34.944572Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [13:7521669601225074113:2518] (SourceId=test-message-group-id, PreferedPartition=(NULL)) Select from the table 2025-06-30T09:19:35.107889Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715700. Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-30T09:19:35.107951Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [13:7521669601225074124:2520] TxId: 281474976715700. Ctx: { TraceId: 01jz024z10518dpj9rbsgtz3ce, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MmQ4NTk3MjEtOWVkY2ZmY2ItNGE3YWU4Y2EtOGQ1MmQ5OGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-30T09:19:35.108037Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=13&id=MmQ4NTk3MjEtOWVkY2ZmY2ItNGE3YWU4Y2EtOGQ1MmQ5OGE=, ActorId: [13:7521669601225074114:2520], ActorState: ExecuteState, TraceId: 01jz024z10518dpj9rbsgtz3ce, Create QueryResponse for error on request, msg: 2025-06-30T09:19:35.108511Z node 13 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [13:7521669601225074113:2518] (SourceId=test-message-group-id, PreferedPartition=(NULL)) ReplyError: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=MmQ4NTk3MjEtOWVkY2ZmY2ItNGE3YWU4Y2EtOGQ1MmQ5OGE=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jz024z10518dpj9rbv0y2cts" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 2025-06-30T09:19:35.108542Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 3 reason: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=MmQ4NTk3MjEtOWVkY2ZmY2ItNGE3YWU4Y2EtOGQ1MmQ5OGE=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jz024z10518dpj9rbv0y2cts" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 sessionId: 2025-06-30T09:19:35.108799Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: is DEAD Test retry state: get retry delay 2025-06-30T09:19:35.109028Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9df9870b-e47f79ae-d077703d-2af5e2c6_0] Got error. Status: UNAVAILABLE, Description:
: Error: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=MmQ4NTk3MjEtOWVkY2ZmY2ItNGE3YWU4Y2EtOGQ1MmQ5OGE=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jz024z10518dpj9rbv0y2cts" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 , code: 500001 2025-06-30T09:19:35.109038Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9df9870b-e47f79ae-d077703d-2af5e2c6_0] Write session will restart in 2.000000s 2025-06-30T09:19:35.109060Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9df9870b-e47f79ae-d077703d-2af5e2c6_0] Write session: Do CDS request 2025-06-30T09:19:35.109066Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9df9870b-e47f79ae-d077703d-2af5e2c6_0] Do schedule cds request after 2000 ms 2025-06-30T09:19:35.127555Z node 14 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720682. Failed to resolve tablet: 72075186224037888 after several retries. 2025-06-30T09:19:35.127613Z node 14 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [14:7521669599573458428:2447] TxId: 281474976720682. Ctx: { TraceId: 01jz024z0tbxbra799dx32a4mq, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=NzZmYjNlYjktMmZhZGY1MGQtYzFkMWI4YTMtYzgzNzRmOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037888 after several retries. 2025-06-30T09:19:35.127717Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=14&id=NzZmYjNlYjktMmZhZGY1MGQtYzFkMWI4YTMtYzgzNzRmOTM=, ActorId: [14:7521669599573458415:2447], ActorState: ExecuteState, TraceId: 01jz024z0tbxbra799dx32a4mq, Create QueryResponse for error on request, msg: 2025-06-30T09:19:35.128095Z node 14 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037888 after several retries." severity: 1 } TxMeta { id: "01jz024z1r1wbrdr4n4ds71bw4" } } YdbStatus: UNAVAILABLE ConsumedRu: 19 } 2025-06-30T09:19:35.284175Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715702. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:19:35.284234Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [13:7521669605520041477:2522] TxId: 281474976715702. Ctx: { TraceId: 01jz024z5n268gqf5cycxdgfk4, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzQ0MzA0MWItNWUwMTMyNjYtZmE2NTBmYS04MWJiMmYxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:19:35.284335Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=13&id=NzQ0MzA0MWItNWUwMTMyNjYtZmE2NTBmYS04MWJiMmYxZQ==, ActorId: [13:7521669605520041457:2522], ActorState: ExecuteState, TraceId: 01jz024z5n268gqf5cycxdgfk4, Create QueryResponse for error on request, msg: 2025-06-30T09:19:35.284734Z node 13 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jz024z6kaxr4yzpw8rejx6fk" } } YdbStatus: UNAVAILABLE ConsumedRu: 19 } 2025-06-30T09:19:35.834721Z node 14 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720684. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:19:35.834797Z node 14 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [14:7521669603868425807:2454] TxId: 281474976720684. Ctx: { TraceId: 01jz024zqsfpbc9e2t0thvcmxf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=MWI0YzRkZWEtN2QxYWI3ODctMTMxNzBlNDktNmE1YWFlNTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:19:35.834894Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=14&id=MWI0YzRkZWEtN2QxYWI3ODctMTMxNzBlNDktNmE1YWFlNTQ=, ActorId: [14:7521669603868425804:2454], ActorState: ExecuteState, TraceId: 01jz024zqsfpbc9e2t0thvcmxf, Create QueryResponse for error on request, msg: 2025-06-30T09:19:35.835310Z node 14 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jz024zqsfpbc9e2t0xk96r69" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-30T09:19:35.836065Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715704. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:19:35.836111Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [13:7521669605520041563:2529] TxId: 281474976715704. Ctx: { TraceId: 01jz024zqw6apt1j557xxyb6pw, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MjBjMzM5Y2ItNDk2MmNlZWMtOGRmYmE0OWMtODAwY2E4NzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:19:35.836173Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=13&id=MjBjMzM5Y2ItNDk2MmNlZWMtOGRmYmE0OWMtODAwY2E4NzQ=, ActorId: [13:7521669605520041560:2529], ActorState: ExecuteState, TraceId: 01jz024zqw6apt1j557xxyb6pw, Create QueryResponse for error on request, msg: 2025-06-30T09:19:35.836452Z node 13 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jz024zqw6apt1j5581kkfc1j" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-30T09:19:35.941666Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9df9870b-e47f79ae-d077703d-2af5e2c6_0] Write session: close. Timeout = 0 ms 2025-06-30T09:19:35.941682Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9df9870b-e47f79ae-d077703d-2af5e2c6_0] Write session will now close 2025-06-30T09:19:35.941691Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9df9870b-e47f79ae-d077703d-2af5e2c6_0] Write session: aborting 2025-06-30T09:19:35.941881Z :WARNING: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9df9870b-e47f79ae-d077703d-2af5e2c6_0] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-06-30T09:19:35.941886Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9df9870b-e47f79ae-d077703d-2af5e2c6_0] Write session: destroy >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange+EvWrite >> DataShardReadIterator::ShouldReadRangeInclusiveEndsArrow [GOOD] >> DataShardReadIterator::ShouldReadRangeReverse >> DataShardReadIterator::ShouldHandleOutOfOrderReadAck [GOOD] >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeRead >> Acceleration::TestMaxNumOfSlowDisksPut4Plus2Block1Slow [GOOD] >> Acceleration::TestMaxNumOfSlowDisksPutMirror3dc2Slow >> DataShardReadIterator::ShouldStopWhenNodeDisconnected [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared-Volatile-BreakLocks >> DataShardReadIterator::ShouldReverseReadMultipleKeysOneByOne [GOOD] >> DataShardReadIterator::ShouldReverseReadMultipleRanges >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests10000Inflight1BlobSize1000 [GOOD] >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests2Inflight2BlobSize1000 >> DataShardReadIteratorSysTables::ShouldForbidSchemaVersion [GOOD] >> DataShardReadIteratorSysTables::ShouldNotAllowArrow >> DataShardReadIterator::ShouldReadKeyArrow [GOOD] >> DataShardReadIterator::ShouldReadKeyOnlyValueColumn >> YdbProxy::ListDirectory >> TTopicReaderTests::TestRun_ReadOneMessage [GOOD] >> TTopicReaderTests::TestRun_ReadTwoMessages_With_Limit_1 >> Acceleration::TestMaxNumOfSlowDisksPutMirror3dc2Slow [GOOD] >> Acceleration::TestMaxNumOfSlowDisksPut4Plus2Block2Slow >> DataShardReadIterator::ShouldReadRangeCellVec [GOOD] >> DataShardReadIterator::ShouldReadRangeArrow >> DataShardVolatile::UpsertBrokenLockArbiterRestart+UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiterRestart-UseSink >> DataShardReadIterator::ShouldRangeReadReverseLeftNonInclusive [GOOD] >> DataShardReadIterator::ShouldNotReadAfterCancel >> TopicAutoscaling::ControlPlane_PauseAutoPartitioning 
[GOOD] >> TopicAutoscaling::ControlPlane_CDC_Enable >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10000Inflight1BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests2Inflight2BlobSize1000 >> DataShardReadIteratorConsistency::LocalSnapshotReadHasRequiredDependencies [GOOD] >> DataShardReadIteratorConsistency::LocalSnapshotReadNoUnnecessaryDependencies >> YdbProxy::ListDirectory [GOOD] >> YdbProxy::DropTopic >> TopicAutoscaling::ReadingAfterSplitTest_PQv1 [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_AutoscaleAwareSDK >> DataShardVolatile::DistributedWriteRSNotAckedBeforeCommit [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare+UseSink >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeRead [GOOD] >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeReadReverse >> DataShardReadIterator::ShouldReadRangeReverse [GOOD] >> DataShardReadIterator::ShouldReadRangeInclusiveEndsMissingLeftRight >> YdbProxy::DropTopic [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange-EvWrite >> DataShardReadIterator::TryCommitLocksPrepared-Volatile-BreakLocks [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared+Volatile-BreakLocks >> DataShardReadIterator::ShouldReverseReadMultipleRanges [GOOD] >> DataShardReadIterator::ShouldReverseReadMultipleRangesOneByOneWithAcks >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests2Inflight2BlobSize1000 [GOOD] >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests10Inflight10BlobSize1000 >> TxUsage::WriteToTopic_Demo_14_Table [GOOD] >> DataShardReadIteratorSysTables::ShouldNotAllowArrow [GOOD] >> ReadIteratorExternalBlobs::ExtBlobs >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests2Inflight2BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10Inflight10BlobSize1000 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DropTopic [GOOD] Test command err: 2025-06-30T09:19:38.055985Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669617934527819:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:38.056012Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cf5/r3tmp/tmpx48XuB/pdisk_1.dat 2025-06-30T09:19:38.123470Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:9270 TServer::EnableGrpc on GrpcPort 25751, node 1 2025-06-30T09:19:38.161056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:38.161076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:38.161079Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:38.161146Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9270 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:19:38.201212Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:38.201250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:38.202509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:38.212526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:38.651237Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669620083213189:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:38.651263Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cf5/r3tmp/tmpyHW6Qo/pdisk_1.dat 2025-06-30T09:19:38.663678Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:25365 TServer::EnableGrpc on GrpcPort 22863, node 2 2025-06-30T09:19:38.686313Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:38.686325Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:38.686328Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:38.686373Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25365 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:38.751730Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:38.751767Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:38.752803Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:38.755154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:19:38.826378Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-30T09:19:38.830327Z node 2 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-30T09:19:38.830463Z node 2 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-30T09:19:38.831675Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669620083213897:2392] txid# 281474976715660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } >> TxUsage::WriteToTopic_Demo_14_Query >> DataShardReadIterator::ShouldReadRangeArrow [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestCellVec >> DataShardReadIterator::ShouldReadKeyOnlyValueColumn [GOOD] >> DataShardReadIterator::ShouldReadKeyValueColumnAndSomeKeyColumn >> DataShardVolatile::DistributedUpsertRestartBeforePrepare+UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare-UseSink >> DataShardReadIterator::ShouldNotReadAfterCancel [GOOD] >> DataShardReadIterator::ShouldLimitReadRangeChunk1Limit100 >> DataShardReadIteratorConsistency::LocalSnapshotReadNoUnnecessaryDependencies [GOOD] >> DataShardReadIteratorConsistency::LocalSnapshotReadWithConcurrentWrites >> Acceleration::TestMaxNumOfSlowDisksPut4Plus2Block2Slow [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiterRestart-UseSink [GOOD] >> DataShardVolatile::UpsertDependenciesShardsRestart+UseSink >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_AutoscaleAwareSDK >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10Inflight10BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests100Inflight10BlobSize1000 >> YdbProxy::ReadTopic >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeReadReverse [GOOD] >> DataShardReadIterator::ShouldForbidDuplicatedReadId >> DataShardReadIterator::ShouldReadRangeInclusiveEndsMissingLeftRight [GOOD] >> DataShardReadIterator::ShouldReadRangeNonInclusiveEnds ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> Acceleration::TestMaxNumOfSlowDisksPut4Plus2Block2Slow [GOOD] Test command err: RandomSeed# 1535203256227422197 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 32 } Cost# 1376 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 33 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 32 } Cost# 1376 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 33 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 35 } Cost# 1376 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 
150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 36 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 35 } Cost# 1376 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 36 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 32 } Cost# 1376 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 33 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 32 } Cost# 1376 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 33 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 35 } Cost# 1376 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 36 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 35 } Cost# 1376 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 36 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 75 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 76 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 78 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 79 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 76 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 77 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 77 } Cost# 644 ExtQueueId# PutTabletLog 
IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 78 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 78 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 79 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 76 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 77 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 77 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 78 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 75 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 76 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 78 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 79 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 76 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 77 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS 
MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 77 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 78 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 75 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 76 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 76 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 77 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 77 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 78 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 75 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 76 }}}} 
1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 78 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog ... {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 75 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 76 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 78 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 79 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 76 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 77 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 77 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 78 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 75 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 76 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 76 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 77 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } 
Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 77 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 78 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 75 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 76 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 78 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 79 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:47.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 77 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 78 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 32 } Cost# 1376 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 33 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 32 } Cost# 1376 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 33 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 78 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 79 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult 
Status# OK ID# [1:1:1:1:1:1024:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 76 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 77 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 78 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 79 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 76 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 77 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 75 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 76 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 76 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 77 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 75 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { 
SequenceId: 1 MsgId: 76 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 74 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 75 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 75 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 76 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 73 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 74 }}}} 1970-01-01T00:02:46.060512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:1:1024:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 78 } Cost# 644 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 79 }}}} >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests10Inflight10BlobSize1000 [GOOD] >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests100Inflight10BlobSize1000 >> DataShardReadIterator::ShouldReverseReadMultipleRangesOneByOneWithAcks [GOOD] >> DataShardReadIterator::ShouldReturnMvccSnapshotFromFuture >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Table [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare-UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPrepare+UseSink >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Query >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests100Inflight10BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10000Inflight1000BlobSize1000 >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips+EvWrite >> DataShardReadIterator::TryCommitLocksPrepared+Volatile-BreakLocks [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared-Volatile+BreakLocks >> DataShardReadIterator::ShouldLimitReadRangeChunk1Limit100 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit98 >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestCellVec [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestArrow >> Viewer::JsonStorageListingV1NodeIdFilter [GOOD] >> Viewer::JsonStorageListingV1PDiskIdFilter |80.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |80.1%| [LD] {RESULT} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut >> 
YdbProxy::RemoveDirectory >> DataShardReadIterator::ShouldReadKeyValueColumnAndSomeKeyColumn [GOOD] >> DataShardReadIterator::ShouldReadMultipleKeys >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query [GOOD] >> DataShardReadIteratorConsistency::LocalSnapshotReadWithConcurrentWrites [GOOD] >> DataShardReadIteratorConsistency::Bug_7674_IteratorDuplicateRows >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10000Inflight1000BlobSize1000 [GOOD] >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests100Inflight10BlobSize1000 [GOOD] >> DataShardVolatile::UpsertDependenciesShardsRestart+UseSink [GOOD] >> DataShardVolatile::UpsertDependenciesShardsRestart-UseSink >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests10000Inflight1000BlobSize1000 >> YdbProxy::RemoveDirectory [GOOD] >> YdbProxy::StaticCreds >> DataShardReadIterator::ShouldReadRangeNonInclusiveEnds [GOOD] >> DataShardReadIterator::ShouldReadRangeLeftInclusive >> DataShardReadIterator::ShouldForbidDuplicatedReadId [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1000 >> DataShardVolatile::DistributedUpsertRestartAfterPrepare+UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPrepare-UseSink >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table [GOOD] >> YdbProxy::ReadTopic [GOOD] >> YdbProxy::ReadNonExistentTopic ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10000Inflight1000BlobSize1000 [GOOD] Test command err: RandomSeed# 7087206611599721123 2025-06-30T09:19:24.327601Z 1 00h00m00.010512s :BS_LOCALRECOVERY CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# false EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {[SyncerState 12][HugeBlobEntryPoint 1]} ReadLogReplies# {}} reason# Entry point for Syncer check failed, ErrorReason# Versions 
are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 6 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 6 } } status# ERROR;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-30T09:19:24.331172Z 1 00h00m30.000512s :BS_PROXY_PUT ERROR: [21e97dcb13349628] Result# TEvPutResult {Id# [1:1:1:1:3:4:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:1:1:3:4:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "BS_QUEUE: VDISK_ERROR_STATE status in response", ] } ] Part situations# [ { OrderNumber# 0 Situations# E } ] " ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12 2025-06-30T09:19:24.470886Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.472256Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { 
Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.473408Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.474516Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.475681Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.476722Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not 
compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.477721Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: ... 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.576325Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.577337Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } 
ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.578373Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.579458Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.580496Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 
Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.581499Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.582518Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.584130Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } 2025-06-30T09:19:24.585739Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: 
{BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } } >> TPQTest::TestSourceIdDropBySourceIdCount [GOOD] >> TPQTest::TestStorageRetention >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartNo_Table >> YdbProxy::StaticCreds [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit98 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit99 >> YdbProxy::ReadNonExistentTopic [GOOD] >> TAsyncIndexTests::SplitBothWithReboots[PipeResets] [GOOD] >> TTxDataShardMiniKQL::WriteAndReadMany [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::StaticCreds [GOOD] Test command err: 2025-06-30T09:19:41.959362Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669632114178398:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:41.960793Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002ced/r3tmp/tmpWgHAbD/pdisk_1.dat 2025-06-30T09:19:42.013623Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:29318 TServer::EnableGrpc on GrpcPort 22576, node 1 2025-06-30T09:19:42.039403Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:42.039420Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:42.039422Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:42.039474Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29318 2025-06-30T09:19:42.061108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:42.061139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:42.062237Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:42.092891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:42.105849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) 2025-06-30T09:19:42.113032Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669636409146284:2319] txid# 281474976715660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-30T09:19:42.399028Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669633505809440:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:42.399071Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002ced/r3tmp/tmp5XW6dQ/pdisk_1.dat 2025-06-30T09:19:42.412109Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:25564 TServer::EnableGrpc on GrpcPort 29223, node 2 2025-06-30T09:19:42.446577Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:42.446589Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:42.446590Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:42.446629Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25564 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:42.499516Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:42.499561Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:42.500621Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:42.502972Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:42.507561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275182552 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user1" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 1 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 ... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275182552 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user1" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 ... (TRUNCATED) |80.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes >> DataShardReadIterator::ShouldReturnMvccSnapshotFromFuture [GOOD] >> DataShardReadIterator::ShouldRollbackLocksWhenWrite >> DataShardReadIteratorConsistency::Bug_7674_IteratorDuplicateRows [GOOD] >> DataShardReadIteratorConsistency::LeaseConfirmationNotOutOfOrder >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestArrow [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestCellVec >> TPersQueueMirrorer::ValidStartStream [GOOD] |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |80.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix+EvWrite ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::ReadNonExistentTopic [GOOD] Test command err: 2025-06-30T09:19:40.539093Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669625650044409:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:40.539155Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cf1/r3tmp/tmpqDxcSK/pdisk_1.dat 2025-06-30T09:19:40.609419Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:18651 TServer::EnableGrpc on GrpcPort 25599, node 1 2025-06-30T09:19:40.640893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:40.640923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:40.642134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:40.649585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:40.649599Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:40.649601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-30T09:19:40.649643Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18651 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:40.713151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:41.541241Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:41.543446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:41.608069Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669629945012558:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:41.608086Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669629945012550:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:41.608102Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:41.608778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:41.610423Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669629945012564:2329], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:19:41.672931Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669629945012604:2461] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:41.808228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:41.882605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:41.970582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:19:42.053392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:19:42.138861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:19:42.643228Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669636039944235:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:42.643289Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cf1/r3tmp/tmp2ELPPm/pdisk_1.dat 2025-06-30T09:19:42.658620Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:64404 TServer::EnableGrpc on GrpcPort 30797, node 2 2025-06-30T09:19:42.689710Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:42.689723Z node 2 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:42.689726Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:42.689789Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64404 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:42.743559Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:42.743594Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:42.744704Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:42.747559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
>> BasicUsage::ConflictingWrites [GOOD] >> DataShardReadIterator::ShouldReadMultipleKeys [GOOD] >> DataShardReadIterator::ShouldReadMultipleKeysOneByOne >> Describe::LocationWithKillTablets >> DataShardReadIterator::TryCommitLocksPrepared-Volatile+BreakLocks [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared+Volatile+BreakLocks >> YdbProxy::DropTable >> DataShardVolatile::DistributedUpsertRestartAfterPrepare-UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPlan ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::WriteAndReadMany [GOOD] Test command err: 2025-06-30T09:17:55.714058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:55.714089Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:55.714621Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:17:55.718534Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:17:55.718690Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:136:2157] 2025-06-30T09:17:55.718801Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:17:55.730553Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:17:55.732998Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:17:55.733035Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:17:55.733224Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-30T09:17:55.733235Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-30T09:17:55.733244Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-30T09:17:55.733315Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:17:55.733333Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:17:55.733347Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:205:2157] in generation 2 2025-06-30T09:17:55.762460Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:17:55.767808Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-30T09:17:55.767902Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:17:55.767923Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:220:2216] 2025-06-30T09:17:55.767928Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-30T09:17:55.767932Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-30T09:17:55.767936Z node 1 
:TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:17:55.767978Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:55.767985Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:55.768046Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-30T09:17:55.768066Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-30T09:17:55.768090Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:17:55.768095Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:17:55.768102Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-30T09:17:55.768106Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-30T09:17:55.768112Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-30T09:17:55.768116Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-30T09:17:55.768120Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:17:55.768129Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:216:2213], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:55.768133Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:55.768143Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:214:2212], serverId# [1:216:2213], sessionId# [0:0:0] 2025-06-30T09:17:55.768534Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:136:2157]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-30T09:17:55.768543Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:17:55.768552Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:17:55.768580Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-30T09:17:55.768587Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-30T09:17:55.768596Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-30T09:17:55.768613Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution 
status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-30T09:17:55.768616Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-30T09:17:55.768620Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-30T09:17:55.768624Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:17:55.768674Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-30T09:17:55.768678Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-30T09:17:55.768680Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-30T09:17:55.768683Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:17:55.768690Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-30T09:17:55.768693Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-30T09:17:55.768696Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-30T09:17:55.768698Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-30T09:17:55.768702Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-30T09:17:55.779693Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:17:55.779726Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:17:55.779735Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:17:55.779765Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-30T09:17:55.779783Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-30T09:17:55.779921Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:226:2222], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:55.779932Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:17:55.779942Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:225:2221], serverId# [1:226:2222], sessionId# [0:0:0] 2025-06-30T09:17:55.779966Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:136:2157]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-30T09:17:55.779971Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-30T09:17:55.780012Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-30T09:17:55.780021Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-30T09:17:55.780027Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-30T09:17:55.780034Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-30T09:17:55.780932Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-30T09:17:55.780956Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:17:55.781034Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:55.781042Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:17:55.781053Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:17:55.781062Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:17:55.781068Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:17:55.781077Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-30T09:17:55.781082Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 02?\022\002\205\000\034MyReads MyWrites\205\004\205\002?\022\002\206\202\024Reply\024Write?\030\205\002\206\203\010\002 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\024)\211\026?\022\203\005\004\200\205\006\203\004\203\004\203\004\006\n\016\213\004\203\004\207\203\001H\213\002\203\004\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?* h\020\000\000\000\000\000\000\016\000\000\000\000\000\000\000?\014\005?2\003?,D\003?.F\003?0p\007\013?:\003?4\000\'?8\003\013?>\003?<\003j\030\001\003?@\000\003?B\000\003?D\007\240%&\003?F\000\006\004?J\003\203\014\000\003\203\014\000\003\003?L\000\377\007\002\000\005?\032\005?\026?x\000\005?\030\003\005? 
\005?\034?x\000\006 2025-06-30T09:19:40.091991Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:19:40.092105Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:19:40.092318Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit CheckDataTx 2025-06-30T09:19:40.093585Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-30T09:19:40.093598Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit CheckDataTx 2025-06-30T09:19:40.093603Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-30T09:19:40.093607Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit BuildAndWaitDependencies 2025-06-30T09:19:40.093620Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-30T09:19:40.093635Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:11] at 9437184 2025-06-30T09:19:40.093640Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-30T09:19:40.093643Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-30T09:19:40.093646Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit ExecuteDataTx 2025-06-30T09:19:40.093650Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-30T09:19:40.096443Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-30T09:19:40.096502Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-30T09:19:40.096513Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-30T09:19:40.108931Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:19:40.108973Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-30T09:19:40.109208Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-30T09:19:40.110006Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-30T09:19:40.110054Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-30T09:19:40.110066Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-30T09:19:40.147014Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:19:40.147052Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-30T09:19:40.147276Z node 3 :TX_DATASHARD 
DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-30T09:19:40.169980Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:11] at 9437184 exceeded memory limit 4194304 and requests 33554432 more for the next try 2025-06-30T09:19:40.170067Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-30T09:19:40.170081Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-30T09:19:40.170231Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:19:40.170241Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-30T09:19:40.170420Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-30T09:19:40.445173Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-30T09:19:40.445350Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-30T09:19:40.445370Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-30T09:19:40.487677Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:19:40.487719Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-30T09:19:40.487949Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-30T09:19:40.842566Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:11] at 9437184 exceeded memory limit 37748736 and requests 301989888 more for the next try 2025-06-30T09:19:40.842765Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-30T09:19:40.842782Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-30T09:19:40.858169Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:19:40.858206Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-30T09:19:40.858417Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-30T09:19:40.860636Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-30T09:19:40.860699Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-30T09:19:40.860712Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-30T09:19:40.865646Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:19:40.865673Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-30T09:19:40.865872Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-30T09:19:40.866508Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-30T09:19:40.866542Z node 3 
:TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-30T09:19:40.866551Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-30T09:19:40.871823Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:19:40.871849Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-30T09:19:40.872041Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-30T09:19:40.873348Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-30T09:19:40.873391Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-30T09:19:40.873402Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-30T09:19:40.941338Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:19:40.941370Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-30T09:19:40.941550Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-30T09:19:41.730270Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:11] at tablet 9437184 with status COMPLETE 2025-06-30T09:19:41.730314Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:11] at 9437184: {NSelectRow: 0, NSelectRange: 1, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 129871, SelectRangeBytes: 40000268, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-30T09:19:41.730336Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-30T09:19:41.730343Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit ExecuteDataTx 2025-06-30T09:19:41.730349Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit FinishPropose 2025-06-30T09:19:41.730355Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit FinishPropose 2025-06-30T09:19:41.730364Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 11 at tablet 9437184 send to client, exec latency: 62 ms, propose latency: 62 ms, status: COMPLETE 2025-06-30T09:19:41.730391Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is DelayComplete 2025-06-30T09:19:41.730395Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit FinishPropose 2025-06-30T09:19:41.730398Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit CompletedOperations 2025-06-30T09:19:41.730401Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit CompletedOperations 2025-06-30T09:19:41.730413Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-30T09:19:41.730415Z node 3 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit CompletedOperations 2025-06-30T09:19:41.730418Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:11] at 9437184 has finished 2025-06-30T09:19:41.740477Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:19:41.740508Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:11] at 9437184 on unit FinishPropose 2025-06-30T09:19:41.740524Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1000 [GOOD] >> DataShardReadIterator::ShouldFailUknownColumns >> DataShardVolatile::UpsertDependenciesShardsRestart-UseSink [GOOD] >> DataShardVolatile::NotCachingAbortingDeletes+UseSink >> DataShardReadIterator::ShouldReadRangeLeftInclusive [GOOD] >> DataShardReadIterator::ShouldReadRangeRightInclusive >> PartitionEndWatcher::EmptyPartition [GOOD] >> PartitionEndWatcher::AfterCommit [GOOD] >> YdbProxy::AlterTable >> TopicAutoscaling::ControlPlane_CDC_Enable [GOOD] >> TopicAutoscaling::MidOfRange [GOOD] >> YdbProxy::DropTable [GOOD] >> YdbProxy::DescribeTopic ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TPersQueueMirrorer::ValidStartStream [GOOD] Test command err: 2025-06-30T09:18:52.859122Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669421581003483:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:52.860197Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:52.926311Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004040/r3tmp/tmpvyLRRN/pdisk_1.dat 2025-06-30T09:18:52.976530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:52.976564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:52.980760Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:52.990571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7679, node 1 2025-06-30T09:18:53.014970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004040/r3tmp/yandexvVs80E.tmp 2025-06-30T09:18:53.014987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004040/r3tmp/yandexvVs80E.tmp 2025-06-30T09:18:53.015072Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004040/r3tmp/yandexvVs80E.tmp 2025-06-30T09:18:53.015119Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:53.028854Z INFO: TTestServer started on Port 22055 GrpcPort 7679 TClient is connected to server localhost:22055 
PQClient connected to localhost:7679 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:53.195170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:53.207232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:18:53.210803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-30T09:18:53.212394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:18:53.269489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:18:53.284249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-30T09:18:53.525058Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669425875971486:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.525095Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.525253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669425875971499:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.526315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:53.528923Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669425875971501:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-30T09:18:53.578269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.588120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.602918Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669425875971717:2529] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:53.609626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669425875971871:2612] 2025-06-30T09:18:53.859851Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:57.861570Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669421581003483:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:57.862290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-30T09:18:59.087341Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-30T09:18:59.099906Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:18:59.101922Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521669451645775840:2685], Recipient [1:7521669421581003775:2166]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:59.101953Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:59.101957Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:18:59.101968Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521669451645775836:2682], Recipient [1:7521669421581003775:2166]: {TEvModifySchemeTransaction txid# 281474976710674 TabletId# 72057594046644480} 2025-06-30T09:18:59.101971Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:18:59.112636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "back-compatibility-test" TotalGroupCount: 3 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } } } } TxId: 281474976710674 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:18:59.112766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_pq.cpp:307: TCreatePQ Propose, path: /Root/back-compatibility-test, opId: 281474976710674:0, at schemeshard: 72057594046644480 2025-06-30T09:18:59.112864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to th ... 
node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 5 endOffset 10 2025-06-30T09:19:42.945166Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 5, endOffset# 10, WTime# 1751275182819, sizeLag# 1179 2025-06-30T09:19:42.945173Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1TEvPartitionReady. Aval parts: 1 2025-06-30T09:19:42.945182Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 performing read request: guid# cc5ab4b-8a29406c-d4a2ea7d-1302c69b, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 6, size# 1414, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-30T09:19:42.945210Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 6 maxSize 1414 maxTimeLagMs 0 readTimestampMs 0 readOffset 5 EndOffset 10 ClientCommitOffset 2 committedOffset 2 Guid cc5ab4b-8a29406c-d4a2ea7d-1302c69b 2025-06-30T09:19:42.945346Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-30T09:19:42.945360Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 0 2025-06-30T09:19:42.945396Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 5 Topic 'rt3.dc1--topic1' partition 0 user user offset 5 count 6 size 1414 endOffset 10 max time lag 0ms effective offset 5 2025-06-30T09:19:42.945443Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 5 added 2 blobs, size 670 count 5 last offset 6, current partition end offset: 10 2025-06-30T09:19:42.945454Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 5. Send blob request. 2025-06-30T09:19:42.945484Z node 8 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 5 partno 0 count 1 parts_count 0 source 1 size 161 accessed 0 times before, last time 2025-06-30T09:19:42.000000Z 2025-06-30T09:19:42.945493Z node 8 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 6 partno 0 count 4 parts_count 0 source 1 size 509 accessed 0 times before, last time 2025-06-30T09:19:42.000000Z 2025-06-30T09:19:42.945503Z node 8 :PERSQUEUE DEBUG: read.h:121: Reading cookie 5. All 2 blobs are from cache. 2025-06-30T09:19:42.945522Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 2 blobs 2025-06-30T09:19:42.945531Z node 8 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 5 partno 0 count 1 parts 0 suffix '63' 2025-06-30T09:19:42.945536Z node 8 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 0 offset 6 partno 0 count 4 parts 0 suffix '63' 2025-06-30T09:19:42.945544Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 5 totakecount 1 count 1 size 141 from pos 0 cbcount 1 2025-06-30T09:19:42.945574Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 6 totakecount 4 count 4 size 489 from pos 0 cbcount 4 2025-06-30T09:19:42.945611Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--topic1' partition: 0 messageNo: 0 requestId: cookie: 5 2025-06-30T09:19:42.945807Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 10 Result { Offset: 5 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 6 WriteTimestampMS: 1751275182840 CreateTimestampMS: 1751275182840 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 6 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 7 WriteTimestampMS: 1751275182841 CreateTimestampMS: 1751275182840 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 7 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 8 WriteTimestampMS: 1751275182841 CreateTimestampMS: 1751275182840 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 8 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 9 WriteTimestampMS: 1751275182841 CreateTimestampMS: 1751275182840 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 9 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 10 WriteTimestampMS: 1751275182841 CreateTimestampMS: 1751275182840 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 2 SizeLag: 18446744073709551233 RealReadOffset: 9 WaitQuotaTimeMs: 0 EndOffset: 10 StartOffset: 0 } Cookie: 5 } 2025-06-30T09:19:42.945858Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 10 2025-06-30T09:19:42.945873Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 10 ReadOffset 10 ReadGuid cc5ab4b-8a29406c-d4a2ea7d-1302c69b has messages 1 2025-06-30T09:19:42.945940Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 read done: guid# cc5ab4b-8a29406c-d4a2ea7d-1302c69b, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 558 2025-06-30T09:19:42.945963Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 response to read: guid# cc5ab4b-8a29406c-d4a2ea7d-1302c69b 2025-06-30T09:19:42.946075Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 Process answer. 
Aval parts: 0 2025-06-30T09:19:42.946190Z :DEBUG: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] [] Got ReadResponse, serverBytesSize = 558, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428242 2025-06-30T09:19:42.946223Z :DEBUG: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428242 2025-06-30T09:19:42.946314Z :DEBUG: [] Decompression task done. Partition/PartitionSessionId: 1 (5-9) 2025-06-30T09:19:42.946329Z :DEBUG: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] [] Returning serverBytesSize = 558 to budget 2025-06-30T09:19:42.946334Z :DEBUG: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] [] In ContinueReadingDataImpl, ReadSizeBudget = 558, ReadSizeServerDelta = 52428242 2025-06-30T09:19:42.946419Z :DEBUG: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-30T09:19:42.946486Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (5-5) 2025-06-30T09:19:42.946498Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (6-6) 2025-06-30T09:19:42.946503Z :DEBUG: [] Take Data. Partition 0. Read: {1, 1} (7-7) 2025-06-30T09:19:42.946508Z :DEBUG: [] Take Data. Partition 0. Read: {1, 2} (8-8) 2025-06-30T09:19:42.946518Z :DEBUG: [] Take Data. Partition 0. Read: {1, 3} (9-9) 2025-06-30T09:19:42.946493Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 grpc read done: success# 1, data# { read_request { bytes_size: 558 } } 2025-06-30T09:19:42.946529Z :DEBUG: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] [] The application data is transferred to the client. Number of messages 5, size 115 bytes 2025-06-30T09:19:42.946537Z :DEBUG: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] [] Returning serverBytesSize = 0 to budget 2025-06-30T09:19:42.946539Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 got read request: guid# 9527ba55-3b6b9a92-987ec374-60d394c5 2025-06-30T09:19:42.946558Z :INFO: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] Closing read session. Close timeout: 0.000000s 2025-06-30T09:19:42.946565Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:9:2 2025-06-30T09:19:42.946573Z :INFO: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] Counters: { Errors: 0 CurrentSessionLifetimeMs: 5 BytesRead: 115 MessagesRead: 5 BytesReadCompressed: 115 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:19:42.946589Z :NOTICE: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:19:42.946595Z :DEBUG: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] [] Abort session to cluster 2025-06-30T09:19:42.946730Z :NOTICE: [] [] [ea0ce5c-8a09812f-23b3ebf6-ef96e6e8] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:19:42.946874Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 grpc read done: success# 0, data# { } 2025-06-30T09:19:42.946883Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 grpc read failed 2025-06-30T09:19:42.946887Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 grpc closed 2025-06-30T09:19:42.946906Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_7_2_17419447681720985051_v1 is DEAD 2025-06-30T09:19:42.946987Z :DEBUG: [] MessageGroupId [src-id-test] SessionId [src-id-test|202b80a5-733b3724-a14a96b4-74f03021_0] Write session: destroy 2025-06-30T09:19:42.947066Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_7_2_17419447681720985051_v1 2025-06-30T09:19:42.947085Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [7:7521669633633394168:2485] destroyed 2025-06-30T09:19:42.947100Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_7_2_17419447681720985051_v1 2025-06-30T09:19:42.947153Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--topic1] pipe [7:7521669633633394165:2482] disconnected; active server actors: 1 2025-06-30T09:19:42.947168Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--topic1] pipe [7:7521669633633394165:2482] client user disconnected session shared/user_7_2_17419447681720985051_v1 >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit99 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit100 >> TTopicReaderTests::TestRun_ReadTwoMessages_With_Limit_1 [GOOD] >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10000Inflight1000BlobSize1000 [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests1Inflight1BlobSize2000000 >> YdbProxy::MakeDirectory |80.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |80.1%| [LD] {RESULT} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat >> YdbProxy::DescribeTopic [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitBothWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 
72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:18:38.204144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:38.204169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:38.204173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:38.204177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:38.204181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:38.204184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:38.204191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:38.204203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:38.204290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:38.204358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:38.215645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:18:38.215684Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:38.215835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:18:38.219243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:38.220142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:38.220189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:38.221501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:38.221549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:38.221638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at 
schemeshard: 72057594046678944 2025-06-30T09:18:38.221706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:38.222379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:38.222436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:38.222761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:38.222775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:38.222786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:38.222796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:38.222802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:38.222839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:18:38.224371Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:18:38.247543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:38.247646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.247719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:38.247727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:38.247783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:38.247798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:38.248620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:38.248666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:38.248739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.248750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:38.248756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:38.248763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:38.249220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.249232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:38.249239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:38.249567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.249576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.249583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:38.249591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:38.250437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:38.250928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:38.250976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:38.251214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:38.251247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... ercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:42.850439Z node 46 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409550][46:1018:2806] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-30T09:19:42.850467Z node 46 :CHANGE_EXCHANGE DEBUG: 
change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409551][46:1019:2806] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-30T09:19:42.850477Z node 46 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][46:965:2806] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409550 } 2025-06-30T09:19:42.850492Z node 46 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][46:965:2806] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-30T09:19:42.850509Z node 46 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409550][46:1018:2806] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1751275182837274 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1751275182837274 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-30T09:19:42.850538Z node 46 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409551][46:1019:2806] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 3 Group: 1751275182837274 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-30T09:19:42.851473Z node 46 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409550][46:1018:2806] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 2 2025-06-30T09:19:42.851506Z node 46 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][46:965:2806] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409550 } 2025-06-30T09:19:42.851609Z node 46 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409551][46:1019:2806] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-30T09:19:42.851624Z node 46 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][46:965:2806] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-30T09:19:43.005274Z node 46 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false 
ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:19:43.005357Z node 46 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 100us result status StatusSuccess 2025-06-30T09:19:43.005534Z node 46 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 
1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\004\000\000\0002\000\000\000\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> YdbProxy::AlterTable [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestCellVec [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestArrow >> YdbProxy::MakeDirectory [GOOD] >> YdbProxy::OAuthToken |80.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_external_blobs/unittest >> DataShardReadIterator::ShouldRollbackLocksWhenWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions+EvWrite ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DescribeTopic [GOOD] Test command err: 2025-06-30T09:19:43.751400Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669637378434818:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:43.751428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cc2/r3tmp/tmpegmgVb/pdisk_1.dat 2025-06-30T09:19:43.804637Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:62763 TServer::EnableGrpc on GrpcPort 61074, node 1 2025-06-30T09:19:43.841233Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:43.841246Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:43.841251Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:43.841301Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:43.853168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:43.853219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:43.854311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62763 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:43.892727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:19:44.095763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:44.163694Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669641673402815:2376] txid# 281474976715660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-30T09:19:44.165060Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-30T09:19:44.406056Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669641587232954:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:44.406075Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cc2/r3tmp/tmpJtm1fw/pdisk_1.dat 2025-06-30T09:19:44.417938Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:7878 TServer::EnableGrpc on GrpcPort 2387, node 2 2025-06-30T09:19:44.437587Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:44.437599Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:44.437601Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:44.437639Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7878 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:19:44.506459Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:44.506491Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:44.507581Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:44.509915Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:44.514974Z node 2 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:19:44.578144Z node 2 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::AlterTable [GOOD] Test command err: 2025-06-30T09:19:44.282094Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669643662073068:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:44.282119Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cb5/r3tmp/tmp9fGUBo/pdisk_1.dat 2025-06-30T09:19:44.335922Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:32587 TServer::EnableGrpc on GrpcPort 29937, node 1 2025-06-30T09:19:44.378186Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:44.378201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:44.378204Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:44.378248Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:44.383987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:44.384024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:44.385117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:32587 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:44.417160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:44.662462Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669643662073652:2295] txid# 281474976715658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-30T09:19:44.664626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:44.727509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:19:44.732187Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669643662073768:2374] txid# 281474976715661, issues: { message: "Can\'t drop unknown column: \'extra\'" severity: 1 } >> CommitOffset::Commit_WithoutSession_ToPastParentPartition [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_SameSession >> DataShardReadIterator::ShouldReadMultipleKeysOneByOne [GOOD] >> DataShardReadIterator::ShouldReadKeyPrefix1 >> BlobPatching::PatchBlock42 [GOOD] >> BlobStorageBlockRace::BlocksRacingViaSyncLog >> DataShardVolatile::DistributedUpsertRestartAfterPlan [GOOD] >> DataShardVolatile::CompactedVolatileChangesCommit ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::MidOfRange [GOOD] Test command err: 2025-06-30T09:18:52.886225Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669422352779633:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:52.886422Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004033/r3tmp/tmp5MevCB/pdisk_1.dat 2025-06-30T09:18:52.953312Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:52.969170Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669422352779518:2080] 1751275132882107 != 1751275132882110 2025-06-30T09:18:52.970784Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19451, node 1 2025-06-30T09:18:53.019301Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004033/r3tmp/yandexokmE4X.tmp 2025-06-30T09:18:53.019323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004033/r3tmp/yandexokmE4X.tmp 2025-06-30T09:18:53.019401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004033/r3tmp/yandexokmE4X.tmp 2025-06-30T09:18:53.019438Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:53.030945Z INFO: TTestServer started on Port 28739 GrpcPort 19451 2025-06-30T09:18:53.042272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:53.042303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:53.051280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28739 PQClient connected to localhost:19451 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:53.203812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:53.213588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-30T09:18:53.227337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:18:53.229408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:18:53.313957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:18:53.323689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:18:53.623002Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669426647747592:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.623039Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.623127Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669426647747604:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.624141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:53.626299Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669426647747606:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:18:53.682761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.693809Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669426647747737:2466] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:53.711442Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669426647747745:2315], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:53.711664Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZjQ5NzM3Yy0zYWVjYTBhMC1lZGIzN2I0Ny00ZWJlMTJmMQ==, ActorId: [1:7521669426647747574:2296], ActorState: ExecuteState, TraceId: 01jz023pnk4b11rdg8sw15qkh1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:53.712253Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:53.739153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.765175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669426647747962:2609] 2025-06-30T09:18:53.879752Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:57.885270Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669422352779633:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:57.885302Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-30T09:18:58.960895Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-30T09:18:58.965779Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:18:58.966284Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521669448122584636:2684], Recipient [1:7521669422352779847:2150]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:58.966296Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:58.966298Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:18:58.966305Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521669448122584632:2681], Recipient [1:7521669422352779847:2150]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-30T09:18:58.966307Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cp ... veTx for txid 281474976715675:1 2025-06-30T09:19:44.127504Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:19:44.127507Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037893] Try execute txs with state EXECUTED 2025-06-30T09:19:44.127509Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037893] TxId 281474976715675, State EXECUTED 2025-06-30T09:19:44.127511Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037893] TxId 281474976715675 State EXECUTED FrontTxId 281474976715675 2025-06-30T09:19:44.127513Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037893] TPersQueue::SendEvReadSetAckToSenders 2025-06-30T09:19:44.127515Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037893] TxId 281474976715675, NewState WAIT_RS_ACKS 2025-06-30T09:19:44.127517Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037893] TxId 281474976715675 moved from EXECUTED to WAIT_RS_ACKS 2025-06-30T09:19:44.127518Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 4 2025-06-30T09:19:44.127520Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715675] PredicateAcks: 0/0 2025-06-30T09:19:44.127521Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715675:2 2025-06-30T09:19:44.127522Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715675:2 2025-06-30T09:19:44.127522Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037893] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-30T09:19:44.127524Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715675] PredicateAcks: 0/0 2025-06-30T09:19:44.127528Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 5 2025-06-30T09:19:44.127529Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037893] add an TxId 281474976715675 to the list for deletion 2025-06-30T09:19:44.127529Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976715675, 
publications: 2, subscribers: 1 2025-06-30T09:19:44.127531Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976715675, [OwnerId: 72057594046644480, LocalPathId: 14], 4 2025-06-30T09:19:44.127532Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037893] TxId 281474976715675, NewState DELETING 2025-06-30T09:19:44.127532Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976715675, [OwnerId: 72057594046644480, LocalPathId: 15], 2 2025-06-30T09:19:44.127535Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037893] delete key for TxId 281474976715675 2025-06-30T09:19:44.127541Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037893] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:19:44.127547Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794756, Sender [6:7521669645594730202:2434], Recipient [6:7521669645594730202:2434]: NKikimr::TEvKeyValue::TEvCollect 2025-06-30T09:19:44.127574Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794752, Sender [6:7521669645594730202:2434], Recipient [6:7521669645594730202:2434]: NKikimrClient.TKeyValueRequest Cookie: 5 CmdDeleteRange { Range { From: "tx_00000281474976715675" IncludeFrom: true To: "tx_00000281474976715675" IncludeTo: true } } CmdWrite { Key: "_txinfo" Value: "\020\251\300\212\201\3742\030\233\247\200\200\200\200@(\240\215\0060\251\300\212\201\37428\233\247\200\200\200\200@" StorageChannel: INLINE } 2025-06-30T09:19:44.127596Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794753, Sender [6:7521669645594730324:2434], Recipient [6:7521669645594730202:2434]: NKikimr::TEvKeyValue::TEvIntermediate 2025-06-30T09:19:44.127630Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:44.127635Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794760, Sender [6:7521669645594730322:2444], Recipient [6:7521669645594730202:2434]: NKikimr::TEvKeyValue::TEvCompleteGC 2025-06-30T09:19:44.127740Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 274137603, Sender [6:7521669619824925454:2241], Recipient [6:7521669619824925324:2165]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046644480 Generation: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] Version: 4 } 2025-06-30T09:19:44.127748Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5044: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-30T09:19:44.127763Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 14 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715675 2025-06-30T09:19:44.127769Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270795264, Sender [6:7521669645594730202:2434], Recipient [6:7521669645594730202:2434]: NKikimrClient.TResponse Status: 1 Cookie: 5 DeleteRangeResult { Status: 0 } WriteResult { Status: 0 StatusFlags: 1 } 2025-06-30T09:19:44.127770Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5317: HandleHook, processing event TEvKeyValue::TEvResponse 2025-06-30T09:19:44.127772Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse 
(WRITE_TX_COOKIE) 2025-06-30T09:19:44.127773Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037893] Try execute txs with state DELETING 2025-06-30T09:19:44.127775Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037893] TxId 281474976715675, State DELETING 2025-06-30T09:19:44.127778Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037893] delete TxId 281474976715675 2025-06-30T09:19:44.127784Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 14 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715675 2025-06-30T09:19:44.127787Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046644480, txId: 281474976715675 2025-06-30T09:19:44.127788Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794756, Sender [6:7521669645594730202:2434], Recipient [6:7521669645594730202:2434]: NKikimr::TEvKeyValue::TEvCollect 2025-06-30T09:19:44.127790Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715675, pathId: [OwnerId: 72057594046644480, LocalPathId: 14], version: 4 2025-06-30T09:19:44.127794Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 14] was 3 2025-06-30T09:19:44.127816Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:19:44.127827Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794760, Sender [6:7521669645594730327:2445], Recipient [6:7521669645594730202:2434]: NKikimr::TEvKeyValue::TEvCompleteGC 2025-06-30T09:19:44.127848Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 274137603, Sender [6:7521669619824925454:2241], Recipient [6:7521669619824925324:2165]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046644480 Generation: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 15] Version: 2 } 2025-06-30T09:19:44.127849Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5044: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-30T09:19:44.127853Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 15 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715675 2025-06-30T09:19:44.127858Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 15 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715675 2025-06-30T09:19:44.127859Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715675 2025-06-30T09:19:44.127860Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715675, pathId: [OwnerId: 72057594046644480, LocalPathId: 15], version: 2 2025-06-30T09:19:44.127862Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 4 2025-06-30T09:19:44.127866Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715675, subscribers: 1 2025-06-30T09:19:44.127868Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [6:7521669645594730177:2432] 2025-06-30T09:19:44.127873Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:19:44.127884Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715675 2025-06-30T09:19:44.127885Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:44.127893Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715675 2025-06-30T09:19:44.127894Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:44.127904Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [6:7521669645594730177:2432] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715675 at schemeshard: 72057594046644480 2025-06-30T09:19:44.127974Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [6:7521669645594730184:2736], Recipient [6:7521669619824925324:2165]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:44.127982Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:44.127983Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:19:44.130238Z node 6 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:140: new alter topic request >> DataShardVolatile::NotCachingAbortingDeletes+UseSink [GOOD] >> DataShardVolatile::NotCachingAbortingDeletes-UseSink >> YdbProxy::OAuthToken [GOOD] >> BlobStorageBlockRace::BlocksRacingViaSyncLog [GOOD] >> BlobStorageBlockRace::BlocksRacingViaSyncLog2 >> TxUsage::WriteToTopic_Demo_14_Query [GOOD] |80.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_external_blobs/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix-EvWrite >> DataShardReadIterator::ShouldFailUknownColumns [GOOD] >> DataShardReadIterator::ShouldFailWrongSchema >> BlobStorageBlockRace::BlocksRacingViaSyncLog2 [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests1Inflight1BlobSize2000000 [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests10Inflight1BlobSize2000000 >> DataShardReadIterator::TryCommitLocksPrepared+Volatile+BreakLocks [GOOD] >> DataShardReadIterator::TryWriteManyRows+Commit >> TxUsage::WriteToTopic_Demo_16_Table >> 
DataShardReadIterator::ShouldReadRangeRightInclusive [GOOD] >> DataShardReadIterator::ShouldReadRangeOneByOne ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::OAuthToken [GOOD] Test command err: 2025-06-30T09:19:44.681727Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669642857336721:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:44.681759Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cb3/r3tmp/tmpzftufH/pdisk_1.dat 2025-06-30T09:19:44.726338Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:7602 TServer::EnableGrpc on GrpcPort 18423, node 1 2025-06-30T09:19:44.751771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:44.751790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:44.751793Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:44.751838Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7602 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:19:44.783143Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:44.783178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:44.784369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:44.810318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:19:45.089585Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669650104831592:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:45.089628Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002cb3/r3tmp/tmpuS9KoG/pdisk_1.dat 2025-06-30T09:19:45.103702Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:1442 TServer::EnableGrpc on GrpcPort 64797, node 2 2025-06-30T09:19:45.136298Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:45.136316Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:45.136319Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:45.136375Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1442 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:45.193234Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:45.193267Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:45.194084Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:45.194356Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
>> ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit100 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit101 >> ReadSessionImplTest::SuccessfulInit [GOOD] >> ReadSessionImplTest::SuccessfulInitAndThenTimeoutCallback [GOOD] >> ReadSessionImplTest::StopsRetryAfterFailedAttempt [GOOD] >> ReadSessionImplTest::StopsRetryAfterTimeout [GOOD] >> ReadSessionImplTest::UnpackBigBatchWithTwoPartitions [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease >> GroupLayoutSanitizer::TestBlock4Plus2 [GOOD] >> GroupLayoutSanitizer::TestMirror3of4 >> ReadSessionImplTest::DecompressRaw [GOOD] >> ReadSessionImplTest::DecompressGzip [GOOD] >> ReadSessionImplTest::DecompressZstd [GOOD] >> ReadSessionImplTest::DecompressRawEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressGzipEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressWithSynchronousExecutor [GOOD] >> ReadSessionImplTest::DataReceivedCallbackReal >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions >> ReadSessionImplTest::ProperlyOrdersDecompressedData [GOOD] >> ReadSessionImplTest::PacksBatches_ExactlyTwoMessagesInBatch [GOOD] >> ReadSessionImplTest::PacksBatches_OneMessageInEveryBatch [GOOD] >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit >> DataShardReadIteratorConsistency::LeaseConfirmationNotOutOfOrder [GOOD] >> DataShardReadIteratorConsistency::BrokenWriteLockBeforeIteration >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] >> DataShardVolatile::CompactedVolatileChangesCommit [GOOD] >> DataShardVolatile::CompactedVolatileChangesAbort >> CostMetricsGetMirror3dc::TestGetMirror3dcRequests10000Inflight1000BlobSize1000 [GOOD] >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests1Inflight1BlobSize1000 >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestArrow [GOOD] >> DataShardReadIterator::ShouldReadNonExistingKey >> DataShardReadIterator::ShouldReadKeyPrefix1 [GOOD] >> DataShardReadIterator::ShouldReadKeyPrefix2 >> DataShardVolatile::NotCachingAbortingDeletes-UseSink [GOOD] >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck >> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions-EvWrite ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> BlobStorageBlockRace::BlocksRacingViaSyncLog2 [GOOD] Test command err: RandomSeed# 7319350617601636495 1970-01-01T00:01:00.110512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:2:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 524289} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} 1970-01-01T00:01:00.110512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:2:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 
MinHugeBlobInBytes# 524289} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} 1970-01-01T00:01:00.110512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:2:4:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 524289} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} 1970-01-01T00:01:00.210512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:3:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 524289} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} 1970-01-01T00:01:00.210512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:3:4:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 524289} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} 1970-01-01T00:01:01.210512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:3:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 1 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 2 }}}} 1970-01-01T00:01:01.310512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:4:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 1 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 2 }}}} 1970-01-01T00:01:01.310512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:4:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 1 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 2 }}}} 1970-01-01T00:01:01.310512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:4:4:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 2 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} 1970-01-01T00:01:01.410512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:5:4:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 2 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} 1970-01-01T00:01:01.410512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:5:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 4 }}}} 1970-01-01T00:01:01.610512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:3:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 
65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 524289} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} 1970-01-01T00:01:02.310512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:5:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 2 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} 1970-01-01T00:01:02.410512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:6:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 4 }}}} 1970-01-01T00:01:02.410512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:6:4:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 4 }}}} 1970-01-01T00:01:03.310512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:6:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 4 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 5 }}}} 1970-01-01T00:01:03.410512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:7:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 524289} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} 1970-01-01T00:01:03.410512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:7:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 524289} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} 1970-01-01T00:01:03.410512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:7:4:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 524289} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} 1970-01-01T00:01:03.510512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:8:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 1 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 2 }}}} 1970-01-01T00:01:03.510512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:8:4:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 1 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 2 }}}} 1970-01-01T00:01:03.510512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:8:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 1 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 
150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 2 }}}} 1970-01-01T00:01:03.610512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:9:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 2 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} 1970-01-01T00:01:03.610512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:9:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 2 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} 1970-01-01T00:01:03.610512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:9:4:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 2 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} 1970-01-01T00:01:03.710512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:10:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 1 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 2 }}}} 1970-01-01T00:01:03.710512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:10:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 1 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 2 }}}} 1970-01-01T00:01:04.610512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:10:4:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 5 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 6 }}}} 1970-01-01T00:01:04.710512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:11:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 4 }}}} 1970-01-01T00:01:04.710512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:11:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 4 }}}} 1970-01-01T00:01:04.710512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:11:4:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 4 }}}} 1970-01-01T00:01:04.810512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:12:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 2 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} 1970-01-01T00:01:04.810512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:12:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 6 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 7 }}}} 1970-01-01T00:01:04.810512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:12:4:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 2 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed 
ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} 1970-01-01T00:01:04.910512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:13:4:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 4 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 5 }}}} 1970-01-01T00:01:04.910512Z Unwrap {EvVPutResult Status# OK ID# [1:1:1:1:13:4:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 4 } Cost# 403 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window ... 0] writtenParts# 6 *** checking blob [1000000000:1:1577:5:10:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1578:5:23:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1579:5:49:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1584:5:6:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1591:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1593:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1595:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1596:5:29:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1599:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1600:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1601:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1602:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1603:5:45:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1604:5:21:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1605:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1607:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1609:5:28:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1611:5:47:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1612:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1613:5:39:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1614:5:15:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1617:5:20:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1624:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1628:5:37:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1629:5:13:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1630:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1633:5:14:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1636:5:12:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1638:5:21:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1640:5:3:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1641:5:26:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1642:5:12:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1643:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1644:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1647:5:49:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1649:5:28:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1650:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1651:5:40:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1653:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1652:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1654:5:35:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1656:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1657:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1659:5:52:16384:0] writtenParts# 6 *** checking blob 
[1000000000:1:1662:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1661:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1664:5:29:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1666:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1669:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1670:5:29:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1671:5:45:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1672:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1674:5:50:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1675:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1676:5:49:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1680:5:33:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1682:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1683:5:38:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1687:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1690:5:24:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1691:5:30:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1695:5:11:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1696:5:37:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1697:5:40:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1700:5:8:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1701:5:41:16384:0] writtenParts# 7 *** checking blob [1000000000:1:1702:5:47:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1703:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1704:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1707:5:4:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1709:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1716:5:52:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1718:5:4:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1721:5:49:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1722:5:32:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1724:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1727:5:19:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1728:5:45:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1730:5:44:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1731:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1733:5:32:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1734:5:8:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1735:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1736:5:7:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1737:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1740:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1741:5:24:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1742:5:50:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1744:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1746:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1747:5:44:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1748:5:30:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1750:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1752:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1751:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1753:5:17:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1756:5:0:16384:0] writtenParts# 6 *** checking blob 
[1000000000:1:1757:5:48:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1759:5:30:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1761:5:9:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1762:5:5:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1763:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1764:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1768:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1774:5:28:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1776:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1777:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1779:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1778:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1782:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1783:5:46:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1785:5:25:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1786:5:51:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1792:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1793:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1794:5:12:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1800:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1799:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1798:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1801:5:8:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1802:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1803:5:47:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1804:5:13:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1805:5:19:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1806:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1807:5:31:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1808:5:24:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1810:5:46:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1811:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1812:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1816:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1818:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1823:5:35:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1825:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1826:5:30:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1827:5:6:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1829:5:35:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1831:5:17:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1833:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1834:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1836:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1835:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1837:5:47:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1841:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1842:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1845:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1847:5:51:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1848:5:47:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1849:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1851:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1856:5:9:16384:0] 
writtenParts# 6 *** checking blob [1000000000:1:1857:5:25:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1858:5:38:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1862:5:39:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1863:5:25:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1865:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1866:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1867:5:6:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1868:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1870:5:14:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1871:5:47:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1872:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1874:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1875:5:0:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1877:5:50:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1878:5:16:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1880:5:2:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1882:5:1:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1884:5:6:16384:0] writtenParts# 6 *** checking blob [1000000000:1:1889:5:1:16384:0] writtenParts# 6 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] Test command err: 2025-06-30T09:19:46.168442Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.168456Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.168461Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.168649Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.168888Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-30T09:19:46.168928Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.169312Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.169319Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.169323Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.169425Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.169496Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-30T09:19:46.169507Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.169798Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.169802Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.169806Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.169882Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
Description: 2025-06-30T09:19:46.169906Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.169910Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.169952Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: INTERNAL_ERROR Issues: "
: Error: Failed to establish connection to server "" ( cluster cluster). Attempts done: 1 " } 2025-06-30T09:19:46.170229Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.170234Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.170238Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.170319Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-30T09:19:46.170326Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.170329Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.170336Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: TIMEOUT Issues: "
: Error: Failed to establish connection to server. Attempts done: 1 " } 2025-06-30T09:19:46.170646Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-30T09:19:46.170653Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-30T09:19:46.170656Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.170753Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.170873Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.171992Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-30T09:19:46.172088Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.172200Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 2. Cluster: "TestCluster". Topic: "TestTopic". Partition: 2. Read offset: (NULL) 2025-06-30T09:19:46.172825Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-50) 2025-06-30T09:19:46.172888Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.172901Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-30T09:19:46.172907Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-30T09:19:46.172912Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-30T09:19:46.172924Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-30T09:19:46.172929Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-30T09:19:46.172934Z :DEBUG: Take Data. Partition 1. Read: {0, 6} (7-7) 2025-06-30T09:19:46.172939Z :DEBUG: Take Data. Partition 1. Read: {0, 7} (8-8) 2025-06-30T09:19:46.172946Z :DEBUG: Take Data. Partition 1. Read: {0, 8} (9-9) 2025-06-30T09:19:46.172951Z :DEBUG: Take Data. Partition 1. Read: {0, 9} (10-10) 2025-06-30T09:19:46.172955Z :DEBUG: Take Data. Partition 1. Read: {0, 10} (11-11) 2025-06-30T09:19:46.172959Z :DEBUG: Take Data. Partition 1. Read: {0, 11} (12-12) 2025-06-30T09:19:46.172964Z :DEBUG: Take Data. Partition 1. Read: {0, 12} (13-13) 2025-06-30T09:19:46.172968Z :DEBUG: Take Data. Partition 1. Read: {0, 13} (14-14) 2025-06-30T09:19:46.172973Z :DEBUG: Take Data. Partition 1. Read: {0, 14} (15-15) 2025-06-30T09:19:46.172978Z :DEBUG: Take Data. Partition 1. Read: {0, 15} (16-16) 2025-06-30T09:19:46.172996Z :DEBUG: Take Data. Partition 1. Read: {0, 16} (17-17) 2025-06-30T09:19:46.173001Z :DEBUG: Take Data. Partition 1. Read: {0, 17} (18-18) 2025-06-30T09:19:46.173005Z :DEBUG: Take Data. Partition 1. Read: {0, 18} (19-19) 2025-06-30T09:19:46.173010Z :DEBUG: Take Data. Partition 1. Read: {0, 19} (20-20) 2025-06-30T09:19:46.173014Z :DEBUG: Take Data. Partition 1. Read: {0, 20} (21-21) 2025-06-30T09:19:46.173019Z :DEBUG: Take Data. Partition 1. Read: {0, 21} (22-22) 2025-06-30T09:19:46.173024Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (23-23) 2025-06-30T09:19:46.173029Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (24-24) 2025-06-30T09:19:46.173033Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (25-25) 2025-06-30T09:19:46.173038Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (26-26) 2025-06-30T09:19:46.173042Z :DEBUG: Take Data. Partition 1. Read: {1, 4} (27-27) 2025-06-30T09:19:46.173047Z :DEBUG: Take Data. Partition 1. 
Read: {1, 5} (28-28) 2025-06-30T09:19:46.173052Z :DEBUG: Take Data. Partition 1. Read: {1, 6} (29-29) 2025-06-30T09:19:46.173056Z :DEBUG: Take Data. Partition 1. Read: {1, 7} (30-30) 2025-06-30T09:19:46.173061Z :DEBUG: Take Data. Partition 1. Read: {1, 8} (31-31) 2025-06-30T09:19:46.173066Z :DEBUG: Take Data. Partition 1. Read: {1, 9} (32-32) 2025-06-30T09:19:46.173080Z :DEBUG: Take Data. Partition 1. Read: {1, 10} (33-33) 2025-06-30T09:19:46.173085Z :DEBUG: Take Data. Partition 1. Read: {1, 11} (34-34) 2025-06-30T09:19:46.173089Z :DEBUG: Take Data. Partition 1. Read: {1, 12} (35-35) 2025-06-30T09:19:46.173094Z :DEBUG: Take Data. Partition 1. Read: {1, 13} (36-36) 2025-06-30T09:19:46.173102Z :DEBUG: Take Data. Partition 1. Read: {1, 14} (37-37) 2025-06-30T09:19:46.173107Z :DEBUG: Take Data. Partition 1. Read: {1, 15} (38-38) 2025-06-30T09:19:46.173111Z :DEBUG: Take Data. Partition 1. Read: {1, 16} (39-39) 2025-06-30T09:19:46.173116Z :DEBUG: Take Data. Partition 1. Read: {1, 17} (40-40) 2025-06-30T09:19:46.173120Z :DEBUG: Take Data. Partition 1. Read: {1, 18} (41-41) 2025-06-30T09:19:46.173125Z :DEBUG: Take Data. Partition 1. Read: {1, 19} (42-42) 2025-06-30T09:19:46.173129Z :DEBUG: Take Data. Partition 1. Read: {1, 20} (43-43) 2025-06-30T09:19:46.173134Z :DEBUG: Take Data. Partition 1. Read: {1, 21} (44-44) 2025-06-30T09:19:46.173138Z :DEBUG: Take Data. Partition 1. Read: {1, 22} (45-45) 2025-06-30T09:19:46.173143Z :DEBUG: Take Data. Partition 1. Read: {1, 23} (46-46) 2025-06-30T09:19:46.173147Z :DEBUG: Take Data. Partition 1. Read: {1, 24} (47-47) 2025-06-30T09:19:46.173151Z :DEBUG: Take Data. Partition 1. Read: {1, 25} (48-48) 2025-06-30T09:19:46.173156Z :DEBUG: Take Data. Partition 1. Read: {1, 26} (49-49) 2025-06-30T09:19:46.173160Z :DEBUG: Take Data. Partition 1. Read: {1, 27} (50-50) 2025-06-30T09:19:46.173174Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-30T09:19:46.173266Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 2 (51-100) 2025-06-30T09:19:46.173308Z :DEBUG: Take Data. Partition 2. Read: {0, 0} (51-51) 2025-06-30T09:19:46.173316Z :DEBUG: Take Data. Partition 2. Read: {0, 1} (52-52) 2025-06-30T09:19:46.173322Z :DEBUG: Take Data. Partition 2. Read: {0, 2} (53-53) 2025-06-30T09:19:46.173327Z :DEBUG: Take Data. Partition 2. Read: {0, 3} (54-54) 2025-06-30T09:19:46.173333Z :DEBUG: Take Data. Partition 2. Read: {0, 4} (55-55) 2025-06-30T09:19:46.173337Z :DEBUG: Take Data. Partition 2. Read: {0, 5} (56-56) 2025-06-30T09:19:46.173342Z :DEBUG: Take Data. Partition 2. Read: {0, 6} (57-57) 2025-06-30T09:19:46.173347Z :DEBUG: Take Data. Partition 2. Read: {0, 7} (58-58) 2025-06-30T09:19:46.173354Z :DEBUG: Take Data. Partition 2. Read: {0, 8} (59-59) 2025-06-30T09:19:46.173359Z :DEBUG: Take Data. Partition 2. Read: {0, 9} (60-60) 2025-06-30T09:19:46.173364Z :DEBUG: Take Data. Partition 2. Read: {0, 10} (61-61) 2025-06-30T09:19:46.173369Z :DEBUG: Take Data. Partition 2. Read: {0, 11} (62-62) 2025-06-30T09:19:46.173374Z :DEBUG: Take Data. Partition 2. Read: {0, 12} (63-63) 2025-06-30T09:19:46.173379Z :DEBUG: Take Data. Partition 2. Read: {0, 13} (64-64) 2025-06-30T09:19:46.173384Z :DEBUG: Take Data. Partition 2. Read: {0, 14} (65-65) 2025-06-30T09:19:46.173389Z :DEBUG: Take Data. Partition 2. Read: {0, 15} (66-66) 2025-06-30T09:19:46.173400Z :DEBUG: Take Data. Partition 2. Read: {0, 16} (67-67) 2025-06-30T09:19:46.173405Z :DEBUG: Take Data. Partition 2. 
Read: {0, 17} (68-68) 2025-06-30T09:19:46.173409Z :DEBUG: Take Data. Partition 2. Read: {0, 18} (69-69) 2025-06-30T09:19:46.173414Z :DEBUG: Take Data. Partition 2. Read: {0, 19} (70-70) 2025-06-30T09:19:46.173418Z :DEBUG: Take Data. Partition 2. Read: {0, 20} (71-71) 2025-06-30T09:19:46.173423Z :DEBUG: Take Data. Partition 2. Read: {0, 21} (72-72) 2025-06-30T09:19:46.173428Z :DEBUG: Take Data. Partition 2. Read: {1, 0} (73-73) 2025-06-30T09:19:46.173433Z :DEBUG: Take Data. Partition 2. Read: {1, 1} (74-74) 2025-06-30T09:19:46.173438Z :DEBUG: Take Data. Partition 2. Read: {1, 2} (75-75) 2025-06-30T09:19:46.173443Z :DEBUG: Take Data. Partition 2. Read: {1, 3} (76-76) 2025-06-30T09:19:46.173447Z :DEBUG: Take Data. Partition 2. Read: {1, 4} (77-77) 2025-06-30T09:19:46.173452Z :DEBUG: Take Data. Partition 2. Read: {1, 5} (78-78) 2025-06-30T09:19:46.173456Z :DEBUG: Take Data. Partition 2. Read: {1, 6} (79-79) 2025-06-30T09:19:46.173461Z :DEBUG: Take Data. Partition 2. Read: {1, 7} (80-80) 2025-06-30T09:19:46.173469Z :DEBUG: Take Data. Partition 2. Read: {1, 8} (81-81) 2025-06-30T09:19:46.173474Z :DEBUG: Take Data. Partition 2. Read: {1, 9} (82-82) 2025-06-30T09:19:46.173487Z :DEBUG: Take Data. Partition 2. Read: {1, 10} (83-83) 2025-06-30T09:19:46.173495Z :DEBUG: Take Data. Partition 2. Read: {1, 11} (84-84) 2025-06-30T09:19:46.173500Z :DEBUG: Take Data. Partition 2. Read: {1, 12} (85-85) 2025-06-30T09:19:46.173504Z :DEBUG: Take Data. Partition 2. Read: {1, 13} (86-86) 2025-06-30T09:19:46.173509Z :DEBUG: Take Data. Partition 2. Read: {1, 14} (87-87) 2025-06-30T09:19:46.173514Z :DEBUG: Take Data. Partition 2. Read: {1, 15} (88-88) 2025-06-30T09:19:46.173519Z :DEBUG: Take Data. Partition 2. Read: {1, 16} (89-89) 2025-06-30T09:19:46.173523Z :DEBUG: Take Data. Partition 2. Read: {1, 17} (90-90) 2025-06-30T09:19:46.173528Z :DEBUG: Take Data. Partition 2. Read: {1, 18} (91-91) 2025-06-30T09:19:46.173532Z :DEBUG: Take Data. Partition 2. Read: {1, 19} (92-92) 2025-06-30T09:19:46.173537Z :DEBUG: Take Data. Partition 2. Read: {1, 20} (93-93) 2025-06-30T09:19:46.173542Z :DEBUG: Take Data. Partition 2. Read: {1, 21} (94-94) 2025-06-30T09:19:46.173546Z :DEBUG: Take Data. Partition 2. Read: {1, 22} (95-95) 2025-06-30T09:19:46.173551Z :DEBUG: Take Data. Partition 2. Read: {1, 23} (96-96) 2025-06-30T09:19:46.173556Z :DEBUG: Take Data. Partition 2. Read: {1, 24} (97-97) 2025-06-30T09:19:46.173561Z :DEBUG: Take Data. Partition 2. Read: {1, 25} (98-98) 2025-06-30T09:19:46.173565Z :DEBUG: Take Data. Partition 2. Read: {1, 26} (99-99) 2025-06-30T09:19:46.173570Z :DEBUG: Take Data. Partition 2. Read: {1, 27} (100-100) 2025-06-30T09:19:46.173578Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-30T09:19:46.173622Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-30T09:19:46.174072Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.174078Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.174083Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.174192Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-30T09:19:46.174274Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.174327Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.174441Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.274718Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.274819Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-30T09:19:46.274849Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.274858Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-30T09:19:46.274899Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-30T09:19:46.475176Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-06-30T09:19:46.575465Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-30T09:19:46.575566Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-30T09:19:46.575656Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-30T09:19:46.576129Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.576134Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.576139Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.576219Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.576326Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.576402Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.576482Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.676884Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.676973Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-30T09:19:46.677006Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.677014Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-30T09:19:46.677045Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-06-30T09:19:46.677083Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-30T09:19:46.677130Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-30T09:19:46.677161Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 2025-06-30T09:19:46.677206Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> AutoConfig::GetASPoolsith1CPU [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests10Inflight1BlobSize2000000 [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests100Inflight1BlobSize2000000 >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks [GOOD] >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime [GOOD] >> ReadSessionImplTest::PartitionStreamStatus [GOOD] >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] >> DataShardReadIterator::ShouldFailWrongSchema [GOOD] >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChange |80.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |80.1%| [LD] {RESULT} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |80.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsith1CPU [GOOD] >> CommitOffset::DistributedTxCommit_ChildFirst [GOOD] >> CommitOffset::DistributedTxCommit_CheckSessionResetAfterCommit >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit101 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit198 >> DataShardReadIterator::ShouldReadRangeOneByOne [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix1 >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder+EvWrite ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] Test command err: 2025-06-30T09:19:46.396227Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.396239Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.396242Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.396350Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.396479Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.397656Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.397753Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.397998Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:19:46.398059Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:19:46.398133Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-30T09:19:46.398148Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:19:46.398172Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.398178Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-30T09:19:46.398185Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 8 bytes 2025-06-30T09:19:46.398188Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-30T09:19:46.398516Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.398519Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.398522Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.398574Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.398656Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.398704Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.398732Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-30T09:19:46.398879Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:19:46.398902Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-30T09:19:46.398936Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-30T09:19:46.398949Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-30T09:19:46.398978Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.398984Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-30T09:19:46.398990Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-30T09:19:46.399023Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Getting new event 2025-06-30T09:19:46.399039Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-30T09:19:46.399042Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-30T09:19:46.399044Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-30T09:19:46.399059Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 Getting new event 2025-06-30T09:19:46.399072Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-30T09:19:46.399074Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-30T09:19:46.399076Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-30T09:19:46.399083Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 Getting new event 2025-06-30T09:19:46.399086Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-30T09:19:46.399089Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-30T09:19:46.399091Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-30T09:19:46.399102Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 2025-06-30T09:19:46.399344Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.399347Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.399349Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.399408Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-30T09:19:46.399480Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.399546Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.399588Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 100 Compressed message data size: 91 2025-06-30T09:19:46.399710Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:19:46.399747Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-30T09:19:46.399775Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-30T09:19:46.399786Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-30T09:19:46.399802Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.399821Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-30T09:19:46.399836Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 2). Partition stream id: 1 Getting new event 2025-06-30T09:19:46.399858Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-30T09:19:46.399861Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-30T09:19:46.399869Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [2, 3). Partition stream id: 1 Getting new event 2025-06-30T09:19:46.399874Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-30T09:19:46.399877Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-30T09:19:46.399886Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 4). Partition stream id: 1 Getting new event 2025-06-30T09:19:46.399891Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-30T09:19:46.399894Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStream ... 
tream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 190 SeqNo: 231 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 191 SeqNo: 232 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 192 SeqNo: 233 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 193 SeqNo: 234 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 194 SeqNo: 235 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 195 SeqNo: 236 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 196 SeqNo: 237 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 197 SeqNo: 238 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 198 SeqNo: 239 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 199 SeqNo: 240 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 200 SeqNo: 241 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-30T09:19:47.217965Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 201). Partition stream id: 1 2025-06-30T09:19:47.246512Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-30T09:19:47.246519Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-30T09:19:47.246523Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:47.246643Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:47.246793Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:47.246864Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-30T09:19:47.246926Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 1000000 Compressed message data size: 3028 Post function Getting new event 2025-06-30T09:19:47.272313Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-10) 2025-06-30T09:19:47.272586Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:47.272881Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-30T09:19:47.273314Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-30T09:19:47.273456Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-30T09:19:47.274130Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-30T09:19:47.274270Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-30T09:19:47.274410Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (7-7) 2025-06-30T09:19:47.274557Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (8-8) 2025-06-30T09:19:47.275756Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (9-9) 2025-06-30T09:19:47.275895Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (10-10) 2025-06-30T09:19:47.275918Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 10, size 10000000 bytes 2025-06-30T09:19:47.275992Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 9 SeqNo: 50 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 51 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-30T09:19:47.277705Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 11). Partition stream id: 1 2025-06-30T09:19:47.278308Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:47.278314Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:47.278319Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:47.278398Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-30T09:19:47.278523Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:47.278886Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:47.278954Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:47.279044Z :DEBUG: [db] [sessionid] [cluster] Requesting status for partition stream id: 1 2025-06-30T09:19:47.279311Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:47.279314Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:47.279317Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:47.279394Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:47.279544Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:47.279586Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:47.279720Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:47.279758Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:19:47.279781Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:47.279790Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-30T09:19:47.279826Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 >> DataShardReadIteratorConsistency::BrokenWriteLockBeforeIteration [GOOD] >> DataShardReadIteratorConsistency::BrokenWriteLockDuringIteration >> AutoConfig::GetASPoolsWith2CPUs [GOOD] >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests1Inflight1BlobSize1000 [GOOD] >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests10Inflight1BlobSize1000 >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable-UseSimilarity >> DataShardVolatile::CompactedVolatileChangesAbort [GOOD] >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck [GOOD] |80.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |80.1%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |80.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith2CPUs [GOOD] >> KqpVectorIndexes::OrderByCosineLevel1+Nullable-UseSimilarity >> TCdcStreamTests::MeteringDedicated [GOOD] >> TCdcStreamTests::ChangeOwner |80.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |80.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |80.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk >> DataShardReadIterator::ShouldReadNonExistingKey [GOOD] >> DataShardReadIterator::ShouldReadNotExistingRange >> DataShardReadIterator::ShouldReadKeyPrefix2 [GOOD] >> DataShardReadIterator::ShouldReadKeyPrefix3 >> ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns [GOOD] |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |80.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk >> TCdcStreamTests::ChangeOwner [GOOD] >> TCdcStreamTests::DropIndexWithStream >> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips-EvWrite >> KqpIndexes::UniqIndexComplexPkComplexFkOverlap ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_volatile/unittest >> DataShardVolatile::CompactedVolatileChangesAbort [GOOD] Test command err: 2025-06-30T09:18:56.190219Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:56.190304Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:56.190322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002df1/r3tmp/tmpERmbZ3/pdisk_1.dat 2025-06-30T09:18:56.299873Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:56.301079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:56.319233Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:56.320506Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275135648562 != 1751275135648566 2025-06-30T09:18:56.363118Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:62:2109] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:18:56.363487Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:18:56.363628Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:56.363652Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:56.374336Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:56.542450Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:62:2109] Handle TEvProposeTransaction 2025-06-30T09:18:56.542482Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:62:2109] TxId# 281474976715657 ProcessProposeTransaction 2025-06-30T09:18:56.542515Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:62:2109] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:605:2513] 2025-06-30T09:18:56.565858Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:605:2513] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-30T09:18:56.565901Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:605:2513] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:18:56.566127Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:605:2513] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:18:56.566144Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:605:2513] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:18:56.566208Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:605:2513] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:18:56.566246Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:605:2513] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 1000 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:18:56.566262Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:605:2513] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-30T09:18:56.566722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:56.566888Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:605:2513] txid# 281474976715657 HANDLE EvClientConnected 2025-06-30T09:18:56.567093Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:605:2513] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-30T09:18:56.567105Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:605:2513] txid# 281474976715657 SEND to# [1:557:2483] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-30T09:18:56.581956Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:18:56.582223Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:18:56.582319Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:18:56.582388Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:56.592487Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:18:56.592723Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:56.592754Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:56.592980Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:56.592992Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:56.592999Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:18:56.593069Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-30T09:18:56.593100Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:56.593115Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:18:56.603474Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:56.609180Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:56.609280Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:56.609311Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:18:56.609318Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:56.609325Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:56.609331Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:56.609411Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:18:56.609420Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:18:56.609531Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:56.609557Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:56.609579Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:56.609589Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:56.609598Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:18:56.609605Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:18:56.609613Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:18:56.609620Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:56.609626Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:56.609736Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:635:2536], Recipient [1:630:2534]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:56.609744Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:56.609751Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:627:2532], serverId# [1:635:2536], 
sessionId# [0:0:0] 2025-06-30T09:18:56.609761Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:635:2536] 2025-06-30T09:18:56.609766Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:18:56.609786Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:56.609837Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:18:56.609848Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:56.609868Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:56.609885Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09 ... ARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:47.806889Z node 26 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [26:932:2738], serverId# [26:933:2739], sessionId# [0:0:0] 2025-06-30T09:19:47.806908Z node 26 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553210, Sender [26:931:2737], Recipient [26:661:2549]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 2 } CompactBorrowed: false 2025-06-30T09:19:47.806917Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} queued, type NKikimr::NDataShard::TDataShard::TTxCompactTable 2025-06-30T09:19:47.806920Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:19:47.806931Z node 26 :TABLET_EXECUTOR DEBUG: TCompactionLogic PrepareForceCompaction for 72075186224037888 table 1001, mode Full, forced state None, forced mode Full 2025-06-30T09:19:47.806941Z node 26 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 1 of 72075186224037888 tableId# 2 localTid# 1001, requested from [26:931:2737], partsCount# 0, memtableSize# 656, memtableWaste# 3952, memtableRows# 2 2025-06-30T09:19:47.806948Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} hope 1 -> done Change{16, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-30T09:19:47.806951Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:19:47.806984Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy PrepareCompaction for 72075186224037888: task 1, edge 9223372036854775807/0, generation 0 2025-06-30T09:19:47.806988Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:16} starting compaction 2025-06-30T09:19:47.807009Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} starting Scan{1 on 1001, Compact{72075186224037888.1.16, eph 1}} 2025-06-30T09:19:47.807015Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} 
started compaction 1 2025-06-30T09:19:47.807017Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy PrepareCompaction for 72075186224037888 started compaction 1 generation 0 ... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 12238895482980113683 2025-06-30T09:19:47.807222Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} Compact 1 on TGenCompactionParams{1001: gen 0 epoch +inf, 0 parts} step 16, product {tx status + 1 parts epoch 2} done 2025-06-30T09:19:47.807243Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CompactionFinished for 72075186224037888: compaction 1, generation 0 2025-06-30T09:19:47.807253Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CheckGeneration for 72075186224037888 generation 1, state Free, final id 0, final level 0 2025-06-30T09:19:47.807255Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CheckGeneration for 72075186224037888 generation 3, state Free, final id 0, final level 0 2025-06-30T09:19:47.807283Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 1, ts 1970-01-01T00:00:01.504317Z 2025-06-30T09:19:47.807288Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} queued, type NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs 2025-06-30T09:19:47.807291Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:19:47.807296Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 1, front# 1 2025-06-30T09:19:47.807301Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001 sending TEvCompactTableResult to# [26:931:2737]pathId# [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-30T09:19:47.807345Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} hope 1 -> done Change{17, redo 83b alter 0b annex 0, ~{ 27 } -{ }, 0 gb} 2025-06-30T09:19:47.807350Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} release 4194304b of static, Memory{0 dyn 0} ... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 6601355369047869555 ... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 14081377201486073883 ========= Starting an immediate read ========= 2025-06-30T09:19:47.819940Z node 26 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz025bjzb128jk2y2dxqtrkv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=26&id=MTM3YzU2OWUtMTI4ZmZlMzYtMzM2N2ZlMDUtNmU1MTc5YmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:19:47.820193Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037888] send [26:864:2683] 2025-06-30T09:19:47.820200Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [26:864:2683] 2025-06-30T09:19:47.820246Z node 26 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [26:957:2745], Recipient [26:661:2549]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false KeysSize: 1 2025-06-30T09:19:47.820270Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline 2025-06-30T09:19:47.820276Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:19:47.820293Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-30T09:19:47.820300Z node 26 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1505/281474976715662 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:19:47.820306Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v1505/18446744073709551615 2025-06-30T09:19:47.820314Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CheckRead 2025-06-30T09:19:47.820330Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-30T09:19:47.820336Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:19:47.820341Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:19:47.820346Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:19:47.820359Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037888 2025-06-30T09:19:47.820365Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-30T09:19:47.820368Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:19:47.820373Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:19:47.820377Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:19:47.820392Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: 
false } 2025-06-30T09:19:47.820431Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is DelayComplete 2025-06-30T09:19:47.820436Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:19:47.820443Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:19:47.820448Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:19:47.820459Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-30T09:19:47.820463Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:19:47.820468Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-06-30T09:19:47.820473Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:19:47.820488Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{18, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-30T09:19:47.820497Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:19:47.922978Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-30T09:19:47.923018Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:19:47.923077Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{12, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-30T09:19:47.923088Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:19:47.923255Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:14} commited cookie 1 for step 13 2025-06-30T09:19:47.923309Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [26:507:2447] 2025-06-30T09:19:47.923317Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [26:507:2447] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit198 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit900 >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChange [GOOD] >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChangeExhausted >> TCdcStreamTests::DropIndexWithStream [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix1 [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix2 >> TCdcStreamTests::DropTableWithIndexWithStream ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_external_blobs/unittest >> 
ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns [GOOD] Test command err: 2025-06-30T09:19:46.354937Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:46.355000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:46.355011Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003166/r3tmp/tmpezm8tR/pdisk_1.dat 2025-06-30T09:19:46.444711Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:19:46.445629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:46.462002Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:46.463102Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275185986646 != 1751275185986650 2025-06-30T09:19:46.505346Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:46.505406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:46.516069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:46.590304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:46.812876Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:701:2582], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:46.812917Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:712:2587], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:46.812934Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:46.814244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:46.857547Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:46.976052Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:715:2590], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:19:47.015383Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:786:2630] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:47.085819Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz025akw11prggmwwvdtwpfx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjRiNGE2MTYtYmNiZmFkZGItN2M0OTk4YmEtOTk5NmVmNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.101254Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz025awhaczrrbdy49p6ewwp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTA4MjEwYjQtMmVmYmJiOTUtNTlhNjg4NTItMjE3NTIzNTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.112508Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz025awzdck2pkqm7nqcddxv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTZlMTFiMzUtODYwZGRiZmYtYzEyM2ZhYzktNjBjODUxZjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.123200Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz025axabeqtzgj3v08v7pdq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjhkODAwOTQtZjM1YzRlMTgtYWMzOGNiMS02NmI1MDQ0Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.134186Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jz025axne68cec5vzknxt1yb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY0YzdmZC1mZWRlYzdiYy0xNzlhNTIzZi03MzdiMzUzZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.147825Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz025ay0ezvktn1rvzk6qr3t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTYwYmU0MDktOTg4M2U0NWItOTU3NTE0ZmYtZGQ0Y2YxZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.160424Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jz025ayd0t754a54t9ynk1t8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjNiYjYzMDUtZWQyM2FiNGYtZjY5ZDkzY2EtOTJjYzNmNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.173931Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz025aytdbp59sds82j6fgns, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2NhNjM1NGUtYmI2MjE1ZjQtMTNjMzUxMjctMTFjNTQ2MDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.185732Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jz025az86zkncc2m2517ptts, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGFiNzY2YTAtYTQwZjJiNDEtOGNjOWJhODYtNWVjZTBiOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.196812Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jz025azk8860phjvvs8ptc1n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDkzNGFkOTItMzBjNWZkYTctODgxYmY3MzgtMWU5MGFhNTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.209707Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jz025azyadnhaqcc162zpfw6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTJjMTY5MWUtMWU2NjJhZC1mMGIyODBjYy01YTI5NGMzMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.225530Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jz025b0bddr9jhv3qbpsmzwk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjEzZmQwNjgtOTNiNzYwYzUtYmU2NWFhZjctZTU3ZGRkYTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.240519Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jz025b0v38q9w0cxtc3vjvzk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTQzNTE0NjQtZGExOGFiN2QtNGEwMmYzNGEtYTE1ZTZlOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.255869Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jz025b1a8mmjz1bjkwpk42hm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODBmNGJlZDQtZDU1NDMyZTItZTkwODZlMjYtMWI1MWUzM2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.271112Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jz025b1s01m57dn4fz4h809y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjUxNjhkMDQtMzA3MzAyY2ItYTcxMzBkNDgtMzQ2ZWQ4ZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.286489Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz025b2948qhhyg1dgpjezqv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2VmODY0MzMtY2Q0OGZhODQtMTJiMzdlZWUtMzlkMjdkZGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.302155Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jz025b2sf1f5956aqwmed7ja, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzNmZDA5ZGItNjdiMmZkYTAtNTIwMTY2YjAtYTkwYjM4MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.316770Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jz025b3820c3zph6xb3gmwwr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzJhMzYzNDEtNWQwMDM0MGUtMmQ2ZWIzMmQtZjc0YTdiNmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.331446Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. 
Ctx: { TraceId: 01jz025b3p1w29dyccsgnt8ajv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTY4ZDM1MjMtMzBkMDA1MjgtMTgxNWM0NDAtM2E1MDc0OGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.346120Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jz025b45b87kgkgff1kz21yt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmYwNDE4ZDYtZThhNGZjZjUtNGYzYWVlY2QtZGNhNTQ3NTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:47.360917Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 28147 ... anner.cpp:120: TxId: 281474976715727. Ctx: { TraceId: 01jz025brra98zvqrkssk8e7rb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzE4MTUwMmItNGNhNDVhOTUtOTE0NGZhOTQtOTIyYzEwOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.023917Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715728. Ctx: { TraceId: 01jz025bs9azbg7n2mv0rwavmz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM3ODAyZDMtMzg1YjllMjctZDZkNjQwMTYtYjY3YzE4YjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.041492Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715729. Ctx: { TraceId: 01jz025bst6k68esq2wm1rmydz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDQ0ZjMzZTQtNjIwZDg1OGItOTc0OGI3MmYtZWEwNTVjNGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.058772Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715730. Ctx: { TraceId: 01jz025btb23a1ez3vds7ee67x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTI0MGM1NS0xNWNlNzIyYS0zZWI4MTgzZi1lZmU2MjU5MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.075443Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715731. Ctx: { TraceId: 01jz025btx2vjfwv5cp70p2e74, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWEzNGQ3NzktNzUxODFmZGYtMmU4NmYyZjYtOTA1ZmMzMjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.090231Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715732. Ctx: { TraceId: 01jz025bvd1dah2k8wgnmckfvp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWU5ZmUyMTctN2Q4NDU3NTItZmMxZDAwZDItZDA2MzYxZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.101936Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715733. Ctx: { TraceId: 01jz025bvw0ejpv8tvgqxhxgn6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTg0Y2E1MTItNmE4YTZmMDEtODIyODg0OTMtZGJhZmE5Mg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.112106Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715734. Ctx: { TraceId: 01jz025bw72h6sxsy8aw46c9c0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Y3NmFkYzUtYTM5Njg3NzgtNTkwNzg0YjktYmEyMWRjOTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:19:48.123270Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715735. Ctx: { TraceId: 01jz025bwh6r703a1n9b0btamb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWE3NGIwN2QtYTMyYjkxYzQtOGRkNzRkYTMtN2ZlYzgyNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.134316Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715736. Ctx: { TraceId: 01jz025bwx8rwmfpxax5pt5cyg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGU0MzNiMjctZmEzNjc5YjktOTAyMWNiNWYtMTNmY2FlODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.145554Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715737. Ctx: { TraceId: 01jz025bx83rjjnyqcqnvwf3b8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2QxZjgzNjctMTAzNmYzODQtNjI2YTYxZWEtNTljYzg1ZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.158803Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715738. Ctx: { TraceId: 01jz025bxk1844gd6qkz4va6jr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTFjMDk1MDgtNWExZGVhNTItOGVhMmI5M2EtZTcyZmU5YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.169228Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715739. Ctx: { TraceId: 01jz025by0ampaxsqr853hg21v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjdiMzU3ZmUtZDg2MGY4MGUtZWE4MmU4N2EtNDlhYmU4NzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.179851Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715740. Ctx: { TraceId: 01jz025byb99rqckzdn35eg2b1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODk4ZTY3Y2YtNTM3ODFhMTYtMTYyNTZiOGUtZTY4NWQ0NjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.190714Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715741. Ctx: { TraceId: 01jz025byn457kgbd1bkazvfaz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2Y5MjM5NjAtZDU2Y2IwNTUtZmNmNWRjY2YtOTc1YjE4OGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.203326Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715742. Ctx: { TraceId: 01jz025bz04pn3dr3359gjzbc4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGI4MGE2ZjYtMmE2YzNmYmUtMjM5MmRlOTctMzA4NTY3NGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.219739Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715743. Ctx: { TraceId: 01jz025bzd2a87j8hvywj95404, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mjk5YmU2OC01OGE1OTA5NS01NmU1NTE3LTMxMWRhNjEy, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.234063Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jz025bzy0gsfcjqyx5nzc6kr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTEzYzIwMy0zNzU2Y2IwYS0yMjI1NWU5Yy03ZWJkNDFjYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:19:48.244735Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jz025c0b4jjab1qkvcj1cwn5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTdlNWVlMTQtMzAxNTgxZDktYWU4MTA5MTktNWFjNzZiZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.255094Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715746. Ctx: { TraceId: 01jz025c0pc31yat3g523j5sha, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTk4ZjQ5MjUtY2E1NjFmMzUtNDUzMTU4LTRkMDRjZWRi, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.265408Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715747. Ctx: { TraceId: 01jz025c101xsxh9w464b4dt14, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWM4M2UwNTUtYjEyYjQ5MzAtZjQ1MzFiNWEtNGQ5ZTc2ZmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.276112Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715748. Ctx: { TraceId: 01jz025c1b3z6jkh5fvsp2e6r5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmEyOWRhODMtZDQ0MmE3NDItZDE2YjA4YmYtMmJhN2JlYWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.288431Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715749. Ctx: { TraceId: 01jz025c1n9ke0w6bdzecswknx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2UxNTYxMDUtMzRhZjc0NmEtYmU3ODY1NTEtN2Y5MTNjYzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.299700Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715750. Ctx: { TraceId: 01jz025c223ghyt554g4mx5649, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGNkZDNhZjctZWY3NDgwYTEtY2I5ZmNiZGUtYzExMTU3OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.311139Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715751. Ctx: { TraceId: 01jz025c2d3pzrm0k4zwg6y7km, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzNhOWM4MjctNzY4MjQ5OGYtNGRkMzYzYTAtZWJmYzE5MWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.322546Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715752. Ctx: { TraceId: 01jz025c2ra5xdys4jr2phewzz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2VmYzE1ZWItYzRkYzEwZTQtNDlkNThiZjQtNmNiYmIwZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.334020Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715753. Ctx: { TraceId: 01jz025c34a5h3t7ba38g3ktvh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmVhNjRmMGQtNmY2MzQ4NzAtYjY1YzdjMWUtZWNjNjcwMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.346628Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715754. Ctx: { TraceId: 01jz025c3fdqngpyjam11a9cc1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTE3YjMwYjgtOWE0Y2ExMjItNTJhMmI3ZDctNmZiNDkyNzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:19:48.357082Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715755. Ctx: { TraceId: 01jz025c3wc4qvdj2b3s7yhzts, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTdkZDgzNjItOGU4Mzc1MjMtMmIwNDlhN2QtYWM2N2YzYWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.370282Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715756. Ctx: { TraceId: 01jz025c46f61tzvcvh0xh0wdd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjZmNDhjMWUtZmNlMjc1OWEtMTQ5ZTY2ZjktNmJmYjc0NjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.384104Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715757. Ctx: { TraceId: 01jz025c4m43kvtwe7p8c4v5a0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDczZWY4YjQtODFiNTg3MmQtNTRkM2ZmMi02ZDE5YWFmZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.397629Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715758. Ctx: { TraceId: 01jz025c52675themed21ehq35, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThmNTM3NS0zYmU0NDEzYy00MmFiYWYwYy1lZThkMzhiYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.411373Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715759. Ctx: { TraceId: 01jz025c5f535s5tyv4qvwcdyc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmU3ODkzMjgtNDAxMmM3ZDMtNmRkZmFhNGYtMTZkY2RjMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:19:48.436411Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715760. Ctx: { TraceId: 01jz025c5z08epkgrzy64hwcjj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Q1MWVlZDYtYmMzNjI4MTUtMzBiM2Q2ZTItMjJlYzc1Yzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_volatile/unittest >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck [GOOD] Test command err: 2025-06-30T09:18:56.088511Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:18:56.088606Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:18:56.088623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e05/r3tmp/tmpBO9ghp/pdisk_1.dat 2025-06-30T09:18:56.199561Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:18:56.200567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:56.218180Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:56.219465Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275135538053 != 1751275135538057 2025-06-30T09:18:56.264683Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:62:2109] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:18:56.264987Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:18:56.265132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:56.265155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:56.278617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:56.453173Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:62:2109] Handle TEvProposeTransaction 2025-06-30T09:18:56.453210Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:62:2109] TxId# 281474976715657 ProcessProposeTransaction 2025-06-30T09:18:56.453252Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:62:2109] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:605:2513] 2025-06-30T09:18:56.479003Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:605:2513] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value2" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-30T09:18:56.479064Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:605:2513] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:18:56.479362Z node 1 :TX_PROXY DEBUG: 
schemereq.cpp:1660: Actor# [1:605:2513] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:18:56.479381Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:605:2513] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:18:56.479461Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:605:2513] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:18:56.479502Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:605:2513] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 1000 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:18:56.479522Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:605:2513] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-30T09:18:56.480091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:56.480260Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:605:2513] txid# 281474976715657 HANDLE EvClientConnected 2025-06-30T09:18:56.480511Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:605:2513] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-30T09:18:56.480524Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:605:2513] txid# 281474976715657 SEND to# [1:557:2483] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-30T09:18:56.497020Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:18:56.497392Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:18:56.497533Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:18:56.497622Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:18:56.509951Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:18:56.510291Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:18:56.510342Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:18:56.510587Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:18:56.510602Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:18:56.510611Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 
2025-06-30T09:18:56.510698Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:18:56.510761Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:18:56.510785Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:18:56.521405Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:18:56.528925Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:18:56.529058Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:18:56.529103Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:18:56.529111Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:18:56.529118Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:18:56.529126Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:18:56.529233Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:18:56.529243Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:18:56.529390Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:18:56.529426Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:18:56.529455Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:18:56.529465Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:18:56.529476Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:18:56.529484Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:18:56.529491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:18:56.529499Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:18:56.529506Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:18:56.529659Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:635:2536], Recipient [1:630:2534]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:56.529667Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:56.529677Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at 
leader tablet# 72075186224037888, clientId# [1:627:2532], serverId# [1:635:2536], sessionId# [0:0:0] 2025-06-30T09:18:56.529690Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:635:2536] 2025-06-30T09:18:56.529696Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:18:56.529727Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:18:56.529795Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:18:56.529810Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:18:56.529834Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:18:56.529862Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:28147 ... pp:1916: Add [0:7] at 72075186224037889 to execution unit ExecuteRead 2025-06-30T09:19:48.099760Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037889 on unit ExecuteRead 2025-06-30T09:19:48.099768Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1519 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1000 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1000 } 2025-06-30T09:19:48.099787Z node 28 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v1519/18446744073709551615 2025-06-30T09:19:48.099791Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[28:1053:2823], 1} after executionsCount# 1 2025-06-30T09:19:48.099794Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[28:1053:2823], 1} sends rowCount# 1, bytes# 32, quota rows left# 999, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:19:48.099801Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[28:1053:2823], 1} finished in read 2025-06-30T09:19:48.099806Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037889 is Executed 2025-06-30T09:19:48.099808Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037889 executing on unit ExecuteRead 2025-06-30T09:19:48.099812Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037889 to execution unit CompletedOperations 2025-06-30T09:19:48.099816Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037889 on unit CompletedOperations 2025-06-30T09:19:48.099823Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037889 is Executed 2025-06-30T09:19:48.099827Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037889 executing on unit 
CompletedOperations 2025-06-30T09:19:48.099833Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037889 has finished 2025-06-30T09:19:48.099837Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-30T09:19:48.099850Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037889:1:16} Tx{33, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{16, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-30T09:19:48.099855Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037889:1:16} Tx{33, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:19:48.099859Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-30T09:19:48.099922Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037889] send [28:903:2711] 2025-06-30T09:19:48.099925Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037889] push event to server [28:903:2711] 2025-06-30T09:19:48.099962Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [28:1053:2823], Recipient [28:668:2553]: NKikimrTxDataShard.TEvReadCancel ReadId: 1 2025-06-30T09:19:48.099967Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 1 } 2025-06-30T09:19:48.099986Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037890] ::Bootstrap [28:1056:2826] 2025-06-30T09:19:48.099989Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037890] lookup [28:1056:2826] 2025-06-30T09:19:48.100006Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037890] queue send [28:1056:2826] 2025-06-30T09:19:48.100014Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037890] forward result local node, try to connect [28:1056:2826] 2025-06-30T09:19:48.100018Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037890]::SendEvent [28:1056:2826] 2025-06-30T09:19:48.100043Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [28:1057:2827], Recipient [28:1009:2795]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:48.100048Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:48.100054Z node 28 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [28:1056:2826], serverId# [28:1057:2827], sessionId# [0:0:0] 2025-06-30T09:19:48.100060Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037890] connected with status OK role: Leader [28:1056:2826] 2025-06-30T09:19:48.100064Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037890] send queued [28:1056:2826] 2025-06-30T09:19:48.100066Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037890] push event to server [28:1056:2826] 2025-06-30T09:19:48.100085Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [28:1053:2823], Recipient [28:1009:2795]: NKikimrTxDataShard.TEvRead ReadId: 2 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 
1519 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 RangesSize: 1 2025-06-30T09:19:48.100094Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline 2025-06-30T09:19:48.100098Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:19:48.100108Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 0 2025-06-30T09:19:48.100113Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CheckRead 2025-06-30T09:19:48.100118Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-30T09:19:48.100121Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CheckRead 2025-06-30T09:19:48.100123Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit BuildAndWaitDependencies 2025-06-30T09:19:48.100126Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-30T09:19:48.100130Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:1] at 72075186224037890 2025-06-30T09:19:48.100134Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-30T09:19:48.100137Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-30T09:19:48.100139Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit ExecuteRead 2025-06-30T09:19:48.100141Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit ExecuteRead 2025-06-30T09:19:48.100149Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 1, request: { ReadId: 2 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1519 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 } 2025-06-30T09:19:48.100165Z node 28 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037890 promoting UnprotectedReadEdge to v1519/18446744073709551615 2025-06-30T09:19:48.100168Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037890 Complete read# {[28:1053:2823], 2} after executionsCount# 1 2025-06-30T09:19:48.100172Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037890 read iterator# {[28:1053:2823], 2} sends rowCount# 1, bytes# 32, quota rows left# 998, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:19:48.100177Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037890 read iterator# {[28:1053:2823], 2} finished in read 2025-06-30T09:19:48.100181Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution 
status for [0:1] at 72075186224037890 is Executed 2025-06-30T09:19:48.100184Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit ExecuteRead 2025-06-30T09:19:48.100186Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit CompletedOperations 2025-06-30T09:19:48.100188Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CompletedOperations 2025-06-30T09:19:48.100192Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-30T09:19:48.100194Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CompletedOperations 2025-06-30T09:19:48.100197Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1] at 72075186224037890 has finished 2025-06-30T09:19:48.100200Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2025-06-30T09:19:48.100208Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{17, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-30T09:19:48.100212Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:19:48.100215Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 2025-06-30T09:19:48.100286Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037890] send [28:1056:2826] 2025-06-30T09:19:48.100290Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037890] push event to server [28:1056:2826] 2025-06-30T09:19:48.100302Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [28:1053:2823], Recipient [28:1009:2795]: NKikimrTxDataShard.TEvReadCancel ReadId: 2 2025-06-30T09:19:48.100306Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037890 ReadCancel: { ReadId: 2 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 11 } items { uint32_value: 111 } }, { items { uint32_value: 21 } items { uint32_value: 21 } } >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests10Inflight1BlobSize1000 [GOOD] >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests100Inflight1BlobSize1000 >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReBalancingAfterSplit_sessionsWithPartition >> DataShardReadIteratorConsistency::BrokenWriteLockDuringIteration [GOOD] >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRetryAndRestart >> TBlobStorageWardenTest::TestCreatePDiskAndGroup >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder-EvWrite >> TCdcStreamTests::DropTableWithIndexWithStream [GOOD] >> DataShardReadIterator::ShouldReadNotExistingRange [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk1_100 >> DataShardReadIterator::ShouldReadKeyPrefix3 [GOOD] >> DataShardReadIterator::ShouldReadHeadFromFollower ------- [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream/unittest >> TCdcStreamTests::DropTableWithIndexWithStream [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:17:43.683917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:17:43.683940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:43.683945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:17:43.683949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:17:43.683953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:17:43.683957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:17:43.683964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:17:43.683978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:17:43.684071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:17:43.684157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:17:43.704303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:17:43.704331Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:17:43.707456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:17:43.707517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:17:43.707558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:17:43.709531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:17:43.709621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:17:43.709766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 
2025-06-30T09:17:43.709852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:17:43.710726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:43.710798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:17:43.711125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:43.711141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:17:43.711170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:17:43.711186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:17:43.711195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:17:43.711220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:17:43.713228Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:17:43.732572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:17:43.732672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:43.732765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:17:43.732776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:17:43.732838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:17:43.732858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:17:43.733839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: 
TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:43.733880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:17:43.733960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:43.733972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:17:43.733979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:17:43.733986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:17:43.734529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:43.734541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:17:43.734547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:17:43.734898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:43.734911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:17:43.734930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:43.734940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:17:43.735503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:17:43.735921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:17:43.735966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:17:43.736198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:17:43.736229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:17:43.736238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:43.736315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:17:43.736324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:17:43.736360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:17:43.736374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:17:43.736990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:17:43.737005Z node 1 :FLAT_TX_SCHEMESHARD ... scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:19:49.524936Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-30T09:19:49.525197Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:19:49.525208Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:19:49.525211Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-30T09:19:49.525214Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:19:49.525217Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:19:49.525228Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 4/5, is published: true 2025-06-30T09:19:49.525409Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:19:49.525416Z node 20 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:19:49.525445Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:19:49.525459Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 5/5 2025-06-30T09:19:49.525462Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-30T09:19:49.525465Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 5/5 2025-06-30T09:19:49.525467Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-30T09:19:49.525470Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 5/5, is published: true 2025-06-30T09:19:49.525478Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [20:383:2349] message: TxId: 103 2025-06-30T09:19:49.525482Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-30T09:19:49.525486Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-30T09:19:49.525489Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:0 2025-06-30T09:19:49.525502Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:19:49.525506Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:1 2025-06-30T09:19:49.525508Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:1 2025-06-30T09:19:49.525511Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:19:49.525516Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:2 2025-06-30T09:19:49.525519Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:2 2025-06-30T09:19:49.525523Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:19:49.525525Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:3 2025-06-30T09:19:49.525528Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:3 2025-06-30T09:19:49.525531Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:19:49.525533Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation 
and all the parts is done, operation id: 103:4 2025-06-30T09:19:49.525535Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:4 2025-06-30T09:19:49.525541Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-30T09:19:49.525602Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:19:49.525641Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:19:49.525803Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:19:49.525811Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-06-30T09:19:49.525823Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-30T09:19:49.525830Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:19:49.525836Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:19:49.525955Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:19:49.525965Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:19:49.525982Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:19:49.526331Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:19:49.526399Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:19:49.526407Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [20:765:2666] 2025-06-30T09:19:49.526808Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-06-30T09:19:49.526929Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 
2025-06-30T09:19:49.526978Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream" took 59us result status StatusPathDoesNotExist 2025-06-30T09:19:49.527015Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/Index/indexImplTable/Stream\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table/Index/indexImplTable\' (id: [OwnerId: 72057594046678944, LocalPathId: 4]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Table/Index/indexImplTable/Stream" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:19:49.527073Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:19:49.527090Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" took 19us result status StatusPathDoesNotExist 2025-06-30T09:19:49.527111Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/Index/indexImplTable/Stream/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table/Index/indexImplTable\' (id: [OwnerId: 72057594046678944, LocalPathId: 4]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut_fat/unittest |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut_fat/unittest >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests100Inflight1BlobSize2000000 [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests2Inflight2BlobSize2000000 >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadFromTimestamp_BeforeAutoscaleAwareSDK >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table [GOOD] |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut_fat/unittest >> 
CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests100Inflight1BlobSize1000 [GOOD] >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests10Inflight10BlobSize1000 >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit900 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit900 >> TxUsage::WriteToTopic_Demo_21_RestartNo_Table [GOOD] >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRetryAndRestart [GOOD] >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRestartWithStateMigrationRetryAndRestartWithoutStateMigration >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2+EvWrite >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table >> TxUsage::WriteToTopic_Demo_21_RestartNo_Query >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Query [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix2 [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix3 >> KqpIndexes::UniqIndexComplexPkComplexFkOverlap [GOOD] >> KqpIndexes::UniqAndNoUniqSecondaryIndex >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChangeExhausted [GOOD] >> DataShardReadIterator::NoErrorOnFinalACK >> TxUsage::WriteToTopic_Demo_23_RestartNo_Table >> ScrubFast::SingleBlob [GOOD] >> SelfHeal::ReassignThrottling >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder+EvWrite |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut_fat/unittest >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup >> ReadIteratorExternalBlobs::ExtBlobs [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithSpecificKeys >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests2Inflight2BlobSize2000000 [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests10Inflight10BlobSize2000000 |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut_fat/unittest |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut_fat/unittest >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests10Inflight10BlobSize1000 [GOOD] >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests100Inflight10BlobSize1000 >> Mirror3of4::Compaction [GOOD] >> MultiGet::SequentialGet ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent [GOOD] Test command err: === Starting PQ server === Server->StartServer(false); 2025-06-30T09:19:31.315407Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669585967031380:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:31.315438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004915/r3tmp/tmpeQMZDC/pdisk_1.dat 2025-06-30T09:19:31.377916Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:19:31.396060Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 
2025-06-30T09:19:31.399417Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:31.417808Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:31.417845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:31.417934Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:31.422234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:31.433945Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 25785, node 1 2025-06-30T09:19:31.441664Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004915/r3tmp/yandex5Gmczn.tmp 2025-06-30T09:19:31.441697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004915/r3tmp/yandex5Gmczn.tmp 2025-06-30T09:19:31.441802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004915/r3tmp/yandex5Gmczn.tmp 2025-06-30T09:19:31.441866Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:31.449245Z INFO: TTestServer started on Port 5196 GrpcPort 25785 TClient is connected to server localhost:5196 PQClient connected to localhost:25785 === TenantModeEnabled() = 0 === Init PQ - start server on port 25785 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:19:31.491256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:31.491295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:31.493608Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:19:31.494322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:31.507235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:19:31.507326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:19:31.507422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:19:31.507434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:19:31.507494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:19:31.507514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:19:31.508440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:19:31.508501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:19:31.508558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:19:31.508569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:19:31.508572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-30T09:19:31.508576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715657:0 2 -> 3 2025-06-30T09:19:31.509275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:19:31.509283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:19:31.509287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715657:0 3 -> 128 2025-06-30T09:19:31.509809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:31.509815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-30T09:19:31.509819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:19:31.509966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:19:31.509972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:19:31.509976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-30T09:19:31.509994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-30T09:19:31.512772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:19:31.513478Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-30T09:19:31.513524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:19:31.514567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 1751275171562, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-30T09:19:31.514619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1751275171562 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-30T09:19:31.514627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-30T09:19:31.514703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715657:0 128 -> 240 2025-06-30T09:19:31.514712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-30T09:19:31.514773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-30T09:19:31.514784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-30T09:19:31.515325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-30T09:19:31.515334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057 ... 21750540:2575], Partitions=[], ActiveFamilyCount=1) generation 1 step 1 2025-06-30T09:19:50.992356Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037898][rt3.dc1--topic1] consumer cli start rebalancing. 
familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-30T09:19:50.992361Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037898][rt3.dc1--topic1] consumer cli balancing duration: 0.000047s 2025-06-30T09:19:50.995899Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 3 SizeLag: 280 WriteTimestampEstimateMS: 1751275190981 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-30T09:19:50.995926Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 INIT DONE TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 2025-06-30T09:19:50.995954Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 sending to client partition status 2025-06-30T09:19:50.999037Z :INFO: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "/topic1". Partition: 0. Read offset: (NULL) 2025-06-30T09:19:50.999401Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 grpc read done: success# 1, data# { start_partition_session_response { partition_session_id: 1 } } 2025-06-30T09:19:50.999493Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:533: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 got StartRead from client: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, commitOffset# (empty maybe) 2025-06-30T09:19:50.999532Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1012: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 Start reading TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 clientCommitOffset (empty maybe) clientReadOffset 0 2025-06-30T09:19:50.999537Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 0 endOffset 3 2025-06-30T09:19:50.999558Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, endOffset# 3, WTime# 0, sizeLag# 280 2025-06-30T09:19:50.999565Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1TEvPartitionReady. 
Aval parts: 1 2025-06-30T09:19:50.999582Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 performing read request: guid# 2dfe2810-73982ae9-98b7767d-42c25b21, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 3, size# 336, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-30T09:19:50.999619Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 3 maxSize 336 maxTimeLagMs 0 readTimestampMs 0 readOffset 0 EndOffset 3 ClientCommitOffset 0 committedOffset 0 Guid 2dfe2810-73982ae9-98b7767d-42c25b21 2025-06-30T09:19:51.000074Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 3 Result { Offset: 0 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 1 WriteTimestampMS: 1751275190879 CreateTimestampMS: 1751275190878 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 2 WriteTimestampMS: 1751275190880 CreateTimestampMS: 1751275190878 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 3 WriteTimestampMS: 1751275190880 CreateTimestampMS: 1751275190878 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 2 SizeLag: 18446744073709551530 RealReadOffset: 2 WaitQuotaTimeMs: 0 EndOffset: 3 StartOffset: 0 } Cookie: 0 } 2025-06-30T09:19:51.000125Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 3 2025-06-30T09:19:51.000141Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid 2dfe2810-73982ae9-98b7767d-42c25b21 has messages 1 2025-06-30T09:19:51.000169Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 read done: guid# 2dfe2810-73982ae9-98b7767d-42c25b21, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 371 2025-06-30T09:19:51.000184Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 response to read: guid# 2dfe2810-73982ae9-98b7767d-42c25b21 2025-06-30T09:19:51.000266Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 Process answer. 
Aval parts: 0 2025-06-30T09:19:51.000401Z :DEBUG: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] [] Got ReadResponse, serverBytesSize = 371, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428429 2025-06-30T09:19:51.000432Z :DEBUG: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428429 2025-06-30T09:19:51.000536Z :DEBUG: [] Decompression task done. Partition/PartitionSessionId: 1 (0-2) 2025-06-30T09:19:51.000554Z :DEBUG: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] [] Returning serverBytesSize = 371 to budget 2025-06-30T09:19:51.000560Z :DEBUG: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] [] In ContinueReadingDataImpl, ReadSizeBudget = 371, ReadSizeServerDelta = 52428429 2025-06-30T09:19:51.000632Z :DEBUG: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-30T09:19:51.000684Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-30T09:19:51.000698Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-06-30T09:19:51.000705Z :DEBUG: [] Take Data. Partition 0. Read: {1, 1} (2-2) 2025-06-30T09:19:51.000716Z :DEBUG: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] [] The application data is transferred to the client. Number of messages 3, size 24 bytes 2025-06-30T09:19:51.000725Z :DEBUG: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] [] Returning serverBytesSize = 0 to budget 2025-06-30T09:19:51.000763Z :INFO: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] Closing read session. Close timeout: 0.000000s 2025-06-30T09:19:51.000771Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:2:0 2025-06-30T09:19:51.000779Z :INFO: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] Counters: { Errors: 0 CurrentSessionLifetimeMs: 13 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:19:51.000798Z :NOTICE: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:19:51.000778Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 grpc read done: success# 1, data# { read_request { bytes_size: 371 } } 2025-06-30T09:19:51.000804Z :DEBUG: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] [] Abort session to cluster 2025-06-30T09:19:51.000850Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 got read request: guid# ef186fd6-d6cbe507-18ae2278-1c653dee 2025-06-30T09:19:51.000915Z :NOTICE: [] [] [4aae0995-ee7e52f3-bd895b5b-549681d3] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:19:51.001180Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 grpc read done: success# 0, data# { } 2025-06-30T09:19:51.001199Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 grpc read failed 2025-06-30T09:19:51.001205Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 grpc closed 2025-06-30T09:19:51.001221Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/cli session shared/cli_5_1_4552074504269005212_v1 is DEAD 2025-06-30T09:19:51.001336Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/cli_5_1_4552074504269005212_v1 2025-06-30T09:19:51.001620Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037898][rt3.dc1--topic1] pipe [5:7521669668221750540:2575] disconnected; active server actors: 1 2025-06-30T09:19:51.001638Z node 6 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037898][rt3.dc1--topic1] pipe [5:7521669668221750540:2575] client cli disconnected session shared/cli_5_1_4552074504269005212_v1 2025-06-30T09:19:51.161777Z node 5 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1986: ActorId: [5:7521669672516717866:2582] TxId: 281474976715694. Ctx: { TraceId: 01jz025evsfwdkpxe1xspy4tmr, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ZWE3NGQ1NGItMjg1ZTZjNmMtYzEzNWY0NTMtN2JkNjEyMzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 6 2025-06-30T09:19:51.162008Z node 5 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [5:7521669672516717870:2582], TxId: 281474976715694, task: 2. Ctx: { TraceId : 01jz025evsfwdkpxe1xspy4tmr. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ZWE3NGQ1NGItMjg1ZTZjNmMtYzEzNWY0NTMtN2JkNjEyMzM=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [5:7521669672516717866:2582], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRestartWithStateMigrationRetryAndRestartWithoutStateMigration [GOOD] >> DataShardReadIteratorFastCancel::ShouldProcessFastCancel >> DataShardReadIterator::ShouldReadRangeChunk1_100 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk1 >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit900 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1001 >> DataShardReadIterator::ShouldReadHeadFromFollower [GOOD] >> DataShardReadIterator::ShouldReadFromHead >> KqpIndexes::UniqAndNoUniqSecondaryIndex [GOOD] >> KqpIndexes::UniqAndNoUniqSecondaryIndexWithCover >> DataShardReadIterator::ShouldReadRangePrefix3 [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable-UseSimilarity [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix4 >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable-UseSimilarity >> ConvertMiniKQLTypeToYdbTypeTest::TTzDateTime [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzTimeStamp [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::UuidType [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantTuple >> Describe::LocationWithKillTablets [GOOD] >> Describe::DescribePartitionPermissions >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2-EvWrite >> ConvertYdbPermissionNameToACLAttrs::TestEqualGranularAndDeprecatedAcl [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalEmpty [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalOptionalEmpty [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalOptionalEmpty2 [GOOD] >> ConvertYdbValueToMiniKQLValueTest::List [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Dict [GOOD] |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut_fat/unittest >> ConvertMiniKQLTypeToYdbTypeTest::VariantTuple [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantStruct [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Void [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] >> TxUsage::WriteToTopic_Demo_11_Table [GOOD] >> DataShardReadIterator::NoErrorOnFinalACK [GOOD] >> DataShardReadIterator::ShouldCancelMvccSnapshotFromFuture >> JsonProtoConversion::NlohmannJsonToProtoMap [GOOD] >> TxUsage::WriteToTopic_Demo_11_Query >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests100Inflight10BlobSize1000 [GOOD] >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests10000Inflight100BlobSize1000 |80.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] |80.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::Dict [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests10Inflight10BlobSize2000000 [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests100Inflight10BlobSize2000000 |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::NlohmannJsonToProtoMap [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder-EvWrite >> KqpVectorIndexes::OrderByCosineLevel1+Nullable-UseSimilarity [GOOD] >> KqpVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1001 [GOOD] >> DataShardReadIterator::ShouldReadFromFollower >> 
DataShardReadIterator::ShouldReadRangeChunk1 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk2 >> TBlobStorageWardenTest::TestCreatePDiskAndGroup [GOOD] >> DataShardReadIterator::ShouldReadFromHead [GOOD] >> DataShardReadIterator::ShouldReadFromHeadWithConflict+UseSink >> BSCRestartPDisk::RestartNotAllowed >> MultiGet::SequentialGet [GOOD] >> NodeDisconnected::BsQueueRetries >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions [GOOD] >> PersQueueSdkReadSessionTest::SettingsValidation >> KqpIndexes::UniqAndNoUniqSecondaryIndexWithCover [GOOD] >> BSCRestartPDisk::RestartOneByOneWithReconnects >> DataShardReadIteratorFastCancel::ShouldProcessFastCancel [GOOD] >> DataShardReadIteratorLatency::ReadSplitLatency ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut_fat/unittest >> TBlobStorageWardenTest::TestCreatePDiskAndGroup [GOOD] Test command err: 2025-06-30T09:19:49.894175Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:19:49.894926Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "/home/runner/.ya/build/build_root/cl3w/003803/r3tmp/tmpAAZCrd//pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 1040187392 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 31 } 2025-06-30T09:19:49.894989Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 0 Path# "/home/runner/.ya/build/build_root/cl3w/003803/r3tmp/tmpAAZCrd//pdisk0.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:19:49.895120Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [3e000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:19:49.895356Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [3e000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 2025-06-30T09:19:49.895368Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [3e000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:19:49.895455Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [3e000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 2025-06-30T09:19:49.895460Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} 
StartLocalVDiskActor SlayInFlight# false VDiskId# [3e000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:19:49.895533Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [3e000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 2025-06-30T09:19:49.895538Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [3e000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:19:49.895603Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [3e000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 2025-06-30T09:19:49.895608Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 1040187392 2025-06-30T09:19:49.895765Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 31 PipeClientId# [1:41:2077] ControllerId# 72057594037932033 2025-06-30T09:19:49.895769Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:19:49.895788Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:19:49.895803Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:19:49.899448Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:19:49.899499Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:19:49.900045Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 1040187392 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 31 } 2025-06-30T09:19:49.900074Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 1040187392 2025-06-30T09:19:49.900221Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 31 PipeClientId# [2:85:2062] ControllerId# 72057594037932033 2025-06-30T09:19:49.900230Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:19:49.900249Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:19:49.900263Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:19:49.900423Z node 1 :BS_NODE DEBUG: 
{NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:49.900430Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:19:49.901171Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:19:49.901364Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:19:49.901453Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:19:49.966411Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:49.966442Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-30T09:19:49.967477Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-30T09:19:49.967603Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:49.967613Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:19:49.968499Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-30T09:19:49.968611Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:49.968675Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:49.968683Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-30T09:19:49.968709Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-30T09:19:49.968788Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-30T09:19:49.969111Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-30T09:19:49.969143Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:49.969258Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-30T09:19:49.969265Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:19:49.969293Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "]\330\370\301\250\336\213B\251\277\243\370\343!VLf/|\024" } 2025-06-30T09:19:49.969313Z node 1 :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 2 SessionId# [0:0:0] Inserted# true Subscription# {SessionId# [0:0:0] SubscriptionCookie# 0} NextSubscribeCookie# 1 2025-06-30T09:19:49.969323Z node 1 :BS_NODE DEBUG: {NWDC29@distconf_binding.cpp:80} Initiated bind NodeId# 2 Binding# {2.0/4048073042781494529@[0:0:0]} SessionId# [0:0:0] 
2025-06-30T09:19:49.969407Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 1040187392 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 31 } 2025-06-30T09:19:49.969448Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-30T09:19:49.969472Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-30T09:19:49.969545Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-30T09:19:49.969555Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:19:49.971424Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 3 2025-06-30T09:19:49.971441Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesI ... 
EBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:52.136560Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:52.136643Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 0 VSlotId: 1 } Success: true } 2025-06-30T09:19:52.136660Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 0 VSlotId: 2 } 2025-06-30T09:19:52.136769Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:52.136787Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 0 VSlotId: 1 } } 2025-06-30T09:19:52.136903Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:52.146505Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:52.146573Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 0 VSlotId: 2 } Success: true } 2025-06-30T09:19:52.146599Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 0 VSlotId: 3 } 2025-06-30T09:19:52.146682Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:52.146695Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 0 VSlotId: 2 } } 2025-06-30T09:19:52.146785Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:52.161318Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:52.161450Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 0 VSlotId: 3 } Success: true } 2025-06-30T09:19:52.161536Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:52.161559Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 0 VSlotId: 3 } } 2025-06-30T09:19:52.253694Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:19:52.253920Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:807} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { PDisks { NodeID: 1 PDiskID: 1000 Path: "/home/runner/.ya/build/build_root/cl3w/003803/r3tmp/tmpvBtvfj/new_pdisk.dat" PDiskGuid: 6015544300130039905 PDiskCategory: 0 PDiskConfig { ChunkSize: 33554432 } EntityStatus: CREATE ExpectedSerial: "" ManagementStage: DISCOVER_SERIAL SpaceColorBorder: GREEN } } InstanceId: "eb58f5a3-55672bed-572b60ec-e0932d67" AvailDomain: 31 } 2025-06-30T09:19:52.253947Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {PDisks { NodeID: 1 PDiskID: 1000 Path: "/home/runner/.ya/build/build_root/cl3w/003803/r3tmp/tmpvBtvfj/new_pdisk.dat" PDiskGuid: 6015544300130039905 PDiskCategory: 0 PDiskConfig { ChunkSize: 33554432 } EntityStatus: CREATE ExpectedSerial: "" 
ManagementStage: DISCOVER_SERIAL SpaceColorBorder: GREEN } } 2025-06-30T09:19:52.253988Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1000 Path# "/home/runner/.ya/build/build_root/cl3w/003803/r3tmp/tmpvBtvfj/new_pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:19:52.256843Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-30T09:19:52.257114Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 ErasureSpecies: "none" VDiskKind: "Default" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } Command { QueryBaseConfig { } } } 2025-06-30T09:19:52.418201Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:807} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { VDisks { VDiskID { GroupID: 3187671040 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 6015544300130039905 } VDiskKind: Default EntityStatus: CREATE StoragePoolName: "" GroupSizeInUnits: 0 } Groups { GroupID: 3187671040 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 6015544300130039905 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 3187671040 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } InstanceId: "eb58f5a3-55672bed-572b60ec-e0932d67" AvailDomain: 31 } 2025-06-30T09:19:52.418254Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {VDisks { VDiskID { GroupID: 3187671040 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 6015544300130039905 } VDiskKind: Default EntityStatus: CREATE StoragePoolName: "" GroupSizeInUnits: 0 } Groups { GroupID: 3187671040 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 6015544300130039905 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 3187671040 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } 2025-06-30T09:19:52.418312Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [be000000:1:0:0:0] VSlotId# 1:1000:1000 PDiskGuid# 6015544300130039905 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:19:52.418542Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [be000000:1:0:0:0] VSlotId# 1:1000:1000 PDiskGuid# 6015544300130039905 2025-06-30T09:19:52.419076Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639257 Sender# [1:403:2351] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:19:52.982757Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 3187671040 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1000 VSlotId: 1000 PDiskGuid: 6015544300130039905 Status: INIT_PENDING OnlyPhantomsRemain: false } } 2025-06-30T09:19:52.983069Z node 2 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [1:43:2079] SessionId# 
[2:123:2048] Cookie# 4048073042781494529 2025-06-30T09:19:52.983092Z node 2 :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [2:123:2048] Inserted# false Subscription# {SessionId# [2:123:2048] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-30T09:19:52.983137Z node 2 :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 1 Cookie# 4048073042781494529 SessionId# [2:123:2048] Binding# Record# {CacheUpdate { KeyValuePairs { Key: "Gbe000000" Generation: 1 Value: "\010\200\200\200\360\013\020\001\030\000\"\026\n\024\n\022\010\001\020\350\007\030\350\007 \341\360\201\237\265\265\340\275S0\0008\000B\000J\000P\200\200\200\360\013X\000j\000p\000\220\001\000" } } } 2025-06-30T09:19:52.983148Z node 2 :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [2:123:2048] Inserted# false Subscription# {SessionId# [2:123:2048] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-30T09:19:52.983295Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 3187671040 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1000 VSlotId: 1000 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:19:52.983604Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [2:87:2064] SessionId# [1:137:2048] Cookie# 4048073042781494529 2025-06-30T09:19:52.983622Z node 1 :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 2 SessionId# [1:137:2048] Inserted# false Subscription# {SessionId# [1:137:2048] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-30T09:19:52.983655Z node 1 :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 2 Cookie# 4048073042781494529 SessionId# [1:137:2048] Binding# {2.2/4048073042781494529@[1:137:2048]} Record# {RootNodeId: 2 CacheUpdate { KeyValuePairs { Key: "Gbe000000" Generation: 1 Value: "\010\200\200\200\360\013\020\001\030\000\"\026\n\024\n\022\010\001\020\350\007\030\350\007 \341\360\201\237\265\265\340\275S0\0008\000B\000J\000P\200\200\200\360\013X\000j\000p\000\220\001\000" } } } 2025-06-30T09:19:53.004595Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1000 VSlotId: 1000 } } 2025-06-30T09:19:53.075193Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:53.075383Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 3187671040 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1000 VSlotId: 1000 PDiskGuid: 6015544300130039905 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:19:53.111034Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:53.111156Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 3187671040 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1000 VSlotId: 1000 PDiskGuid: 6015544300130039905 Status: READY OnlyPhantomsRemain: false } } Sending TEvPut 2025-06-30T09:19:53.111342Z node 1 :BS_NODE DEBUG: {NW46@node_warden_proxy.cpp:134} HandleForwarded GroupId# 3187671040 EnableProxyMock# false NoGroup# false 2025-06-30T09:19:53.111351Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 
3187671040 2025-06-30T09:19:53.111980Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 Sending TEvGet Sending TEvVGet Sending TEvPut 2025-06-30T09:19:53.131600Z node 2 :BS_NODE DEBUG: {NW46@node_warden_proxy.cpp:134} HandleForwarded GroupId# 3187671040 EnableProxyMock# false NoGroup# false 2025-06-30T09:19:53.131625Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 3187671040 2025-06-30T09:19:53.131635Z node 2 :BS_NODE DEBUG: {NW98@node_warden_group.cpp:266} RequestGroupConfig GroupId# 3187671040 2025-06-30T09:19:53.131734Z node 2 :BS_NODE INFO: {NW79@node_warden_group_resolver.cpp:74} TGroupResolverActor::Bootstrap GroupId# 3187671040 2025-06-30T09:19:53.131764Z node 2 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [2:22:2050] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:19:53.132608Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG02@get_group.cpp:58} TEvControllerGetGroup Sender# [2:22:2050] Cookie# 0 Recipient# [1:442:2381] RecipientRewrite# [1:403:2351] Request# {NodeID: 2 GroupIDs: 3187671040 } StopGivingGroups# false 2025-06-30T09:19:53.132647Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG01@get_group.cpp:22} Handle TEvControllerGetGroup Request# {NodeID: 2 GroupIDs: 3187671040 } Sending TEvGet >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup >> DataShardReadIterator::TryWriteManyRows+Commit [GOOD] >> DataShardReadIterator::TryWriteManyRows-Commit >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression >> DataShardReadIterator::ShouldReadRangePrefix4 [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix5 >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup [GOOD] >> VectorIndexBuildTestReboots::BaseCase-Prefixed[PipeResets] [GOOD] |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |80.2%| [TA] $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UniqAndNoUniqSecondaryIndexWithCover [GOOD] Test command err: Trying to start YDB, gRPC: 62908, MsgBus: 12483 2025-06-30T09:19:48.900556Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669660725318455:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:48.900584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002b93/r3tmp/tmpLa6ghB/pdisk_1.dat 2025-06-30T09:19:48.975004Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669660725318432:2080] 1751275188900398 != 1751275188900401 2025-06-30T09:19:48.977572Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62908, node 1 2025-06-30T09:19:48.990559Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:48.990572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:48.990574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:48.990615Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:49.003088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:49.003137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:49.004161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12483 TClient is connected to server localhost:12483 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:19:49.053321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:49.055691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:19:49.061276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:49.129457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:49.155363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:49.168167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:49.312392Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669665020287327:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:49.312425Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:49.356510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:49.366144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:49.378765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:49.393013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:49.406804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:49.421478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:49.478963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:49.492872Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669665020287982:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:49.492931Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:49.492960Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669665020287987:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:49.494126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:49.504534Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669665020287989:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:19:49.572594Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669665020288040:3401] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:49.690428Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521669665020288310:3574], Recipient [1:7521669660725318746:2139]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:49.690450Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:49.690453Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:19:49.690461Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521669665020288306:3571], Recipient [1:7521669660725318746:2139]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06 ... -30T09:19:53.383169Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 4/5 2025-06-30T09:19:53.383171Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 4/5, is published: true 2025-06-30T09:19:53.383186Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [3:7521669677102469580:2144], Recipient [3:7521669677102469580:2144]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:19:53.383188Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:19:53.383191Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715672:4, at schemeshard: 72057594046644480 2025-06-30T09:19:53.383193Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715672:4 ProgressState 2025-06-30T09:19:53.383198Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:19:53.383201Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:4 progress is 5/5 2025-06-30T09:19:53.383203Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 5/5 2025-06-30T09:19:53.383206Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:4 progress is 5/5 2025-06-30T09:19:53.383208Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 5/5 2025-06-30T09:19:53.383210Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 5/5, is published: true 2025-06-30T09:19:53.383221Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7521669681397439146:2470] message: TxId: 281474976715672 2025-06-30T09:19:53.383223Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 5/5 2025-06-30T09:19:53.383231Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-30T09:19:53.383233Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715672:0 2025-06-30T09:19:53.383267Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 5 2025-06-30T09:19:53.383271Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-30T09:19:53.383272Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715672:1 2025-06-30T09:19:53.383276Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-30T09:19:53.383278Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-30T09:19:53.383280Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715672:2 2025-06-30T09:19:53.383287Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-30T09:19:53.383289Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:3 2025-06-30T09:19:53.383290Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715672:3 2025-06-30T09:19:53.383294Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 3 2025-06-30T09:19:53.383296Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:4 2025-06-30T09:19:53.383297Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715672:4 2025-06-30T09:19:53.383304Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 21] was 3 2025-06-30T09:19:53.383373Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:53.383384Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:53.383430Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:53.383439Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7521669681397439146:2470] msg type: 
271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-30T09:19:53.383805Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [3:7521669681397439159:3578], Recipient [3:7521669677102469580:2144]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:53.383817Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:53.383820Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:19:53.384075Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [3:7521669681397439289:3670], Recipient [3:7521669677102469580:2144]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:53.384085Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:53.384087Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:19:53.384091Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [3:7521669681397439290:3671], Recipient [3:7521669677102469580:2144]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:53.384092Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:53.384093Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:19:53.384096Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [3:7521669681397439294:3674], Recipient [3:7521669677102469580:2144]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:53.384098Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:53.384099Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:19:53.590768Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:53.609713Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7521669677102469580:2144]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:53.609734Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:53.609758Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [3:7521669677102469580:2144], Recipient [3:7521669677102469580:2144]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:53.609761Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:53.651295Z node 3 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. 
Ctx: { TraceId: 01jz025h5tbckw0bzxhq66cm2a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTdlOTUzODQtZTg4ZmRkMzAtYzAzZjYzZWItN2QyZmQ5Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-30T09:19:53.651393Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=3&id=ZTdlOTUzODQtZTg4ZmRkMzAtYzAzZjYzZWItN2QyZmQ5Zg==, ActorId: [3:7521669681397439145:2469], ActorState: ExecuteState, TraceId: 01jz025h5tbckw0bzxhq66cm2a, Create QueryResponse for error on request, msg: 2025-06-30T09:19:53.703913Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7521669681397439580:2549], TxId: 281474976715678, task: 1. Ctx: { SessionId : ydb://session/3?node_id=3&id=ZTdlOTUzODQtZTg4ZmRkMzAtYzAzZjYzZWItN2QyZmQ5Zg==. CustomerSuppliedId : . TraceId : 01jz025h9m5f4fb2dz98ak9hnk. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-30T09:19:53.703976Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7521669681397439581:2550], TxId: 281474976715678, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jz025h9m5f4fb2dz98ak9hnk. SessionId : ydb://session/3?node_id=3&id=ZTdlOTUzODQtZTg4ZmRkMzAtYzAzZjYzZWItN2QyZmQ5Zg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7521669681397439577:2469], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-30T09:19:53.704036Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=3&id=ZTdlOTUzODQtZTg4ZmRkMzAtYzAzZjYzZWItN2QyZmQ5Zg==, ActorId: [3:7521669681397439145:2469], ActorState: ExecuteState, TraceId: 01jz025h9m5f4fb2dz98ak9hnk, Create QueryResponse for error on request, msg: 2025-06-30T09:19:53.819721Z node 3 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jz025hb9fbft542dd26yg5vf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTdlOTUzODQtZTg4ZmRkMzAtYzAzZjYzZWItN2QyZmQ5Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-30T09:19:53.819825Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=3&id=ZTdlOTUzODQtZTg4ZmRkMzAtYzAzZjYzZWItN2QyZmQ5Zg==, ActorId: [3:7521669681397439145:2469], ActorState: ExecuteState, TraceId: 01jz025hb9fbft542dd26yg5vf, Create QueryResponse for error on request, msg: 2025-06-30T09:19:53.824550Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder+EvWrite ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup [GOOD] Test command err: RandomSeed# 5200965056738989201 2025-06-30T09:19:54.480061Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480095Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480107Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480117Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480127Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480136Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480146Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480156Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 
2025-06-30T09:19:54.480335Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480356Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480370Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480383Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480396Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480410Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480423Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480436Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480452Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480459Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480468Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480474Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480480Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480487Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480492Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480499Z 5 
00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:54.480942Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480953Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480962Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480970Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480978Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480986Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.480995Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:54.481002Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 >> DataShardReadIterator::ShouldCancelMvccSnapshotFromFuture [GOOD] >> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInOneTransaction >> BSCRestartPDisk::RestartOneByOne >> DataShardReadIterator::ShouldReadRangeChunk2 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk3 >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue+EvWrite >> TxUsage::WriteToTopic_Demo_16_Table [GOOD] >> DataShardReadIteratorLatency::ReadSplitLatency [GOOD] >> DataShardReadIteratorPageFaults::CancelPageFaultedReadThenDropTable >> DataShardReadIterator::ShouldReadFromFollower [GOOD] >> DataShardReadIterator::ShouldNotReadFutureMvccFromFollower >> TxUsage::WriteToTopic_Demo_16_Query >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed >> DataShardReadIterator::ShouldReadFromHeadWithConflict+UseSink [GOOD] >> DataShardReadIterator::ShouldReadFromHeadWithConflict-UseSink >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed [GOOD] >> BSCRestartPDisk::RestartNotAllowed [GOOD] |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> DataShardReadIterator::ShouldReadRangePrefix5 [GOOD] >> DataShardReadIterator::ShouldReceiveErrorAfterSplit |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |80.2%| [TA] $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartNotAllowed [GOOD] Test command err: RandomSeed# 13018755024915140026 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed [GOOD] Test command err: RandomSeed# 11231871266577882306 2025-06-30T09:19:56.046877Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.046919Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.046932Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.046946Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.046958Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.046972Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.046984Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.047190Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047214Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047229Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047247Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047265Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047281Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 
2025-06-30T09:19:56.047299Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047319Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.047327Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.047333Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.047347Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.047357Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.047363Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.047369Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-30T09:19:56.047820Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047836Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047848Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047864Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047875Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047886Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-30T09:19:56.047896Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 |80.2%| [LD] 
{default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests100Inflight10BlobSize2000000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10000Inflight1000BlobSize1000 >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder-EvWrite >> DataShardReadIterator::ShouldReadRangeChunk3 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk5 >> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInOneTransaction [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue+EvWrite [GOOD] >> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInSeparateTransactions >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue-EvWrite |80.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut_fat/unittest >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup [GOOD] Test command err: 2025-06-30T09:19:52.236970Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:19:52.237866Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpunbIFT//pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 1040187392 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 31 } 2025-06-30T09:19:52.237968Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 0 Path# "/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpunbIFT//pdisk0.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:19:52.238104Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [3e000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:19:52.238355Z node 1 
:BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [3e000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 2025-06-30T09:19:52.238369Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [3e000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:19:52.238488Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [3e000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 2025-06-30T09:19:52.238495Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [3e000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:19:52.238577Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [3e000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 2025-06-30T09:19:52.238583Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [3e000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:19:52.238653Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [3e000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 2025-06-30T09:19:52.238659Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 1040187392 2025-06-30T09:19:52.238834Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 31 PipeClientId# [1:41:2077] ControllerId# 72057594037932033 2025-06-30T09:19:52.238839Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:19:52.238863Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:19:52.238878Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:19:52.242793Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:19:52.242829Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:19:52.243357Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 1040187392 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 31 } 2025-06-30T09:19:52.243392Z node 2 :BS_NODE DEBUG: 
{NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 1040187392 2025-06-30T09:19:52.243525Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 31 PipeClientId# [2:85:2062] ControllerId# 72057594037932033 2025-06-30T09:19:52.243532Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:19:52.243547Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:19:52.243561Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:19:52.243726Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:52.243731Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:19:52.244481Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:19:52.244660Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:19:52.244722Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:19:52.352929Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:52.352953Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-30T09:19:52.353666Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-30T09:19:52.353737Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:52.353743Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:19:52.354361Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-30T09:19:52.354444Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:52.354494Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:52.354499Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-30T09:19:52.354513Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-30T09:19:52.354570Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-30T09:19:52.354919Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-30T09:19:52.354957Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:19:52.355120Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-30T09:19:52.355127Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 
2025-06-30T09:19:52.355174Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\317\2131#\235h\t\200\203\206R\026\205f8E\"\254\352Z" } 2025-06-30T09:19:52.355198Z node 1 :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 2 SessionId# [0:0:0] Inserted# true Subscription# {SessionId# [0:0:0] SubscriptionCookie# 0} NextSubscribeCookie# 1 2025-06-30T09:19:52.355209Z node 1 :BS_NODE DEBUG: {NWDC29@distconf_binding.cpp:80} Initiated bind NodeId# 2 Binding# {2.0/7258056780327862161@[0:0:0]} SessionId# [0:0:0] 2025-06-30T09:19:52.355292Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 1040187392 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 1040187392 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 31 } 2025-06-30T09:19:52.355337Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-30T09:19:52.355367Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-30T09:19:52.355453Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-30T09:19:52.355464Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:19:52.356808Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 3 2025-06-30T09:19:52.356819Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo ... 
61\316\303\335\321\377\0010\0018\000B\000J\000P\200\200\200\360\013X\000j\000p\000\220\001\000" } } } 2025-06-30T09:19:56.086435Z node 2 :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [2:123:2048] Inserted# false Subscription# {SessionId# [2:123:2048] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-30T09:19:56.086544Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 3187671040 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1000 VSlotId: 1000 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:19:56.086731Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [2:87:2064] SessionId# [1:137:2048] Cookie# 7258056780327862161 2025-06-30T09:19:56.086761Z node 1 :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 2 SessionId# [1:137:2048] Inserted# false Subscription# {SessionId# [1:137:2048] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-30T09:19:56.086788Z node 1 :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 2 Cookie# 7258056780327862161 SessionId# [1:137:2048] Binding# {2.2/7258056780327862161@[1:137:2048]} Record# {RootNodeId: 2 CacheUpdate { KeyValuePairs { Key: "Gbe000000" Generation: 1 Value: "\010\200\200\200\360\013\020\001\030\000\"\027\n\025\n\023\010\001\020\350\007\030\350\007 \307\300\255\361\316\303\335\321\377\0010\0018\000B\000J\000P\200\200\200\360\013X\000j\000p\000\220\001\000" } } } 2025-06-30T09:19:56.093421Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1000 VSlotId: 1000 } } 2025-06-30T09:19:56.162424Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:56.162603Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 3187671040 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1000 VSlotId: 1000 PDiskGuid: 18420696767502442567 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:19:56.180441Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-30T09:19:56.180596Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 3187671040 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1000 VSlotId: 1000 PDiskGuid: 18420696767502442567 Status: READY OnlyPhantomsRemain: false } } Sending TEvPut 2025-06-30T09:19:56.180789Z node 1 :BS_NODE DEBUG: {NW46@node_warden_proxy.cpp:134} HandleForwarded GroupId# 3187671040 EnableProxyMock# false NoGroup# false 2025-06-30T09:19:56.180798Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 3187671040 2025-06-30T09:19:56.181052Z node 1 :BS_NODE DEBUG: {NW68@node_warden_group.cpp:85} ConfigureLocalProxy propose GroupId# 3187671040 MainKey# {Id# '/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpBsvAr2//key.txt' Version# 1} 2025-06-30T09:19:56.181142Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:19:56.181257Z node 1 :BS_CONTROLLER DEBUG: {BSCTXPGK11@propose_group_key.cpp:119} Handle TEvControllerProposeGroupKey Request# {NodeId: 1 GroupId: 3187671040 LifeCyclePhase: 1 MainKeyId: 
"/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpBsvAr2//key.txt" EncryptedGroupKey: "\344>H\251$2\340\260k#\210\260R\346K\361\254\346\000\336\245>{=\244\272\026\326]!\300\032\311\253K}" MainKeyVersion: 1 GroupKeyNonce: 3187671040 } 2025-06-30T09:19:56.181269Z node 1 :BS_CONTROLLER DEBUG: {BSCTXPGK07@propose_group_key.cpp:82} TTxProposeGroupKey Execute 2025-06-30T09:19:56.239357Z node 1 :BS_CONTROLLER DEBUG: {BSCTXPGK08@propose_group_key.cpp:96} TTxProposeGroupKey Complete 2025-06-30T09:19:56.239481Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG02@get_group.cpp:58} TEvControllerGetGroup Sender# [1:403:2351] Cookie# 0 Recipient# [1:403:2351] RecipientRewrite# [1:403:2351] Request# {NodeID: 1 GroupIDs: 3187671040 } StopGivingGroups# false 2025-06-30T09:19:56.239501Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG01@get_group.cpp:22} Handle TEvControllerGetGroup Request# {NodeID: 1 GroupIDs: 3187671040 } 2025-06-30T09:19:56.239582Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:807} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { Groups { GroupID: 3187671040 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 18420696767502442567 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpBsvAr2//key.txt" EncryptedGroupKey: "\344>H\251$2\340\260k#\210\260R\346K\361\254\346\000\336\245>{=\244\272\026\326]!\300\032\311\253K}" GroupKeyNonce: 3187671040 MainKeyVersion: 1 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } } 2025-06-30T09:19:56.239602Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {Groups { GroupID: 3187671040 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 18420696767502442567 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpBsvAr2//key.txt" EncryptedGroupKey: "\344>H\251$2\340\260k#\210\260R\346K\361\254\346\000\336\245>{=\244\272\026\326]!\300\032\311\253K}" GroupKeyNonce: 3187671040 MainKeyVersion: 1 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } Sending TEvGet Sending TEvVGet Sending TEvPut 2025-06-30T09:19:56.286307Z node 2 :BS_NODE DEBUG: {NW46@node_warden_proxy.cpp:134} HandleForwarded GroupId# 3187671040 EnableProxyMock# false NoGroup# false 2025-06-30T09:19:56.286334Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 3187671040 2025-06-30T09:19:56.286343Z node 2 :BS_NODE DEBUG: {NW98@node_warden_group.cpp:266} RequestGroupConfig GroupId# 3187671040 2025-06-30T09:19:56.286390Z node 2 :BS_NODE INFO: {NW79@node_warden_group_resolver.cpp:74} TGroupResolverActor::Bootstrap GroupId# 3187671040 2025-06-30T09:19:56.286464Z node 2 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [2:22:2050] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:19:56.286636Z node 2 :BS_NODE DEBUG: {NW68@node_warden_group.cpp:85} ConfigureLocalProxy propose GroupId# 3187671040 MainKey# {Id# '/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpBsvAr2//key.txt' Version# 1} 2025-06-30T09:19:56.290053Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG02@get_group.cpp:58} TEvControllerGetGroup Sender# [2:22:2050] Cookie# 0 Recipient# [1:442:2381] RecipientRewrite# [1:403:2351] Request# {NodeID: 2 GroupIDs: 3187671040 } StopGivingGroups# false 
2025-06-30T09:19:56.290131Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG01@get_group.cpp:22} Handle TEvControllerGetGroup Request# {NodeID: 2 GroupIDs: 3187671040 } 2025-06-30T09:19:56.290442Z node 2 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:807} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 2 ServiceSet { Groups { GroupID: 3187671040 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 18420696767502442567 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpBsvAr2//key.txt" EncryptedGroupKey: "\344>H\251$2\340\260k#\210\260R\346K\361\254\346\000\336\245>{=\244\272\026\326]!\300\032\311\253K}" GroupKeyNonce: 3187671040 MainKeyVersion: 1 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } } 2025-06-30T09:19:56.290479Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {Groups { GroupID: 3187671040 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 18420696767502442567 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpBsvAr2//key.txt" EncryptedGroupKey: "\344>H\251$2\340\260k#\210\260R\346K\361\254\346\000\336\245>{=\244\272\026\326]!\300\032\311\253K}" GroupKeyNonce: 3187671040 MainKeyVersion: 1 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } 2025-06-30T09:19:56.290566Z node 2 :BS_NODE INFO: {NW81@node_warden_group_resolver.cpp:270} TGroupResolverActor::PassAway GroupId# 3187671040 2025-06-30T09:19:56.291314Z node 1 :BS_CONTROLLER DEBUG: {BSCTXPGK11@propose_group_key.cpp:119} Handle TEvControllerProposeGroupKey Request# {NodeId: 2 GroupId: 3187671040 LifeCyclePhase: 1 MainKeyId: "/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpBsvAr2//key.txt" EncryptedGroupKey: "\230\034\0344T10\357$f$|\371\205\032\214 \372\302\2032\341\262\222\334\003\242E\326h\215\321\310\316\312\310" MainKeyVersion: 1 GroupKeyNonce: 3187671040 } 2025-06-30T09:19:56.291347Z node 1 :BS_CONTROLLER DEBUG: {BSCTXPGK07@propose_group_key.cpp:82} TTxProposeGroupKey Execute 2025-06-30T09:19:56.291357Z node 1 :BS_CONTROLLER ERROR: {BSCTXPGK04@propose_group_key.cpp:47} Group LifeCyclePhase does not match ELCP_INITIAL GroupId.GetRawId()# 3187671040 LifeCyclePhase# 3 2025-06-30T09:19:56.291375Z node 1 :BS_CONTROLLER DEBUG: {BSCTXPGK08@propose_group_key.cpp:96} TTxProposeGroupKey Complete 2025-06-30T09:19:56.291403Z node 1 :BS_CONTROLLER ERROR: {BSCTXPGK10@propose_group_key.cpp:108} TTxProposeGroupKey error GroupId# 3187671040 Status# ERROR Request# {NodeId: 2 GroupId: 3187671040 LifeCyclePhase: 1 MainKeyId: "/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpBsvAr2//key.txt" EncryptedGroupKey: "\230\034\0344T10\357$f$|\371\205\032\214 \372\302\2032\341\262\222\334\003\242E\326h\215\321\310\316\312\310" MainKeyVersion: 1 GroupKeyNonce: 3187671040 } 2025-06-30T09:19:56.291489Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG02@get_group.cpp:58} TEvControllerGetGroup Sender# [1:403:2351] Cookie# 0 Recipient# [1:403:2351] RecipientRewrite# [1:403:2351] Request# {NodeID: 2 GroupIDs: 3187671040 } StopGivingGroups# false 2025-06-30T09:19:56.291500Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG01@get_group.cpp:22} Handle TEvControllerGetGroup Request# {NodeID: 2 GroupIDs: 3187671040 } 2025-06-30T09:19:56.291714Z node 2 :BS_NODE DEBUG: 
{NW17@node_warden_impl.cpp:807} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 2 ServiceSet { Groups { GroupID: 3187671040 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 18420696767502442567 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpBsvAr2//key.txt" EncryptedGroupKey: "\344>H\251$2\340\260k#\210\260R\346K\361\254\346\000\336\245>{=\244\272\026\326]!\300\032\311\253K}" GroupKeyNonce: 3187671040 MainKeyVersion: 1 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } } 2025-06-30T09:19:56.291740Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {Groups { GroupID: 3187671040 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 18420696767502442567 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/cl3w/00370e/r3tmp/tmpBsvAr2//key.txt" EncryptedGroupKey: "\344>H\251$2\340\260k#\210\260R\346K\361\254\346\000\336\245>{=\244\272\026\326]!\300\032\311\253K}" GroupKeyNonce: 3187671040 MainKeyVersion: 1 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } Sending TEvGet |80.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable-UseSimilarity [GOOD] >> DataShardReadIterator::ShouldNotReadFutureMvccFromFollower [GOOD] >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc+UseSink >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity |80.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/test-results/unittest/{meta.json ... results_accumulator.log} |80.2%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |80.2%| [LD] {RESULT} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDrop >> DataShardReadIteratorPageFaults::CancelPageFaultedReadThenDropTable [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanWithRetry >> DataShardReadIteratorPageFaults::LocksNotLostOnPageFault >> DataShardReadIterator::ShouldReadFromHeadWithConflict-UseSink [GOOD] >> DataShardReadIterator::ShouldReceiveErrorAfterSplit [GOOD] >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict+UseSink >> DataShardReadIterator::ShouldReceiveErrorAfterSplitWhenExhausted |80.2%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity [GOOD] |80.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |80.2%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |80.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber >> TopicAutoscaling::ReBalancingAfterSplit_sessionsWithPartition [GOOD] >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |80.2%| [LD] {RESULT} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber >> TxUsage::WriteToTopic_Demo_21_RestartNo_Query [GOOD] >> JsonProtoConversion::ProtoMapToJson [GOOD] |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> DataShardReadIterator::ShouldReadRangeChunk5 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk7 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 26462, MsgBus: 19486 2025-06-30T09:19:48.414211Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669661908976996:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:48.414282Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002ba8/r3tmp/tmpyFkdG8/pdisk_1.dat 2025-06-30T09:19:48.467350Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:48.467446Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669661908976974:2080] 1751275188414008 != 1751275188414011 TServer::EnableGrpc on GrpcPort 26462, node 1 2025-06-30T09:19:48.481231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:48.481245Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:48.481248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:48.481301Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19486 TClient is connected to server localhost:19486 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:19:48.551230Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:48.551260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:48.552346Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:48.554131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:48.567124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:48.632796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:48.658383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:48.672627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:19:48.800577Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669661908978579:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:48.800603Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:48.844600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.854204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.867935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.924917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.938890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.952104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.960571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.975227Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669661908979233:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:48.975272Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669661908979238:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:48.975272Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:48.976120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:48.978486Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669661908979240:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:19:49.048111Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669666203946587:3403] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:49.194244Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521669666203946857:3576], Recipient [1:7521669661908977322:2161]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:49.194270Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:49.194273Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:19:49.194280Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521669666203946853:3573], Recipient [1:7521669661908977322:2161]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-30T09:19:49.194281Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:19:49.204974Z node 1 ... ldInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7521669687224079289:2492], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1751275194368, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710762, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 12 UploadBytes: 198 ReadRows: 40 ReadBytes: 440, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0}, txId# 281474976710762 2025-06-30T09:19:54.361223Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:19:54.361295Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:54.361312Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Unlocking 2025-06-30T09:19:54.361325Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Unlocking TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: 
Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7521669687224079289:2492], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1751275194368, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710762, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 12 UploadBytes: 198 ReadRows: 40 ReadBytes: 440, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:19:54.361332Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:19:54.361336Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-30T09:19:54.361369Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:54.361381Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Done 2025-06-30T09:19:54.361390Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Done TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7521669687224079289:2492], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1751275194368, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710762, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 12 UploadBytes: 198 ReadRows: 40 ReadBytes: 440, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:19:54.361396Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:336: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 281474976715674, subscribers count# 1 2025-06-30T09:19:54.361398Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:19:54.361404Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:54.361409Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [2:7521669687224079289:2492] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715674 at schemeshard: 72057594046644480 2025-06-30T09:19:54.361499Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 274792450, Sender 
[2:7521669687224079289:2492], Recipient [2:7521669682929109554:2147]: NKikimrIndexBuilder.TEvGetRequest DatabaseName: "/Root" IndexBuildId: 281474976715674 2025-06-30T09:19:54.361507Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5095: StateWork, processing event TEvIndexBuilder::TEvGetRequest 2025-06-30T09:19:54.361527Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/Root" IndexBuildId: 281474976715674 2025-06-30T09:19:54.361627Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:103: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 281474976715674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "index" index_columns: "emb" global_vector_kmeans_tree_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1751275194 } EndTime { seconds: 1751275194 } UserSID: "" } 2025-06-30T09:19:54.361630Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:19:54.361646Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:54.361665Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [2:7521669687224079289:2492] msg type: 274792451 msg: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 281474976715674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "index" index_columns: "emb" global_vector_kmeans_tree_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1751275194 } EndTime { seconds: 1751275194 } UserSID: "" } at schemeshard: 72057594046644480 2025-06-30T09:19:54.361725Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [2:7521669687224079292:3695], Recipient [2:7521669682929109554:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:54.361728Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:54.361730Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:19:54.366266Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [2:7521669687224079655:3978], Recipient [2:7521669682929109554:2147]: NKikimrSchemeOp.TDescribePath Path: "/Root/TestTable" Options { ShowPrivateTable: false } 2025-06-30T09:19:54.366283Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:19:54.512765Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:54.526107Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7521669682929109554:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:54.526129Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:54.526140Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: 
StateWork, received event# 271124999, Sender [2:7521669682929109554:2147], Recipient [2:7521669682929109554:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:54.526144Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:55.526535Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7521669682929109554:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:55.526562Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:55.526574Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:7521669682929109554:2147], Recipient [2:7521669682929109554:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:55.526578Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:56.530700Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7521669682929109554:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:56.530717Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:56.530729Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:7521669682929109554:2147], Recipient [2:7521669682929109554:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:56.530732Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:57.531043Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7521669682929109554:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:57.531074Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:57.531085Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:7521669682929109554:2147], Recipient [2:7521669682929109554:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:57.531088Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder+EvWrite >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Table |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson [GOOD] |80.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |80.3%| [LD] {RESULT} 
$(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol >> TxUsage::WriteToTopic_Demo_23_RestartNo_Table [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartNo_Query >> CommitOffset::Commit_WithSession_ParentNotFinished_SameSession [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession_ParentCommittedToEnd >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips+EvWrite >> DataShardReadIteratorPageFaults::LocksNotLostOnPageFault [GOOD] >> DataShardReadIteratorState::ShouldCalculateQuota [GOOD] |80.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |80.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |80.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc+UseSink [GOOD] >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink >> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInSeparateTransactions [GOOD] >> DataShardReadIterator::HandlePersistentSnapshotGoneInContinue [GOOD] >> DataShardReadIterator::HandleMvccGoneInContinue [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCreateClean |80.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table >> DataShardReadIterator::ShouldReceiveErrorAfterSplitWhenExhausted [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10000Inflight1000BlobSize1000 [GOOD] >> Describe::DescribePartitionPermissions [GOOD] >> DirectReadWithServer::KillPQTablet ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIteratorState::ShouldCalculateQuota [GOOD] Test command err: 2025-06-30T09:19:35.629003Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:35.629102Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:35.629123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042e9/r3tmp/tmpkR9NXT/pdisk_1.dat 2025-06-30T09:19:35.735438Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:19:35.736415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:35.753286Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:35.754124Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275175229773 != 1751275175229777 2025-06-30T09:19:35.796381Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:35.796427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:35.807099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:35.880386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:35.897748Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:19:35.897975Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:19:35.898049Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:19:35.898098Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:19:35.905820Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:19:35.906028Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:19:35.906052Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:19:35.906209Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:19:35.906219Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:19:35.906224Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:19:35.906277Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:19:35.906293Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:19:35.906305Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:19:35.906363Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:19:35.910646Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:19:35.910715Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:19:35.910759Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:19:35.910766Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:19:35.910772Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:19:35.910779Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:35.910836Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:35.910843Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:35.910913Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:19:35.910928Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:19:35.910937Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:19:35.910946Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:35.910952Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:19:35.910957Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:19:35.910960Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:19:35.910965Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:19:35.910969Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:19:35.910984Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:35.910988Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:35.910994Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:19:35.911069Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:19:35.911074Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:19:35.911091Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:19:35.911133Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:19:35.911142Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:19:35.911156Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:19:35.911163Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:19:35.911166Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:19:35.911171Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:19:35.911174Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:35.911220Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:19:35.911224Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:19:35.911227Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:19:35.911230Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:35.911238Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:19:35.911240Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:19:35.911244Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:19:35.911247Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:19:35.911251Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:19:35.911419Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:19:35.911426Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:35.911429Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:35.911435Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-30T09:19:35.911445Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:19:35.911798Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:19:35.911809Z ... 5] at 72075186224037888 aborting because locks are not valid 2025-06-30T09:19:59.602056Z node 14 :GLOBAL WARN: log.cpp:784: fline=events.h:105;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=5; 2025-06-30T09:19:59.602065Z node 14 :TX_DATASHARD INFO: datashard_write_operation.cpp:720: Write transaction 5 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-30T09:19:59.602073Z node 14 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 2025-06-30T09:19:59.602086Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-30T09:19:59.602089Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteWrite 2025-06-30T09:19:59.602091Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-30T09:19:59.602094Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-30T09:19:59.602111Z node 14 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. 
txid 5 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-30T09:19:59.602118Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is DelayComplete 2025-06-30T09:19:59.602121Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-30T09:19:59.602123Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:19:59.602125Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:19:59.602136Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-30T09:19:59.602140Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:19:59.602145Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-06-30T09:19:59.602165Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-30T09:19:59.602170Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-30T09:19:59.602175Z node 14 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 5 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_LOCKS_BROKEN 2025-06-30T09:19:59.602186Z node 14 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-30T09:19:59.602200Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:59.602247Z node 14 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [14:963:2713], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [14:909:2713]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[14:963:2713].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-30T09:19:59.602261Z node 14 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [14:956:2713], SessionActorId: [14:909:2713], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[14:909:2713]. isRollback=0 2025-06-30T09:19:59.602312Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:1938: SessionId: ydb://session/3?node_id=14&id=NjRhYTE0NjQtZjQzMGU4OGEtZWY2MjFiMjUtZGRmNTc1MWE=, ActorId: [14:909:2713], ActorState: ExecuteState, TraceId: 01jz025q37bh1wrmb7ye3v8p36, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [14:957:2713] from: [14:956:2713] 2025-06-30T09:19:59.602331Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [14:956:2713], Recipient [14:833:2661]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-30T09:19:59.602334Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-30T09:19:59.602354Z node 14 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [14:957:2713] TxId: 281474976715665. Ctx: { TraceId: 01jz025q37bh1wrmb7ye3v8p36, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=NjRhYTE0NjQtZjQzMGU4OGEtZWY2MjFiMjUtZGRmNTc1MWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-30T09:19:59.602403Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [14:833:2661], Recipient [14:833:2661]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-30T09:19:59.602410Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-30T09:19:59.602418Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-30T09:19:59.602432Z node 14 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-30T09:19:59.602441Z node 14 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715663, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-30T09:19:59.602450Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-06-30T09:19:59.602456Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-30T09:19:59.602460Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-06-30T09:19:59.602464Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:19:59.602469Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:19:59.602475Z node 14 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v3500/18446744073709551615 ImmediateWriteEdge# v3501/0 ImmediateWriteEdgeReplied# v3501/0 2025-06-30T09:19:59.602482Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-30T09:19:59.602486Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-30T09:19:59.602489Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:19:59.602491Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite 2025-06-30T09:19:59.602494Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteWrite 2025-06-30T09:19:59.602497Z node 14 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:6] at 72075186224037888 2025-06-30T09:19:59.602504Z node 14 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 2025-06-30T09:19:59.602507Z node 14 :TX_DATASHARD DEBUG: execute_write_unit.cpp:421: Skip empty write operation for [0:6] at 72075186224037888 2025-06-30T09:19:59.602512Z node 14 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 
0 2025-06-30T09:19:59.602520Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:19:59.602523Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-06-30T09:19:59.602526Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-30T09:19:59.602528Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-30T09:19:59.602532Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-06-30T09:19:59.602535Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-30T09:19:59.602537Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:19:59.602539Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:19:59.602543Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-30T09:19:59.602545Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:19:59.602548Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-30T09:19:59.602553Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-30T09:19:59.602555Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-30T09:19:59.602559Z node 14 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-30T09:19:59.602564Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:59.602583Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=14&id=NjRhYTE0NjQtZjQzMGU4OGEtZWY2MjFiMjUtZGRmNTc1MWE=, ActorId: [14:909:2713], ActorState: ExecuteState, TraceId: 01jz025q37bh1wrmb7ye3v8p36, Create QueryResponse for error on request, msg: ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::HandleMvccGoneInContinue [GOOD] Test command err: 2025-06-30T09:19:33.989239Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:33.989366Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:33.989391Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042ff/r3tmp/tmpiuf9Aq/pdisk_1.dat 2025-06-30T09:19:34.106962Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:19:34.108034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:34.126580Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:34.127998Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275173401132 != 1751275173401136 2025-06-30T09:19:34.172269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:34.172321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:34.183063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:34.257238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:34.278580Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:19:34.278938Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:19:34.279060Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:19:34.279130Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:19:34.288724Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:19:34.289002Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:19:34.289033Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:19:34.289257Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:19:34.289267Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:19:34.289274Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:19:34.289344Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:19:34.289364Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:19:34.289381Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:19:34.299817Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:19:34.306679Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:19:34.306877Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:19:34.306913Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:19:34.306919Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:19:34.306924Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:19:34.306931Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:34.307033Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:34.307043Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:34.307170Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:19:34.307198Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:19:34.307212Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:19:34.307221Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:34.307230Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:19:34.307237Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:19:34.307242Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:19:34.307249Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:19:34.307255Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:19:34.307280Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:34.307287Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:34.307295Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:19:34.307409Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:19:34.307416Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:19:34.307440Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:19:34.307493Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:19:34.307507Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:19:34.307529Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:19:34.307540Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:19:34.307546Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:19:34.307553Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:19:34.307558Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:34.307617Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:19:34.307623Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:19:34.307628Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:19:34.307632Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:34.307644Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:19:34.307648Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:19:34.307653Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:19:34.307657Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:19:34.307664Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:19:34.308006Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:19:34.308017Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:19:34.318572Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:19:34.318612Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:34.318624Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:34.318641Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... cution unit LoadTxDetails 2025-06-30T09:19:59.574628Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit LoadTxDetails 2025-06-30T09:19:59.574653Z node 14 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 3500:281474976715665 keys extracted: 0 2025-06-30T09:19:59.574659Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-30T09:19:59.574663Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit LoadTxDetails 2025-06-30T09:19:59.574667Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-30T09:19:59.574671Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-30T09:19:59.574677Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:281474976715665] is the new logically complete end at 72075186224037889 2025-06-30T09:19:59.574682Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:281474976715665] is the new logically incomplete end at 72075186224037889 2025-06-30T09:19:59.574685Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:281474976715665] at 72075186224037889 2025-06-30T09:19:59.574691Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-30T09:19:59.574694Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-30T09:19:59.574698Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-30T09:19:59.574703Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-30T09:19:59.574722Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: 
Execution status for [3500:281474976715665] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-30T09:19:59.574725Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit CreateVolatileSnapshot 2025-06-30T09:19:59.574729Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-30T09:19:59.574733Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-30T09:19:59.574757Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-30T09:19:59.574760Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-30T09:19:59.574764Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit CompleteOperation 2025-06-30T09:19:59.574768Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit CompleteOperation 2025-06-30T09:19:59.574803Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is DelayComplete 2025-06-30T09:19:59.574806Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit CompleteOperation 2025-06-30T09:19:59.574810Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit CompletedOperations 2025-06-30T09:19:59.574814Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit CompletedOperations 2025-06-30T09:19:59.574820Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-30T09:19:59.574824Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit CompletedOperations 2025-06-30T09:19:59.574828Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715665] at 72075186224037889 has finished 2025-06-30T09:19:59.574832Z node 14 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:59.574836Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-30T09:19:59.574840Z node 14 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-30T09:19:59.574843Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-30T09:19:59.585312Z node 14 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-30T09:19:59.585356Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:19:59.585368Z node 14 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715665] at 72075186224037888 on unit CompleteOperation 2025-06-30T09:19:59.585391Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715665] from 72075186224037888 at tablet 72075186224037888 send result to client [14:1022:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:19:59.585404Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:59.585437Z node 14 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-30T09:19:59.585448Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:19:59.585454Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715665] at 72075186224037889 on unit CompleteOperation 2025-06-30T09:19:59.585463Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715665] from 72075186224037889 at tablet 72075186224037889 send result to client [14:1022:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:19:59.585468Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:19:59.585940Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [14:556:2482], Recipient [14:628:2533]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715665 } ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-30T09:19:59.585976Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-30T09:19:59.585991Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CheckRead 2025-06-30T09:19:59.586011Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-30T09:19:59.586017Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:19:59.586023Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:19:59.586028Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:19:59.586039Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:8] at 72075186224037888 2025-06-30T09:19:59.586046Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-30T09:19:59.586050Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:19:59.586055Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:19:59.586059Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:19:59.586075Z node 14 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715665 } ResultFormat: FORMAT_ARROW } 2025-06-30T09:19:59.586153Z node 14 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/281474976715665 2025-06-30T09:19:59.586163Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[14:556:2482], 1} after executionsCount# 1 2025-06-30T09:19:59.586172Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[14:556:2482], 1} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:19:59.586202Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[14:556:2482], 1} finished in read 2025-06-30T09:19:59.586213Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-30T09:19:59.586217Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:19:59.586221Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:19:59.586226Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:19:59.586239Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-30T09:19:59.586243Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:19:59.586247Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 72075186224037888 has finished 2025-06-30T09:19:59.586252Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:19:59.586271Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 >> JsonProtoConversion::NlohmannJsonToProtoArray [GOOD] |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict+UseSink [GOOD] >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReceiveErrorAfterSplitWhenExhausted [GOOD] Test command err: 2025-06-30T09:19:33.953456Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:33.953561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:33.953583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042f9/r3tmp/tmpRm0bcm/pdisk_1.dat 2025-06-30T09:19:34.054436Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:19:34.055179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:34.073647Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:34.074994Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275173529350 != 1751275173529354 2025-06-30T09:19:34.117089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:34.117140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:34.127763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:34.201167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:34.219188Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:19:34.219634Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:19:34.219824Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:19:34.219980Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:19:34.231060Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:19:34.231351Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:19:34.231393Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:19:34.231611Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:19:34.231624Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:19:34.231632Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:19:34.231718Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:19:34.231755Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:19:34.231773Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:19:34.248520Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:19:34.253027Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:19:34.253124Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:19:34.253156Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:19:34.253161Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:19:34.253165Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:19:34.253170Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:34.253243Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:34.253250Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:34.253354Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:19:34.253378Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:19:34.253388Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:19:34.253397Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:34.253404Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:19:34.253409Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:19:34.253413Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:19:34.253418Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:19:34.253422Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:19:34.253440Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:34.253444Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:34.253450Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:19:34.253542Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:19:34.253547Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:19:34.253567Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:19:34.253616Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:19:34.253626Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:19:34.253642Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:19:34.253649Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:19:34.253653Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:19:34.253657Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:19:34.253660Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:34.253702Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:19:34.253705Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:19:34.253708Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:19:34.253710Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:34.253718Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:19:34.253721Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:19:34.253725Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:19:34.253728Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:19:34.253732Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:19:34.254005Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:19:34.254014Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:19:34.264341Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:19:34.264380Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:34.264390Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:34.264403Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... .cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [15:1109:2866] 2025-06-30T09:19:59.909949Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-06-30T09:19:59.909955Z node 15 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037891 2025-06-30T09:19:59.909961Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-30T09:19:59.910012Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553157, Sender [15:1036:2814], Recipient [15:629:2533]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037891 OperationCookie: 281474976715665 2025-06-30T09:19:59.910023Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037888 Received snapshot Ack from dst 72075186224037891 for split OpId 281474976715665 2025-06-30T09:19:59.910083Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [15:1036:2814], Recipient [15:1036:2814]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:59.910089Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:59.910157Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [15:1104:2861], Recipient [15:629:2533]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037891 ClientId: [15:1104:2861] ServerId: [15:1105:2862] } 2025-06-30T09:19:59.910162Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-30T09:19:59.910233Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [15:25:2072], Recipient [15:1036:2814]: {TEvRegisterTabletResult TabletId# 72075186224037891 Entry# 3000} 2025-06-30T09:19:59.910238Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-30T09:19:59.910243Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037891 time 3000 2025-06-30T09:19:59.910248Z node 15 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-30T09:19:59.910295Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-06-30T09:19:59.910306Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:59.910314Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037891 2025-06-30T09:19:59.910320Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037891 has no attached operations 2025-06-30T09:19:59.910325Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037891 2025-06-30T09:19:59.910330Z node 15 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-30T09:19:59.910337Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-30T09:19:59.910350Z node 15 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037892 ack snapshot OpId 281474976715665 2025-06-30T09:19:59.910365Z node 15 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037892 2025-06-30T09:19:59.910380Z node 15 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037892 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:19:59.910390Z node 15 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-30T09:19:59.910398Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037892, actorId: [15:1111:2868] 2025-06-30T09:19:59.910402Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037892 2025-06-30T09:19:59.910407Z node 15 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037892 2025-06-30T09:19:59.910410Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-30T09:19:59.910452Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553157, Sender [15:1038:2816], Recipient [15:629:2533]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037892 OperationCookie: 281474976715665 2025-06-30T09:19:59.910459Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037888 Received snapshot Ack from dst 72075186224037892 for split OpId 281474976715665 2025-06-30T09:19:59.910515Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [15:1103:2860], Recipient [15:629:2533]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037892 ClientId: [15:1103:2860] ServerId: [15:1106:2863] } 2025-06-30T09:19:59.910521Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-30T09:19:59.910535Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [15:1105:2862], Recipient [15:1036:2814]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:59.910539Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 
2025-06-30T09:19:59.910546Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [15:1104:2861], serverId# [15:1105:2862], sessionId# [0:0:0] 2025-06-30T09:19:59.910555Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [15:1038:2816], Recipient [15:1038:2816]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:59.910559Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:59.910632Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [15:25:2072], Recipient [15:1038:2816]: {TEvRegisterTabletResult TabletId# 72075186224037892 Entry# 3000} 2025-06-30T09:19:59.910640Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-30T09:19:59.910644Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 3000 2025-06-30T09:19:59.910648Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-30T09:19:59.910686Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [15:1106:2863], Recipient [15:1038:2816]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:59.910691Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:59.910696Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [15:1103:2860], serverId# [15:1106:2863], sessionId# [0:0:0] 2025-06-30T09:19:59.910719Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [15:25:2072], Recipient [15:1036:2814]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 3000 ReadStep# 3000 } 2025-06-30T09:19:59.910724Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-30T09:19:59.910730Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037891 coordinator 72057594046316545 last step 0 next step 3000 2025-06-30T09:19:59.911047Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037891: waitStep# 3000 readStep# 3000 observedStep# 3000 2025-06-30T09:19:59.911067Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037891 promoting UnprotectedReadEdge to v3000/18446744073709551615 2025-06-30T09:19:59.911088Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-30T09:19:59.911095Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:59.911100Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037892 2025-06-30T09:19:59.911104Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037892 has no attached operations 2025-06-30T09:19:59.911108Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue 
has no ready operations at 72075186224037892 2025-06-30T09:19:59.911113Z node 15 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-06-30T09:19:59.911120Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-30T09:19:59.911219Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [15:25:2072], Recipient [15:1038:2816]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 3000 ReadStep# 3000 } 2025-06-30T09:19:59.911226Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-30T09:19:59.911230Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 0 next step 3000 2025-06-30T09:19:59.911235Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 3000 readStep# 3000 observedStep# 3000 2025-06-30T09:19:59.911243Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037892 promoting UnprotectedReadEdge to v3000/18446744073709551615 2025-06-30T09:19:59.932034Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037888 ack split to schemeshard 281474976715665 2025-06-30T09:19:59.932960Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553158, Sender [15:374:2368], Recipient [15:636:2537] 2025-06-30T09:19:59.932987Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715665, at datashard: 72075186224037888, state: SplitSrcWaitForPartitioningChanged 2025-06-30T09:19:59.933494Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037888 ack split partitioning changed to schemeshard 281474976715665 2025-06-30T09:19:59.933512Z node 15 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-30T09:19:59.933660Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [15:620:2527], Recipient [15:629:2533]: NKikimr::TEvTablet::TEvFollowerGcApplied >> DataShardReadIterator::ShouldReadRangeChunk7 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk100 |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::NlohmannJsonToProtoArray [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_3_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10000Inflight1000BlobSize1000 [GOOD] Test command err: RandomSeed# 8178585246498120632 2025-06-30T09:19:05.177728Z 6 00h00m30.010000s :BS_PROXY_GET ERROR: [97e204e7ea2c674c] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:1:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:05.177788Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [dfc4ed583609cd48] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:2:1000:0] 
DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:1:0]"} Marker# BPG29 2025-06-30T09:19:05.177821Z 6 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:1:1000:0] PatchedBlobId# [1:1:2:10:1:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:05.177854Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:2:1000:0] PatchedBlobId# [1:1:2:10:4098:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-30T09:19:07.161187Z 3 00h00m30.010000s :BS_PROXY_GET ERROR: [d03776f79930fc96] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:3:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:2:0]"} Marker# BPG29 2025-06-30T09:19:07.161243Z 3 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:3:1000:0] PatchedBlobId# [1:1:2:10:147459:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:2:0] Marker# BSVSP01 2025-06-30T09:19:09.173389Z 9 00h00m30.010000s :BS_PROXY_GET ERROR: [9dfcf5b1cb598a50] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:4:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:2:0]"} Marker# BPG29 2025-06-30T09:19:09.173449Z 9 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:4:1000:0] PatchedBlobId# [1:1:2:10:24580:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:2:0] Marker# BSVSP01 2025-06-30T09:19:09.186219Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [1d846c39cc8e6436] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:2:10:24580:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:0:0]"} Marker# BPG29 2025-06-30T09:19:09.186280Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:2:10:24580:1000:0] PatchedBlobId# [1:1:3:10:4100:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:0:0] Marker# BSVSP01 2025-06-30T09:19:09.192137Z 6 00h00m30.010000s :BS_PROXY_GET ERROR: [47fbc47712795c16] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:3:10:4100:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:2:0]"} Marker# BPG29 2025-06-30T09:19:09.192197Z 6 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:3:10:4100:1000:0] PatchedBlobId# [1:1:4:10:151556:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# 
[82000000:1:1:2:0] Marker# BSVSP01 2025-06-30T09:19:09.193285Z 9 00h00m30.010000s :BS_PROXY_GET ERROR: [0667dc0d6d96084a] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:4:10:151556:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:2:0]"} Marker# BPG29 2025-06-30T09:19:09.193319Z 9 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:4:10:151556:1000:0] PatchedBlobId# [1:1:5:10:8196:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:2:0] Marker# BSVSP01 2025-06-30T09:19:09.198590Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [f4a8bb36be85a0b6] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:5:10:8196:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-30T09:19:09.198647Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:5:10:8196:1000:0] PatchedBlobId# [1:1:6:10:12292:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-30T09:19:09.211328Z 5 00h00m30.010000s :BS_PROXY_GET ERROR: [5036a033a7693026] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:6:10:12292:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:1:0]"} Marker# BPG29 2025-06-30T09:19:09.211423Z 5 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:6:10:12292:1000:0] PatchedBlobId# [1:1:7:10:61444:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:1:0] Marker# BSVSP01 2025-06-30T09:19:09.212506Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [99d088bc7d69ea8e] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:7:10:61444:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:09.212539Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:7:10:61444:1000:0] PatchedBlobId# [1:1:8:10:16388:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:09.213309Z 6 00h00m30.010000s :BS_PROXY_GET ERROR: [72a24c6e613bf493] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:8:10:16388:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:2:0]"} Marker# BPG29 2025-06-30T09:19:09.213337Z 6 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:8:10:16388:1000:0] PatchedBlobId# [1:1:9:10:20484:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:2:0] Marker# BSVSP01 2025-06-30T09:19:09.218214Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [1874a5e978aa72a1] Response# TEvGetResult {Status# DEADLINE 
ResponseSz# 1 {[1:1:9:10:20484:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:1:0]"} Marker# BPG29 2025-06-30T09:19:09.218270Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:9:10:20484:1000:0] PatchedBlobId# [1:1:10:10:20484:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:1:0] Marker# BSVSP01 2025-06-30T09:19:09.223778Z 3 00h00m30.010000s :BS_PROXY_GET ERROR: [57b7a417040f4bcd] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:10:10:20484:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:2:0]"} Marker# BPG29 2025-06-30T09:19:09.223834Z 3 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:20484:1000:0] PatchedBlobId# [1:1:11:10:4:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:2:0] Marker# BSVSP01 2025-06-30T09:19:11.782943Z 6 00h00m30.010000s :BS_PROXY_GET ERROR: [ec8eaa8357c5c396] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:5:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:2:0]"} Marker# BPG29 2025-06-30T09:19:11.782998Z 6 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:5:1000:0] PatchedBlobId# [1:1:2:10:4101:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:2:0] Marker# BSVSP01 2025-06-30T09:19:11.790404Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [96bc5d8b5fcc8228] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:2:10:4101:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:1:0]"} Marker# BPG29 2025-06-30T09:19:11.790464Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:2:10:4101:1000:0] PatchedBlobId# [1:1:3:10:102405:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:1:0] Marker# BSVSP01 2025-06-30T09:19:11.795763Z 3 00h00m30.010000s :BS_PROXY_GET ERROR: [517295c8c2aeb5eb] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:3:10:102405:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:2:0]"} Marker# BPG29 2025-06-30T09:19:11.795818Z 3 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:3:10:102405:1000:0] PatchedBlobId# [1:1:4:10:8197:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:2:0] Marker# BSVSP01 2025-06-30T09:19:11.800996Z 9 00h00m30.010000s :BS_PROXY_GET ERROR: [3ceca7e1909d3ac5] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:4:10:8197:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:2:0]"} Marker# BPG29 2025-06-30T09:19:11.801052Z 9 
00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:4:10:8197:1000:0] PatchedBlobId# [1:1:5:10:12293:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:2:0] Marker# BSVSP01 2025-06-30T09:19:11.809213Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [25c390dd7b3bf82b] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:5:10:12293:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:0:0]"} Marker# BPG29 2025-06-30T09:19:11.809268Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:5:10:12293:1000:0] PatchedBlobId# [1:1:6:10:12293:1000:0] ... 000000:_:2:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:101:10:77902:1000:0] PatchedBlobId# [1:1:102:10:8270:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:2:0] Marker# BSVSP01 2025-06-30T09:19:28.202044Z 2 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:99:10:4155:1000:0] PatchedBlobId# [1:1:100:10:4155:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-30T09:19:28.202092Z 2 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:102:10:131121:1000:0] PatchedBlobId# [1:1:103:10:12337:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-30T09:19:28.203433Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [9814e0c872925e55] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:98:10:121:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:28.204143Z 5 00h00m30.010000s :BS_PROXY_GET ERROR: [7b3f592951135103] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:103:10:12336:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:1:0]"} Marker# BPG29 2025-06-30T09:19:28.204246Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:98:10:121:1000:0] PatchedBlobId# [1:1:99:10:121:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:28.204990Z 5 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:103:10:12336:1000:0] PatchedBlobId# [1:1:104:10:12336:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:1:0] Marker# BSVSP01 2025-06-30T09:19:28.205326Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [414255a8e82acf0f] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:98:10:83:1000:0] DEADLINE Size# 0 RequestedSize# 
1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-30T09:19:28.205552Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [dc79b5006160b968] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:102:10:8280:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-30T09:19:28.206119Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:98:10:83:1000:0] PatchedBlobId# [1:1:99:10:147539:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-30T09:19:28.206249Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:102:10:8280:1000:0] PatchedBlobId# [1:1:103:10:12376:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-30T09:19:28.207728Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [7075def14ae052ae] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:99:10:73855:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:28.208427Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:99:10:73855:1000:0] PatchedBlobId# [1:1:100:10:4223:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:28.208740Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [36647d6d25ecedc3] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:102:10:32888:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:0:0]"} Marker# BPG29 2025-06-30T09:19:28.208788Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [3922e64ce00b4720] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:102:10:131188:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:0:0]"} Marker# BPG29 2025-06-30T09:19:28.209395Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:102:10:32888:1000:0] PatchedBlobId# [1:1:103:10:12408:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:0:0] Marker# BSVSP01 2025-06-30T09:19:28.209427Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:102:10:131188:1000:0] PatchedBlobId# [1:1:103:10:12404:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:0:0] Marker# BSVSP01 2025-06-30T09:19:28.209860Z 2 00h00m30.010000s :BS_PROXY_GET ERROR: [6dcd1071fc3e154c] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:100:10:102495:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:1:0]"} Marker# BPG29 2025-06-30T09:19:28.209952Z 2 00h00m30.010000s :BS_PROXY_GET ERROR: 
[3eacfb668539418c] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:100:10:102455:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:1:0]"} Marker# BPG29 2025-06-30T09:19:28.210457Z 2 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:100:10:102495:1000:0] PatchedBlobId# [1:1:101:10:8287:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-30T09:19:28.210528Z 2 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:100:10:102455:1000:0] PatchedBlobId# [1:1:101:10:8247:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-30T09:19:28.213032Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [6d4bda39c4e20ab7] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:100:10:102465:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-30T09:19:28.213150Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [360d24d40e711594] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:100:10:102485:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-30T09:19:28.213407Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:100:10:102465:1000:0] PatchedBlobId# [1:1:101:10:8257:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-30T09:19:28.213475Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:100:10:102485:1000:0] PatchedBlobId# [1:1:101:10:8277:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-30T09:19:28.215466Z 2 00h00m30.010000s :BS_PROXY_GET ERROR: [28bc72936d203c1e] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:99:10:31:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:1:0]"} Marker# BPG29 2025-06-30T09:19:28.215842Z 2 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:99:10:31:1000:0] PatchedBlobId# [1:1:100:10:4127:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-30T09:19:28.217090Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [adae8647653712ed] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:98:10:74:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-30T09:19:28.217140Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [325997016b0577d9] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:101:10:8267:1000:0] DEADLINE Size# 0 RequestedSize# 1000} 
ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-30T09:19:28.217285Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:98:10:74:1000:0] PatchedBlobId# [1:1:99:10:24650:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-30T09:19:28.217316Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:101:10:8267:1000:0] PatchedBlobId# [1:1:102:10:12363:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-30T09:19:28.220005Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [cbc473f579cfa448] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:101:10:8258:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-30T09:19:28.220060Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [c04829dc31c150b0] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:98:10:112:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-30T09:19:28.220217Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:101:10:8258:1000:0] PatchedBlobId# [1:1:102:10:106562:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-30T09:19:28.220244Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:98:10:112:1000:0] PatchedBlobId# [1:1:99:10:4208:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 >> TxUsage::Sinks_Oltp_WriteToTopic_3_Query >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder-EvWrite >> TxUsage::Sinks_Oltp_WriteToTopics_4_Table >> Viewer::JsonStorageListingV1PDiskIdFilter [GOOD] >> PersQueueSdkReadSessionTest::SettingsValidation [GOOD] >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly >> JsonProtoConversion::JsonToProtoArray [GOOD] >> JsonProtoConversion::ProtoMapToJson_ReceiveMessageResult [GOOD] >> JsonProtoConversion::JsonToProtoMap [GOOD] >> DataShardReadIterator::TryWriteManyRows-Commit [GOOD] >> DataShardReadIteratorBatchMode::RangeFull >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::JsonStorageListingV1PDiskIdFilter [GOOD] Test command err: 2025-06-30T09:19:07.829201Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:121:2167], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:07.829278Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:07.829404Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:07.939180Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 20508, node 1 TClient is connected to server localhost:17755 json result: {"Success":true,"Result":{"Total":5,"Entities":[{"Name":"/Root/test","Type":"ext_sub_domain"},{"Name":"/Root/slice","Type":"ext_sub_domain"},{"Name":"/Root/qwerty","Type":"ext_sub_domain"},{"Name":"/Root/MyDatabase","Type":"ext_sub_domain"},{"Name":"/Root/TestDatabase","Type":"ext_sub_domain"}]},"Version":2} 2025-06-30T09:19:18.354375Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:2845:2400], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:18.354918Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:18.354974Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:18.355041Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:1484:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:18.355087Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:1487:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:18.355291Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:18.355314Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:18.355411Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:2839:2343], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:18.355491Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:18.355506Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:18.355701Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:18.355879Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:2848:2343], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:18.355919Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:2852:2343], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:18.356103Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:18.356522Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:18.356749Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:18.356767Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:18.356782Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:18.357010Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:18.357044Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:2855:2343], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:18.357060Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:18.358133Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:2801:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:18.358413Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:18.358459Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:2804:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:18.358569Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:18.358771Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:18.358825Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:18.516329Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:18.677186Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:19:18.682455Z node 2 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-30T09:19:18.909933Z node 2 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 25152, node 2 TClient is connected to server localhost:25327 2025-06-30T09:19:18.980214Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:18.980235Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:18.980240Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:18.980457Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:30.259573Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:2841:2400], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:30.259999Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:30.260167Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:30.260220Z node 19 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [19:1287:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:30.260477Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:30.260612Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:30.260734Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:2844:2343], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:30.260959Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:30.261154Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:30.261436Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError ... le .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:47.549579Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:47.549692Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:47.549828Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:47.549869Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:47.549884Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:47.550074Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:47.550113Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:47.550283Z node 32 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [32:2806:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:47.550528Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:47.550550Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:47.651794Z node 29 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:47.748135Z node 29 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:19:47.751613Z node 29 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-30T09:19:47.790549Z node 29 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 21834, node 29 TClient is connected to server localhost:61316 2025-06-30T09:19:47.831355Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:47.831379Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:47.831385Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:47.831539Z node 29 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:57.722406Z node 38 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [38:1323:2238], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:57.722814Z node 38 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:57.722927Z node 38 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:57.723421Z node 40 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [40:2429:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:57.723489Z node 41 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [41:2463:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:57.723623Z node 43 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [43:2469:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:57.723988Z node 46 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [46:2478:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:57.724179Z node 40 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:57.724218Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:57.724309Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:57.724323Z node 43 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:57.724356Z node 46 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:57.724563Z node 40 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:57.724602Z node 43 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:57.724634Z node 44 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [44:2472:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:57.724672Z node 46 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:57.724814Z node 44 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:57.725047Z node 39 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:57.725084Z node 39 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [39:2845:2342], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:57.725099Z node 39 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:57.725126Z node 44 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:57.725423Z node 45 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [45:2475:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:57.725850Z node 45 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:57.725988Z node 45 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:57.726252Z node 42 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [42:2466:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:57.726520Z node 42 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:57.726620Z node 42 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-30T09:19:57.847697Z node 38 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:58.008236Z node 38 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:19:58.032551Z node 38 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-30T09:19:58.143722Z node 38 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 11000, node 38 TClient is connected to server localhost:64227 2025-06-30T09:19:58.235196Z node 38 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:58.235222Z node 38 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:58.235227Z node 38 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:58.235439Z node 38 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson_ReceiveMessageResult [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleBool [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalString [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalEmpty [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalOptionalEmpty [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalOptionalEmpty2 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::List [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Dict [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoArray [GOOD] |80.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoMap [GOOD] |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |80.3%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut >> JsonProtoConversion::JsonToProtoSingleValue [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk100 [GOOD] |80.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLValueToYdbValueTest::Dict [GOOD] |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ext_index/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink [GOOD] Test command err: 2025-06-30T09:19:34.938205Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:34.938315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:34.938335Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042ec/r3tmp/tmpJBpKP9/pdisk_1.dat 2025-06-30T09:19:35.052105Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:19:35.053021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:35.071035Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:35.072271Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275174381439 != 1751275174381443 2025-06-30T09:19:35.114596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:35.114642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:35.125271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:35.199208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:35.218374Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:19:35.218670Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:19:35.218807Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:19:35.218895Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:19:35.230889Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:19:35.231151Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:19:35.231185Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:19:35.231399Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:19:35.231410Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:19:35.231419Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:19:35.231498Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:19:35.231521Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:19:35.231536Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:19:35.241894Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:19:35.248005Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:19:35.248095Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:19:35.248123Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:19:35.248130Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:19:35.248136Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:19:35.248144Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:35.248214Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:35.248223Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:35.248321Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:19:35.248348Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:19:35.248362Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:19:35.248374Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:35.248383Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:19:35.248390Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:19:35.248395Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:19:35.248401Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:19:35.248407Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:19:35.248430Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:35.248436Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:35.248444Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:19:35.248544Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:19:35.248551Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:19:35.248576Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:19:35.248636Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:19:35.248648Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:19:35.248668Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:19:35.248679Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:19:35.248684Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:19:35.248692Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:19:35.248697Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:35.248751Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:19:35.248756Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:19:35.248761Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:19:35.248766Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:35.248778Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:19:35.248783Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:19:35.248791Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:19:35.248795Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:19:35.248801Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:19:35.249129Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:19:35.249142Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:19:35.259399Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:19:35.259425Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:35.259432Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:35.259443Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-30T09:20:01.540877Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-30T09:20:01.540886Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:556:2482], 3} after executionsCount# 2 2025-06-30T09:20:01.540892Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:556:2482], 3} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:01.540909Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:556:2482], 3} finished in read 2025-06-30T09:20:01.540918Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-30T09:20:01.540924Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:20:01.540928Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:20:01.540933Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:20:01.540941Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-30T09:20:01.540946Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:20:01.540950Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 72075186224037888 has finished 2025-06-30T09:20:01.540954Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:20:01.540959Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-30T09:20:01.540965Z node 16 
:TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:20:01.540973Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:20:01.541141Z node 16 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=16&id=ZjAzNjkyYmEtNzVkY2M2MS04YjBlMDAwYS01M2VjMTdmNg==, workerId: [16:1103:2871], local sessions count: 0 2025-06-30T09:20:01.541324Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [16:556:2482], Recipient [16:629:2533]: NKikimrTxDataShard.TEvRead ReadId: 4 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-30T09:20:01.541357Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-30T09:20:01.541372Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit CheckRead 2025-06-30T09:20:01.541386Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-30T09:20:01.541392Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:20:01.541397Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:20:01.541402Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:20:01.541417Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:9] at 72075186224037888 2025-06-30T09:20:01.541423Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-30T09:20:01.541428Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:20:01.541433Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:20:01.541437Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:20:01.541453Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 4 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-30T09:20:01.541488Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-30T09:20:01.541496Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:556:2482], 4} after executionsCount# 1 2025-06-30T09:20:01.541503Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:556:2482], 4} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 
18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:01.541521Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:556:2482], 4} finished in read 2025-06-30T09:20:01.541529Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-30T09:20:01.541534Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:20:01.541542Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:20:01.541547Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:20:01.541555Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-30T09:20:01.541560Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:20:01.541564Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:9] at 72075186224037888 has finished 2025-06-30T09:20:01.541569Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:20:01.541665Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [16:556:2482], Recipient [16:629:2533]: NKikimrTxDataShard.TEvRead ReadId: 5 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-30T09:20:01.541685Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-30T09:20:01.541693Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit CheckRead 2025-06-30T09:20:01.541703Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-30T09:20:01.541707Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:20:01.541712Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:20:01.541717Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:20:01.541724Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:10] at 72075186224037888 2025-06-30T09:20:01.541729Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-30T09:20:01.541734Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:20:01.541738Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:20:01.541743Z node 16 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:20:01.541757Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 5 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-30T09:20:01.541784Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3000/18446744073709551615 2025-06-30T09:20:01.541790Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:556:2482], 5} after executionsCount# 1 2025-06-30T09:20:01.541796Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:556:2482], 5} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:01.541816Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:556:2482], 5} finished in read 2025-06-30T09:20:01.541824Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-30T09:20:01.541829Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:20:01.541834Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:20:01.541839Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:20:01.541846Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-30T09:20:01.541851Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:20:01.541855Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:10] at 72075186224037888 has finished 2025-06-30T09:20:01.541860Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ext_index/ut/unittest >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity [GOOD] >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink [GOOD] |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoSingleValue [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 16559, MsgBus: 6892 2025-06-30T09:19:48.275042Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669659674615945:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:48.275067Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002bb0/r3tmp/tmpWThAMG/pdisk_1.dat 2025-06-30T09:19:48.330301Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:48.330416Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669659674615924:2080] 1751275188274873 != 1751275188274876 TServer::EnableGrpc on GrpcPort 16559, node 1 2025-06-30T09:19:48.339984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:19:48.339996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:19:48.339998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:19:48.340039Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6892 TClient is connected to server localhost:6892 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:19:48.407043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:48.407077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:48.408263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:48.409089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:48.417651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:19:48.444694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:48.468417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:48.482039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:19:48.638455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669659674617521:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:48.638497Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:48.680040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.688223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.700535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.713853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.727970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.742365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.755843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:48.771832Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669659674618175:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:48.771868Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:48.771878Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669659674618180:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:48.772799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:48.775588Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669659674618182:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:19:48.860589Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669659674618233:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:49.055114Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521669663969585798:3572], Recipient [1:7521669659674616242:2144]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:49.055130Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:49.055133Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:19:49.055142Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521669663969585794:3569], Recipient [1:7521669659674616242:2144]: {TEvModifySchemeTransaction txid# 281474976715672 TabletId# 72057594046644480} 2025-06-30T09:19:49.055144Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:19:49.064795Z node 1 :FL ... 7594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: user, IndexColumn: emb, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [3:7521669705194912787:2492], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710767, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710768, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 111 UploadBytes: 2424 ReadRows: 90 ReadBytes: 2310, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0}, txId# 281474976710768 2025-06-30T09:19:58.645248Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:19:58.645316Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:58.645351Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Unlocking 2025-06-30T09:19:58.645385Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Unlocking TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: user, IndexColumn: emb, State: Unlocking, IsBroken: 0, 
IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [3:7521669705194912787:2492], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710767, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710768, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 111 UploadBytes: 2424 ReadRows: 90 ReadBytes: 2310, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:19:58.645392Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:19:58.645398Z node 3 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-30T09:19:58.645468Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:58.645483Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Done 2025-06-30T09:19:58.645501Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Done TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: user, IndexColumn: emb, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [3:7521669705194912787:2492], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710767, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710768, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 111 UploadBytes: 2424 ReadRows: 90 ReadBytes: 2310, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:19:58.645508Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:336: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 281474976715674, subscribers count# 1 2025-06-30T09:19:58.645511Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:19:58.645518Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:58.645527Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7521669705194912787:2492] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715674 at schemeshard: 72057594046644480 2025-06-30T09:19:58.645600Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 274792450, Sender [3:7521669705194912787:2492], 
Recipient [3:7521669700899943048:2143]: NKikimrIndexBuilder.TEvGetRequest DatabaseName: "/Root" IndexBuildId: 281474976715674 2025-06-30T09:19:58.645609Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5095: StateWork, processing event TEvIndexBuilder::TEvGetRequest 2025-06-30T09:19:58.645617Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/Root" IndexBuildId: 281474976715674 2025-06-30T09:19:58.645711Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:103: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 281474976715674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "index" index_columns: "user" index_columns: "emb" global_vector_kmeans_tree_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1751275198 } EndTime { seconds: 1751275198 } UserSID: "" } 2025-06-30T09:19:58.645720Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:19:58.645740Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:19:58.645767Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7521669705194912787:2492] msg type: 274792451 msg: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 281474976715674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "index" index_columns: "user" index_columns: "emb" global_vector_kmeans_tree_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1751275198 } EndTime { seconds: 1751275198 } UserSID: "" } at schemeshard: 72057594046644480 2025-06-30T09:19:58.645840Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [3:7521669705194912790:3692], Recipient [3:7521669700899943048:2143]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:58.645847Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:19:58.645849Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:19:58.650779Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [3:7521669705194913645:4351], Recipient [3:7521669700899943048:2143]: NKikimrSchemeOp.TDescribePath Path: "/Root/TestTable" Options { ShowPrivateTable: false } 2025-06-30T09:19:58.650800Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:19:58.676998Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:58.691879Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7521669700899943048:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:58.691903Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:58.691913Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [3:7521669700899943048:2143], Recipient [3:7521669700899943048:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:58.691915Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:59.692400Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7521669700899943048:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:59.692433Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:19:59.692446Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [3:7521669700899943048:2143], Recipient [3:7521669700899943048:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:19:59.692450Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:20:00.692828Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7521669700899943048:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:20:00.692857Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:20:00.692872Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [3:7521669700899943048:2143], Recipient [3:7521669700899943048:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:20:00.692876Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:20:01.693240Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7521669700899943048:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:20:01.693273Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:20:01.693287Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [3:7521669700899943048:2143], Recipient [3:7521669700899943048:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:20:01.693291Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TBoardSubscriberTest::SimpleSubscriber ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReadRangeChunk100 [GOOD] Test command err: 2025-06-30T09:19:36.721008Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:36.721080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:36.721093Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042e5/r3tmp/tmpbpCy6X/pdisk_1.dat 2025-06-30T09:19:36.814976Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:19:36.815801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:36.832480Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:36.833399Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275176341331 != 1751275176341335 2025-06-30T09:19:36.875403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:36.875443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:36.886147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:36.960096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:36.977317Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:19:36.977517Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:19:36.977602Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:19:36.977664Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:19:36.985388Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:19:36.985570Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:19:36.985590Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:19:36.985745Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:19:36.985754Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:19:36.985760Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:19:36.985812Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:19:36.985831Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:19:36.985842Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:19:36.996117Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:19:37.001105Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:19:37.001211Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:19:37.001241Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:19:37.001246Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:19:37.001250Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:19:37.001255Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:37.001337Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:37.001348Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:37.001461Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:19:37.001488Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:19:37.001501Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:19:37.001513Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:37.001523Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:19:37.001530Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:19:37.001534Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:19:37.001541Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:19:37.001547Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:19:37.001572Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:37.001578Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:37.001586Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:19:37.001695Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:19:37.001701Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:19:37.001726Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:19:37.001775Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:19:37.001786Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:19:37.001802Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:19:37.001810Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:19:37.001813Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:19:37.001818Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:19:37.001821Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:37.001868Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:19:37.001873Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:19:37.001878Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:19:37.001882Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:37.001911Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:19:37.001915Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:19:37.001922Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:19:37.001926Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:19:37.001933Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:19:37.002263Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:19:37.002278Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:19:37.012634Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:19:37.012678Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:37.012687Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:37.012702Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542804, quota bytes left# 18446744073708987711, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.296920Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.296926Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.296932Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.296987Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542705, quota bytes left# 18446744073708981375, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.297009Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.297015Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.297021Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.297076Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542606, quota bytes left# 18446744073708975039, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.297093Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.297101Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 
ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.297106Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.297159Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542507, quota bytes left# 18446744073708968703, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.297186Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.297192Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.297197Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.297251Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542408, quota bytes left# 18446744073708962367, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.297281Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.297287Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.297293Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.297347Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542309, quota bytes left# 18446744073708956031, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.297362Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.297369Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.297374Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.297430Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542210, quota bytes left# 18446744073708949695, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.297453Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.297459Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 
ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.297465Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.297519Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542111, quota bytes left# 18446744073708943359, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.297544Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.297550Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.297556Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.297638Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542012, quota bytes left# 18446744073708937023, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.297669Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.297676Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.297681Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.297737Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541913, quota bytes left# 18446744073708930687, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.297758Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.297764Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.297769Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.297824Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541814, quota bytes left# 18446744073708924351, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.297847Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.297853Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 
ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.297858Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.297928Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541715, quota bytes left# 18446744073708918015, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.297950Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.297957Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.297962Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.298019Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541616, quota bytes left# 18446744073708911679, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.298048Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:940:2740], Recipient [15:940:2740]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.298056Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 0 2025-06-30T09:20:02.298062Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 0 2025-06-30T09:20:02.298077Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:556:2482], 1} sends rowCount# 1, bytes# 64, quota rows left# 18446744073709541615, quota bytes left# 18446744073708911615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:02.298086Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3103: 72075186224037890 read iterator# {[15:556:2482], 1} finished in ReadContinue |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ext_index/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink [GOOD] Test command err: 2025-06-30T09:19:34.403913Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:34.404025Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:34.404040Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042f8/r3tmp/tmpkEwPyP/pdisk_1.dat 2025-06-30T09:19:34.501329Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:19:34.502328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:34.519067Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:34.520089Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275173973007 != 1751275173973011 2025-06-30T09:19:34.562152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:34.562200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:34.572739Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:34.646358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:34.664918Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:19:34.665266Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:19:34.665421Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:19:34.665510Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:19:34.674788Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:19:34.675052Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:19:34.675084Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:19:34.675267Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:19:34.675280Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:19:34.675289Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:19:34.675359Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:19:34.675385Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:19:34.675401Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:19:34.685716Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:19:34.690181Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:19:34.690275Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:19:34.690303Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:19:34.690309Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:19:34.690313Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:19:34.690319Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:34.690385Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:34.690393Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:34.690488Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:19:34.690512Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:19:34.690521Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:19:34.690532Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:34.690541Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:19:34.690549Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:19:34.690554Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:19:34.690560Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:19:34.690567Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:19:34.690589Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:34.690593Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:34.690599Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:19:34.690687Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:19:34.690692Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:19:34.690713Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:19:34.690800Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:19:34.690814Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:19:34.690834Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:19:34.690844Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:19:34.690849Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:19:34.690856Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:19:34.690862Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:34.690906Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:19:34.690910Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:19:34.690913Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:19:34.690916Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:34.690925Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:19:34.690930Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:19:34.690937Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:19:34.690941Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:19:34.690947Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:19:34.691237Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:19:34.691246Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:19:34.701523Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:19:34.701549Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:34.701557Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:34.701568Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-30T09:20:02.323632Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v3001/18446744073709551615 2025-06-30T09:20:02.323644Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CheckRead 2025-06-30T09:20:02.323673Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-30T09:20:02.323680Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:20:02.323688Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:20:02.323693Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:20:02.323711Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037888 2025-06-30T09:20:02.323718Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-30T09:20:02.323723Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:20:02.323728Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:20:02.323733Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:20:02.323750Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 ResultFormat: FORMAT_ARROW MaxRowsInResult: 2 } 2025-06-30T09:20:02.323848Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Continue 2025-06-30T09:20:02.323857Z node 15 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Continue at tablet# 72075186224037888 2025-06-30T09:20:02.323869Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-30T09:20:02.344865Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [15:1010:2803], Recipient [15:629:2533]: {TEvReadSet step# 3001 txid# 281474976715667 TabletSource# 72075186224037891 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037891 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-30T09:20:02.344904Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:20:02.344914Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037891 dest 72075186224037888 producer 72075186224037891 txId 281474976715667 2025-06-30T09:20:02.344950Z node 15 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 3001 txid# 281474976715667 TabletSource# 72075186224037891 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037891 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-30T09:20:02.345035Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3001 : 281474976715667] from 72075186224037888 at tablet 72075186224037888 send result to client [15:1098:2852], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:20:02.345054Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:20:02.345069Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:4] at 72075186224037888 for ExecuteRead 2025-06-30T09:20:02.345166Z node 15 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1365: ActorId: [15:1098:2852] TxId: 281474976715667. Ctx: { TraceId: 01jz025sps3atj01mqbnk69h3q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=NTE4ZjE3YTQtZTI3ZGI1ZjYtMjJlM2FlYzgtNWEzOWU4OGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got propose result, shard: 72075186224037888, status: COMPLETE, error: 2025-06-30T09:20:02.345243Z node 15 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [15:1098:2852] TxId: 281474976715667. Ctx: { TraceId: 01jz025sps3atj01mqbnk69h3q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=NTE4ZjE3YTQtZTI3ZGI1ZjYtMjJlM2FlYzgtNWEzOWU4OGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:20:02.345261Z node 15 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [15:1098:2852] TxId: 281474976715667. Ctx: { TraceId: 01jz025sps3atj01mqbnk69h3q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=NTE4ZjE3YTQtZTI3ZGI1ZjYtMjJlM2FlYzgtNWEzOWU4OGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-30T09:20:02.345290Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [15:629:2533], Recipient [15:629:2533]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:20:02.345298Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:20:02.345641Z node 15 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 5, sender: [15:556:2482], selfId: [15:60:2107], source: [15:1076:2852] 2025-06-30T09:20:02.345692Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:20:02.345794Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:20:02.345804Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:20:02.345810Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:282: Return cached ready operation [0:4] at 72075186224037888 2025-06-30T09:20:02.345817Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:20:02.345893Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 2, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 ResultFormat: FORMAT_ARROW MaxRowsInResult: 2 } 2025-06-30T09:20:02.346018Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-30T09:20:02.346028Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[15:556:2482], 1} after executionsCount# 2 2025-06-30T09:20:02.346038Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[15:556:2482], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551613, quota bytes left# 18446744073709551583, hasUnreadQueries# 1, total queries# 6, firstUnprocessed# 0 2025-06-30T09:20:02.346084Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-30T09:20:02.346091Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:20:02.346098Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:20:02.346103Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:20:02.346118Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-30T09:20:02.346122Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:20:02.346127Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037888 has finished 
2025-06-30T09:20:02.346133Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:20:02.346139Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-30T09:20:02.346145Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:20:02.346150Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:20:02.346211Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:629:2533], Recipient [15:629:2533]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.346226Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037888 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 2 2025-06-30T09:20:02.346241Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037888 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 2 2025-06-30T09:20:02.346256Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037888 readContinue iterator# {[15:556:2482], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551611, quota bytes left# 18446744073709551551, hasUnreadQueries# 1, total queries# 6, firstUnprocessed# 2 2025-06-30T09:20:02.346496Z node 15 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=15&id=NTE4ZjE3YTQtZTI3ZGI1ZjYtMjJlM2FlYzgtNWEzOWU4OGQ=, workerId: [15:1076:2852], local sessions count: 0 2025-06-30T09:20:02.346526Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:629:2533], Recipient [15:629:2533]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-30T09:20:02.346538Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037888 ReadContinue for iterator# {[15:556:2482], 1}, firstUnprocessedQuery# 4 2025-06-30T09:20:02.346554Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037888 ReadContinue: iterator# {[15:556:2482], 1}, FirstUnprocessedQuery# 4 2025-06-30T09:20:02.346571Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037888 readContinue iterator# {[15:556:2482], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551609, quota bytes left# 18446744073709551519, hasUnreadQueries# 0, total queries# 6, firstUnprocessed# 4 2025-06-30T09:20:02.346587Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3103: 72075186224037888 read iterator# {[15:556:2482], 1} finished in ReadContinue 2025-06-30T09:20:02.346630Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [15:64:2111], Recipient [15:1010:2803]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715666 LockNode: 15 Status: STATUS_NOT_FOUND |80.3%| [TA] $(B)/ydb/core/http_proxy/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DataShardReadIteratorBatchMode::RangeFull [GOOD] >> DataShardReadIteratorBatchMode::RangeFromInclusive >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder-EvWrite [GOOD] |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest |80.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest >> KqpJoin::IndexLoookupJoinStructJoin-StreamLookupJoin >> KqpJoinOrder::CanonizedJoinOrderTPCH17 >> KqpIndexLookupJoin::LeftJoinSkipNullFilter+StreamLookup >> KqpJoinOrder::DatetimeConstantFold-ColumnStore |80.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |80.3%| [TA] {RESULT} $(B)/ydb/core/http_proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} |80.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection >> TBoardSubscriberTest::SimpleSubscriber [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin4-RemoveLimitOperator >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite [GOOD] >> BSCRestartPDisk::RestartOneByOneWithReconnects [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder-EvWrite [GOOD] Test command err: 2025-06-30T09:19:34.493722Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:34.493818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:34.493835Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042f2/r3tmp/tmpZxhtI8/pdisk_1.dat 2025-06-30T09:19:34.597865Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:19:34.598797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:34.617541Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:34.618945Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275174006948 != 1751275174006952 2025-06-30T09:19:34.661397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:34.661442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:34.672109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:34.745712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:34.763306Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:19:34.763604Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:19:34.763717Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:19:34.763793Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:19:34.772481Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:19:34.772697Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:19:34.772721Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:19:34.772855Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:19:34.772862Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:19:34.772868Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:19:34.772922Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:19:34.772936Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:19:34.772946Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:19:34.783280Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:19:34.786802Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:19:34.786924Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:19:34.786966Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:19:34.786973Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:19:34.786979Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:19:34.786987Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:34.787086Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:34.787096Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:34.787237Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:19:34.787269Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:19:34.787286Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:19:34.787296Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:34.787306Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:19:34.787314Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:19:34.787322Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:19:34.787329Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:19:34.787334Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:19:34.787361Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:34.787369Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:34.787377Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:19:34.787509Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:19:34.787517Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:19:34.787545Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:19:34.787607Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:19:34.787621Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:19:34.787642Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:19:34.787652Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:19:34.787657Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:19:34.787665Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:19:34.787671Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:34.787729Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:19:34.787735Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:19:34.787739Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:19:34.787744Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:34.787755Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:19:34.787760Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:19:34.787765Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:19:34.787769Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:19:34.787777Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:19:34.788070Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:19:34.788084Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:19:34.798494Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:19:34.798538Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:34.798548Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:34.798565Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... hard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 3500:281474976715666 keys extracted: 0 2025-06-30T09:20:03.197944Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-30T09:20:03.197946Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit LoadTxDetails 2025-06-30T09:20:03.197949Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-30T09:20:03.197952Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-30T09:20:03.197956Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:281474976715666] is the new logically complete end at 72075186224037889 2025-06-30T09:20:03.197958Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:281474976715666] is the new logically incomplete end at 72075186224037889 2025-06-30T09:20:03.197961Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:281474976715666] at 72075186224037889 2025-06-30T09:20:03.197964Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-30T09:20:03.197966Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-30T09:20:03.197969Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-30T09:20:03.197972Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-30T09:20:03.197983Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-30T09:20:03.197985Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on 
unit CreateVolatileSnapshot 2025-06-30T09:20:03.197988Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-30T09:20:03.197991Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-30T09:20:03.197993Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-30T09:20:03.197996Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-30T09:20:03.197998Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompleteOperation 2025-06-30T09:20:03.198001Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-30T09:20:03.198021Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is DelayComplete 2025-06-30T09:20:03.198024Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompleteOperation 2025-06-30T09:20:03.198026Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompletedOperations 2025-06-30T09:20:03.198029Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompletedOperations 2025-06-30T09:20:03.198033Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-30T09:20:03.198036Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompletedOperations 2025-06-30T09:20:03.198039Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715666] at 72075186224037889 has finished 2025-06-30T09:20:03.198041Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:20:03.198044Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-30T09:20:03.198046Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-30T09:20:03.198048Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-30T09:20:03.208436Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-30T09:20:03.208472Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:20:03.208485Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037888 on unit CompleteOperation 2025-06-30T09:20:03.208510Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037888 at tablet 72075186224037888 
send result to client [15:1033:2810], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:20:03.208523Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:20:03.208555Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-30T09:20:03.208564Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:20:03.208570Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-30T09:20:03.208579Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send result to client [15:1033:2810], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:20:03.208585Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:20:03.209017Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [15:556:2482], Recipient [15:629:2533]: NKikimrTxDataShard.TEvRead ReadId: 3 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 1011121314 ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-30T09:20:03.209048Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-30T09:20:03.209064Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-30T09:20:03.209087Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:20:03.209094Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:20:03.209101Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:20:03.209110Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:20:03.209120Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-30T09:20:03.209126Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:20:03.209131Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:20:03.209136Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:20:03.209141Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:20:03.209160Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 3 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 
1011121314 ResultFormat: FORMAT_ARROW } 2025-06-30T09:20:03.209255Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 1011121314, counter# 18446744073709551615 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-30T09:20:03.209264Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/281474976715666 2025-06-30T09:20:03.209273Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[15:556:2482], 3} after executionsCount# 1 2025-06-30T09:20:03.209284Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[15:556:2482], 3} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:03.209323Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[15:556:2482], 3} finished in read 2025-06-30T09:20:03.209337Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:20:03.209341Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:20:03.209346Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:20:03.209351Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:20:03.209365Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:20:03.209370Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:20:03.209375Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-30T09:20:03.209381Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:20:03.209401Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 |80.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::SimpleSubscriber [GOOD] >> KqpJoinOrder::TPCDS90-ColumnStore >> KqpJoinOrder::SortingsWithLookupJoin1+RemoveLimitOperator >> BSCRestartPDisk::RestartOneByOne [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOneWithReconnects [GOOD] Test command err: RandomSeed# 14842423734469417476 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite [GOOD] Test command err: 2025-06-30T09:19:33.604047Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:33.604112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:33.604123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00431e/r3tmp/tmpmFrmLh/pdisk_1.dat 2025-06-30T09:19:33.697600Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:19:33.698523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:33.715428Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:33.716532Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275173121952 != 1751275173121956 2025-06-30T09:19:33.758675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:33.758729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:33.769333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:33.843347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:33.860877Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:19:33.861175Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:19:33.861286Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:19:33.861355Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:19:33.871733Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:19:33.871984Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:19:33.872012Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:19:33.872202Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:19:33.872212Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:19:33.872220Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:19:33.872287Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:19:33.872307Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:19:33.872320Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:19:33.882670Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:19:33.887819Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:19:33.887917Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:19:33.887949Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:19:33.887956Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:19:33.887961Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:19:33.887968Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:33.888055Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:33.888063Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:33.888175Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:19:33.888201Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:19:33.888213Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:19:33.888224Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:33.888232Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:19:33.888239Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:19:33.888244Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:19:33.888250Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:19:33.888255Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:19:33.888278Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:33.888284Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:33.888291Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:19:33.888392Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:19:33.888398Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:19:33.888419Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:19:33.888471Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:19:33.888484Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:19:33.888502Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:19:33.888511Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:19:33.888516Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:19:33.888522Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:19:33.888527Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:33.888579Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:19:33.888584Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:19:33.888589Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:19:33.888594Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:33.888606Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:19:33.888610Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:19:33.888618Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:19:33.888623Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:19:33.888628Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:19:33.888920Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:19:33.888932Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:19:33.899264Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:19:33.899300Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:33.899309Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:33.899321Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 3500:281474976715666 keys extracted: 0 2025-06-30T09:20:03.695955Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-30T09:20:03.695959Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit LoadTxDetails 2025-06-30T09:20:03.695964Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-30T09:20:03.695969Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-30T09:20:03.695976Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:281474976715666] is the new logically complete end at 72075186224037889 2025-06-30T09:20:03.695980Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:281474976715666] is the new logically incomplete end at 72075186224037889 2025-06-30T09:20:03.695985Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:281474976715666] at 72075186224037889 2025-06-30T09:20:03.695991Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-30T09:20:03.695995Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-30T09:20:03.695999Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-30T09:20:03.696004Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-30T09:20:03.696022Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-30T09:20:03.696027Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit 
CreateVolatileSnapshot 2025-06-30T09:20:03.696031Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-30T09:20:03.696036Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-30T09:20:03.696040Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-30T09:20:03.696045Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-30T09:20:03.696051Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompleteOperation 2025-06-30T09:20:03.696056Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-30T09:20:03.696084Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is DelayComplete 2025-06-30T09:20:03.696089Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompleteOperation 2025-06-30T09:20:03.696094Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompletedOperations 2025-06-30T09:20:03.696098Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompletedOperations 2025-06-30T09:20:03.696106Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-30T09:20:03.696111Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompletedOperations 2025-06-30T09:20:03.696116Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715666] at 72075186224037889 has finished 2025-06-30T09:20:03.696121Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:20:03.696125Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-30T09:20:03.696129Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-30T09:20:03.696133Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-30T09:20:03.708837Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-30T09:20:03.708890Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:20:03.708903Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037888 on unit CompleteOperation 2025-06-30T09:20:03.708931Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037888 at tablet 72075186224037888 send 
result to client [15:1033:2810], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:20:03.708946Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:20:03.709041Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-30T09:20:03.709051Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:20:03.709057Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-30T09:20:03.709066Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send result to client [15:1033:2810], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:20:03.709074Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:20:03.709507Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [15:556:2482], Recipient [15:629:2533]: NKikimrTxDataShard.TEvRead ReadId: 10 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 1011121314 ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-30T09:20:03.709550Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-30T09:20:03.709569Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-30T09:20:03.709594Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:20:03.709600Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:20:03.709608Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:20:03.709614Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:20:03.709626Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-30T09:20:03.709632Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:20:03.709637Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:20:03.709642Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:20:03.709647Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:20:03.709666Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 10 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 
1011121314 ResultFormat: FORMAT_ARROW } 2025-06-30T09:20:03.709781Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 1011121314, counter# 18446744073709551615 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-30T09:20:03.709791Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/281474976715666 2025-06-30T09:20:03.709801Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[15:556:2482], 10} after executionsCount# 1 2025-06-30T09:20:03.709812Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[15:556:2482], 10} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:20:03.709853Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[15:556:2482], 10} finished in read 2025-06-30T09:20:03.709868Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:20:03.709897Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:20:03.709902Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:20:03.709906Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:20:03.709922Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:20:03.709927Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:20:03.709933Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-30T09:20:03.709940Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:20:03.709965Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin-NotNull >> KqpJoin::LeftJoinWithNull+StreamLookupJoin >> TestKinesisHttpProxy::CreateStreamInIncorrectDb >> TestYmqHttpProxy::TestGetQueueUrl ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOne [GOOD] Test command err: RandomSeed# 16736803617756860695 >> GroupLayoutSanitizer::TestMirror3of4 [GOOD] >> GroupReconfiguration::BsControllerDoesNotDisableGroup >> KqpJoin::IndexLoookupJoinStructJoin-StreamLookupJoin [GOOD] >> KqpJoin::JoinAggregate >> KqpIndexLookupJoin::LeftJoinSkipNullFilter+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinSkipNullFilter-StreamLookup >> DataShardReadIteratorBatchMode::RangeFromInclusive [GOOD] >> DataShardReadIteratorBatchMode::RangeFromNonInclusive >> CommitOffset::DistributedTxCommit_CheckSessionResetAfterCommit [GOOD] >> CommitOffset::DistributedTxCommit_CheckOffsetCommitForDifferentCases |80.4%| [TA] 
$(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> TestYmqHttpProxy::TestCreateQueue >> TopicAutoscaling::ReadFromTimestamp_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadFromTimestamp_PQv1 >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndSameParams >> GroupReconfiguration::BsControllerDoesNotDisableGroup [GOOD] >> GroupReconfiguration::BsControllerDoesNotDisableGroupNoRequestsToNodesWVDisks >> TestKinesisHttpProxy::TestPing >> TxUsage::WriteToTopic_Demo_16_Query [GOOD] >> TestKinesisHttpProxy::DifferentContentTypes >> GroupReconfiguration::BsControllerDoesNotDisableGroupNoRequestsToNodesWVDisks [GOOD] >> GroupReconfiguration::BsControllerConfigurationRequestIsFastEnough >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin+NotNull >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table >> KqpJoin::LeftJoinWithNull+StreamLookupJoin [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_Simple >> TestYmqHttpProxy::TestSendMessageEmptyQueueUrl |80.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |80.4%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} |80.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table >> KqpIndexLookupJoin::LeftJoinSkipNullFilter-StreamLookup [GOOD] >> KqpJoin::JoinAggregate [GOOD] >> DataShardReadIteratorBatchMode::RangeFromNonInclusive [GOOD] >> DataShardReadIteratorBatchMode::RangeToInclusive ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftJoinSkipNullFilter-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 4771, MsgBus: 8550 2025-06-30T09:20:03.588379Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669726380535793:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:03.588408Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a4e/r3tmp/tmpmqokoK/pdisk_1.dat 2025-06-30T09:20:03.644168Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4771, node 1 2025-06-30T09:20:03.660079Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:03.660092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:03.660094Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:03.660162Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8550 2025-06-30T09:20:03.690758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:03.690797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:03.691922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8550 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:03.746720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:03.759425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:03.823275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:03.848196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:03.861358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:04.004806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669730675504663:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.004842Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.046720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.056010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.063952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.078731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.092931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.108071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.127859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.151151Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669730675505316:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.151176Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.151192Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669730675505321:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.152234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:04.155569Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669730675505323:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:20:04.218847Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669730675505374:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:04.417423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.427028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.435414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_c ... nfo.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:05.080760Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:05.081298Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:20:05.082724Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:20:05.088154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.118548Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.180617Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.199289Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.385376Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669731856506577:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.385399Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.395036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.407198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.417092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.431525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.445537Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.458406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.516295Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.532066Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669731856507233:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.532103Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.532189Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669731856507238:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.533164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:05.541590Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669731856507240:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:20:05.623805Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669731856507291:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:05.841601Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.865989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.885026Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.910031Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.929093Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.974846Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:05.989487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) |80.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |80.4%| [LD] {RESULT} 
$(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinAggregate [GOOD] Test command err: Trying to start YDB, gRPC: 23976, MsgBus: 20209 2025-06-30T09:20:03.615145Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669727359064891:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:03.615184Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a48/r3tmp/tmpejq2Ty/pdisk_1.dat 2025-06-30T09:20:03.700141Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:03.700363Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669727359064866:2080] 1751275203614936 != 1751275203614939 TServer::EnableGrpc on GrpcPort 23976, node 1 2025-06-30T09:20:03.717369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:03.717385Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:03.717387Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:03.717427Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:03.718074Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:03.718100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:03.719128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20209 TClient is connected to server localhost:20209 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:20:03.788731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:03.800525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:03.823129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:03.849315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:03.864021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:04.014024Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669731654033780:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.014061Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.065831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.074695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.086175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.100135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.114620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.128341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.142087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.160070Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669731654034433:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.160103Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.160189Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669731654034438:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.161269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:04.169110Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669731654034440:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:20:04.267335Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669731654034491:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:04.424606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.432165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.442671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCre ... tus: LookupError; 2025-06-30T09:20:05.012662Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:05.012953Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521669731076655842:2080] 1751275204981996 != 1751275204981999 TServer::EnableGrpc on GrpcPort 2338, node 2 2025-06-30T09:20:05.025282Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:05.025295Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:05.025298Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:05.025343Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27538 TClient is connected to server localhost:27538 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:20:05.092851Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:05.092891Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:05.093282Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:05.095204Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:05.095339Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:20:05.106085Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.128108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.154668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.167223Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.360161Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669735371624734:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.360186Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.369934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.379714Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.436155Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.446112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.459125Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.474585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.488082Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.503871Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669735371625391:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.503897Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.503937Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669735371625396:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.504686Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:05.513802Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669735371625398:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:20:05.612535Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669735371625449:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:05.774947Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.784483Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.795922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.985461Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin+NotNull [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_Simple [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartNo_Query [GOOD] >> TestKinesisHttpProxy::CreateStreamInIncorrectDb [GOOD] >> TestYmqHttpProxy::TestGetQueueUrl [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Table >> TestKinesisHttpProxy::CreateStreamWithInvalidName ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpJoin::LeftJoinPushdownPredicate_Simple [GOOD] Test command err: Trying to start YDB, gRPC: 19483, MsgBus: 7519 2025-06-30T09:20:04.570618Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669729103874010:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:04.570637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a37/r3tmp/tmpcKbBrg/pdisk_1.dat TServer::EnableGrpc on GrpcPort 19483, node 1 2025-06-30T09:20:04.641252Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:04.643780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:04.643793Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-30T09:20:04.643795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:04.643835Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7519 2025-06-30T09:20:04.673141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:04.673171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:04.674210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7519 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:04.714846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:04.718362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:20:04.728277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:04.810943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:04.842779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:04.855610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:04.980951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669729103875589:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.980998Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.026249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.034479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.045227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.104557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.123364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.138847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.150872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.166704Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669733398843541:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.166760Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.166816Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669733398843546:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.167801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:05.176926Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669733398843548:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:20:05.272675Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669733398843599:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:05.436416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.447192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.457685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 2 ... livered_message; 2025-06-30T09:20:06.030209Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:06.030485Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521669737658164769:2080] 1751275206009014 != 1751275206009017 TServer::EnableGrpc on GrpcPort 29999, node 2 2025-06-30T09:20:06.040572Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:06.040582Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:06.040585Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:06.040631Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15927 TClient is connected to server localhost:15927 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:20:06.116441Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:06.116475Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:06.117085Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:06.118008Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:20:06.119974Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:20:06.125781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:06.144556Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.209196Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.231153Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.453914Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669737658166375:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.453948Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.464960Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.480293Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.535059Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.544061Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.558350Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.571462Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.589069Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.612253Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669737658167029:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.612292Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.612437Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669737658167034:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.613630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:06.620704Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669737658167036:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:20:06.687834Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669737658167087:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:06.909931Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.927205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.939396Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:07.011268Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 26410, MsgBus: 27707 2025-06-30T09:20:04.505854Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669731332294801:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:04.505892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a32/r3tmp/tmp3C9VLD/pdisk_1.dat 2025-06-30T09:20:04.575655Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:04.575766Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669731332294779:2080] 1751275204505702 != 1751275204505705 TServer::EnableGrpc on GrpcPort 26410, node 1 2025-06-30T09:20:04.594350Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:04.594371Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:04.594373Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:04.594418Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27707 TClient is connected to server localhost:27707 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:20:04.651704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:04.651759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:04.652781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:04.661066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:04.674478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:04.743845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:04.770027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:04.788450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:04.979398Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669731332296373:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.979435Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.039123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.052210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.062413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.077313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.086785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.103770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.118407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.133823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669735627264323:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.133850Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.133984Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669735627264328:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.134875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:05.142111Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669735627264330:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:20:05.239499Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669735627264381:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:05.421916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.444445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.508018Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, ... sk_1.dat 2025-06-30T09:20:05.931814Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:05.955351Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:05.958854Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521669733938122065:2080] 1751275205906236 != 1751275205906239 TServer::EnableGrpc on GrpcPort 28602, node 2 2025-06-30T09:20:05.968427Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:05.968441Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:05.968443Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:05.968500Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12412 2025-06-30T09:20:06.026584Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:06.026628Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:12412 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:20:06.031170Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:06.037961Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:06.039813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:20:06.043892Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.066088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.088973Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:06.101683Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.436715Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669738233090957:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.436747Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.459392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.482120Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.496705Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.512390Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.526367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.539599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.554585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.576991Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669738233091611:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.577017Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.577121Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669738233091616:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.578306Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:06.581986Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:20:06.582121Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669738233091618:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:20:06.671931Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669738233091669:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:06.888767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.914502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.926358Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TestYmqHttpProxy::TestGetQueueUrlOfNotExistingQueue >> TestYmqHttpProxy::TestCreateQueue [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithBadQueueName >> TestYmqHttpProxy::TestSendMessage >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndSameParams [GOOD] >> TestKinesisHttpProxy::TestPing [GOOD] >> TestKinesisHttpProxy::UnauthorizedGetShardIteratorRequest |80.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut >> DirectReadWithServer::KillPQTablet [GOOD] >> DirectReadWithServer::KillPQRBTablet [GOOD] >> LocalPartition::Restarts |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |80.4%| [LD] {RESULT} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut >> TestKinesisHttpProxy::DifferentContentTypes [GOOD] >> TestYmqHttpProxy::TestSendMessageEmptyQueueUrl [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndDifferentParams >> TestKinesisHttpProxy::TestRequestBadJson >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Table [GOOD] >> DataShardReadIteratorBatchMode::RangeToInclusive [GOOD] >> DataShardReadIteratorBatchMode::RangeToNonInclusive >> TestKinesisHttpProxy::GoodRequestPutRecords >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly [GOOD] >> PersQueueSdkReadSessionTest::StopResumeReadingData >> TestYmqHttpProxy::TestSendMessageFifoQueue >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages >> BasicUsage::RetryDiscoveryWithCancel [GOOD] >> BasicUsage::RecreateObserver >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests10000Inflight100BlobSize1000 [GOOD] >> KqpJoinOrder::TPCHEveryQueryWorks-ColumnStore >> TestKinesisHttpProxy::MissingAction >> ReadSessionImplTest::DataReceivedCallbackReal [GOOD] >> ReadSessionImplTest::DataReceivedCallback >> TestYmqHttpProxy::TestGetQueueUrlOfNotExistingQueue [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> VectorIndexBuildTestReboots::BaseCase-Prefixed[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:16:55.817683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:55.817712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:55.817718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:55.817724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:55.817736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:55.817741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:55.817752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:55.817773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:55.817909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:55.817987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:55.834933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:16:55.834961Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:55.835085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:55.841272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:55.841341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:55.841389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:55.844481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:55.844674Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:55.844811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:55.844871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:55.845558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:55.845606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:55.845879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:55.845891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:55.845924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:55.845933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:55.845940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:55.845976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:16:55.847458Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:16:55.868362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:55.868429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:55.868479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:55.868486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:55.868535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:55.868546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, 
first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:55.871068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:55.871113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:55.871173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:55.871184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:55.871188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:55.871193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:55.871805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:55.871819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:55.871826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:55.872229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:55.872238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:55.872243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:55.872253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:55.872817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:55.873216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:55.873263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:55.873445Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:55.873470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:16:55.873479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:55.873535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:16:55.873541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:55.873566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:55.873576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: ... CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 
72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:54.497924Z node 266 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplPostingTable0build" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:19:54.497951Z node 266 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplPostingTable0build" took 32us result status StatusPathDoesNotExist 2025-06-30T09:19:54.497973Z node 266 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/dir/Table/index1/indexImplPostingTable0build\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/dir/Table/index1\' (id: [OwnerId: 72057594046678944, LocalPathId: 5]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/dir/Table/index1/indexImplPostingTable0build" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/dir/Table/index1" LastExistedPrefixPathId: 5 LastExistedPrefixDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:19:54.498041Z node 266 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplPostingTable1build" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:19:54.498055Z node 266 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplPostingTable1build" took 16us result status StatusPathDoesNotExist 2025-06-30T09:19:54.498072Z node 266 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/dir/Table/index1/indexImplPostingTable1build\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/dir/Table/index1\' (id: [OwnerId: 72057594046678944, LocalPathId: 5]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: 
"/MyRoot/dir/Table/index1/indexImplPostingTable1build" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/dir/Table/index1" LastExistedPrefixPathId: 5 LastExistedPrefixDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:19:54.498114Z node 266 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplPostingTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:19:54.498130Z node 266 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplPostingTable" took 17us result status StatusSuccess 2025-06-30T09:19:54.498221Z node 266 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1/indexImplPostingTable" PathDescription { Self { Name: "indexImplPostingTable" PathId: 7 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplPostingTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 
8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ... 
posting table contains 400 rows ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests10000Inflight100BlobSize1000 [GOOD] Test command err: RandomSeed# 15739681740140934772 2025-06-30T09:19:47.040383Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [a42cceb145b35141] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:20360:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:1:0]"} Marker# BPG29 2025-06-30T09:19:47.040437Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:20360:1000:0] PatchedBlobId# [1:1:2:10:24456:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-30T09:19:48.254262Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [248c5079851bd35b] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:20361:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:48.254310Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:20361:1000:0] PatchedBlobId# [1:1:2:10:69513:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:48.255353Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [670a38a727218fc0] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:2:10:69513:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:48.255373Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:2:10:69513:1000:0] PatchedBlobId# [1:1:3:10:24457:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:48.256080Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [09b86f6899feac73] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:3:10:24457:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:48.256098Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:3:10:24457:1000:0] PatchedBlobId# [1:1:4:10:3977:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:48.256738Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [e0b4261204903583] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:4:10:3977:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:48.256755Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:4:10:3977:1000:0] PatchedBlobId# [1:1:5:10:3977:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE 
GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:48.257354Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [fbb0f5fbb8062d19] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:5:10:3977:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:48.257370Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:5:10:3977:1000:0] PatchedBlobId# [1:1:6:10:8073:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:48.257942Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [0506960bf75612f3] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:6:10:8073:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:48.257958Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:6:10:8073:1000:0] PatchedBlobId# [1:1:7:10:130953:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:48.258509Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [9217841ee4ddc791] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:7:10:130953:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:48.258527Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:7:10:130953:1000:0] PatchedBlobId# [1:1:8:10:12169:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:48.259156Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [2b30a453491e708d] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:8:10:12169:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:48.259173Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:8:10:12169:1000:0] PatchedBlobId# [1:1:9:10:16265:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:48.259714Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [05320a0bf929cf1b] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:9:10:16265:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:48.259733Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:9:10:16265:1000:0] PatchedBlobId# [1:1:10:10:40841:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:48.260339Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: 
[78b58547cc8f0e9e] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:10:10:40841:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:48.260354Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:40841:1000:0] PatchedBlobId# [1:1:11:10:20361:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:49.426615Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [2327a074f547e8f1] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:20362:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:49.426664Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:20362:1000:0] PatchedBlobId# [1:1:2:10:24458:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:49.428050Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [078bdee044d5c36a] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:2:10:24458:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:49.428083Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:2:10:24458:1000:0] PatchedBlobId# [1:1:3:10:147338:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:49.429002Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [3dbf187e5add6e40] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:3:10:147338:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:49.429023Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:3:10:147338:1000:0] PatchedBlobId# [1:1:4:10:3978:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:49.429783Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [436cf51f1a5e73fa] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:4:10:3978:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:49.429803Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:4:10:3978:1000:0] PatchedBlobId# [1:1:5:10:8074:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:49.430549Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [2476cb14f72ca04a] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:5:10:8074:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE 
from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:49.430569Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:5:10:8074:1000:0] PatchedBlobId# [1:1:6:10:81802:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:49.431301Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [12dd76327f896f12] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:6:10:81802:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:49.431321Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:6:10:81802:1000:0] PatchedBlobId# [1:1:7:10:12170:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:19:49.432084Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [613bfc563b6cd7e6] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:7:10:12170:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:19:49.432107Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:7:10:12170: ... 2000000:_:0:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:105:10:159693:1000:0] PatchedBlobId# [1:1:106:10:16333:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.797782Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:106:10:16322:1000:0] PatchedBlobId# [1:1:107:10:16322:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.797968Z 6 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:104:10:12274:1000:0] PatchedBlobId# [1:1:105:10:135154:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.803559Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [1a79f9bbbb1ae5d9] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:91:10:4019:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:20:06.804061Z 3 00h00m30.010000s :BS_PROXY_GET ERROR: [06c525fe05a1ba60] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:91:10:4080:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:20:06.804475Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:91:10:4019:1000:0] PatchedBlobId# [1:1:92:10:53171:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# 
DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.804924Z 3 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:91:10:4080:1000:0] PatchedBlobId# [1:1:92:10:4080:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.806487Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [194d023acedbb637] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:108:10:20438:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:20:06.808179Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:108:10:20438:1000:0] PatchedBlobId# [1:1:109:10:143318:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.809051Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [4ce75644737ebec8] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:106:10:16338:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:1:0]"} Marker# BPG29 2025-06-30T09:20:06.809188Z 5 00h00m30.010000s :BS_PROXY_GET ERROR: [70d1042230890c0a] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:100:10:1:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:2:0]"} Marker# BPG29 2025-06-30T09:20:06.809990Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:106:10:16338:1000:0] PatchedBlobId# [1:1:107:10:90066:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-30T09:20:06.810051Z 5 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:100:10:1:1000:0] PatchedBlobId# [1:1:101:10:4097:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:2:0] Marker# BSVSP01 2025-06-30T09:20:06.814480Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [cad34d9b1505b3e7] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:91:10:4025:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:20:06.814941Z 3 00h00m30.010000s :BS_PROXY_GET ERROR: [5ee9994cff78cb10] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:91:10:24548:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:20:06.815243Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:91:10:4025:1000:0] PatchedBlobId# [1:1:92:10:126905:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.815692Z 3 
00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:91:10:24548:1000:0] PatchedBlobId# [1:1:92:10:4068:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.815868Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [2260e9f4cda891a8] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:109:10:24540:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:20:06.816447Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:109:10:24540:1000:0] PatchedBlobId# [1:1:110:10:24540:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.818354Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [1579940b553009df] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:106:10:16341:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:1:0]"} Marker# BPG29 2025-06-30T09:20:06.818529Z 5 00h00m30.010000s :BS_PROXY_GET ERROR: [7bcd4bd472420b03] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:101:10:4014:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:2:0]"} Marker# BPG29 2025-06-30T09:20:06.819153Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:106:10:16341:1000:0] PatchedBlobId# [1:1:107:10:139221:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-30T09:20:06.819233Z 5 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:101:10:4014:1000:0] PatchedBlobId# [1:1:102:10:4014:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:2:0] Marker# BSVSP01 2025-06-30T09:20:06.823161Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [9ad9cfd0885f35c5] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:111:10:4066:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:20:06.823472Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [2638f835afc0f387] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:91:10:24558:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:20:06.823885Z 3 00h00m30.010000s :BS_PROXY_GET ERROR: [567f40b5802d0a93] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:91:10:73706:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:20:06.824036Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:111:10:4066:1000:0] PatchedBlobId# [1:1:112:10:8162:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE 
GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.824166Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:91:10:24558:1000:0] PatchedBlobId# [1:1:92:10:4078:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.824579Z 3 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:91:10:73706:1000:0] PatchedBlobId# [1:1:92:10:4074:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.827638Z 5 00h00m30.010000s :BS_PROXY_GET ERROR: [bc535eac25ac0ce3] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:101:10:73640:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:2:0]"} Marker# BPG29 2025-06-30T09:20:06.828286Z 5 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:101:10:73640:1000:0] PatchedBlobId# [1:1:102:10:4008:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:2:0] Marker# BSVSP01 2025-06-30T09:20:06.833556Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [7cd4214f63792e65] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:91:10:24561:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:20:06.834019Z 3 00h00m30.010000s :BS_PROXY_GET ERROR: [215c72458da28502] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:91:10:4083:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-30T09:20:06.834343Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:91:10:24561:1000:0] PatchedBlobId# [1:1:92:10:4081:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:06.834668Z 3 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:91:10:4083:1000:0] PatchedBlobId# [1:1:92:10:4083:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 >> TestKinesisHttpProxy::CreateStreamWithInvalidName [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table [GOOD] >> TestYmqHttpProxy::TestGetQueueUrlWithIAM >> TestKinesisHttpProxy::CreateStreamWithDifferentRetentions >> DataShardReadIteratorBatchMode::RangeToNonInclusive [GOOD] >> DataShardReadIteratorBatchMode::MultipleRanges |80.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut |80.4%| [LD] {RESULT} 
$(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut >> TestYmqHttpProxy::TestCreateQueueWithBadQueueName [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query >> ReadIteratorExternalBlobs::ExtBlobsWithSpecificKeys [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheBeginning >> TestYmqHttpProxy::TestCreateQueueWithEmptyName >> TestKinesisHttpProxy::TestRequestBadJson [GOOD] >> TestYmqHttpProxy::TestSendMessage [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndDifferentParams [GOOD] >> TestKinesisHttpProxy::UnauthorizedGetShardIteratorRequest [GOOD] >> KqpJoinOrder::ShuffleEliminationManyKeysJoinPredicate+EnableSeparationComputeActorsFromRead >> ReadSessionImplTest::DataReceivedCallback [GOOD] >> TestKinesisHttpProxy::TestConsumersEmptyNames >> KqpIndexLookupJoin::JoinWithSubquery+StreamLookup >> TestYmqHttpProxy::TestCreateQueueWithWrongBody >> TestKinesisHttpProxy::GoodRequestPutRecords [GOOD] >> TestYmqHttpProxy::TestReceiveMessage >> TestYmqHttpProxy::TestSendMessageFifoQueue [GOOD] >> KqpJoinOrder::DatetimeConstantFold-ColumnStore [GOOD] >> TestKinesisHttpProxy::TestRequestWithWrongRegion >> TestKinesisHttpProxy::DoubleCreateStream >> TestYmqHttpProxy::TestSendMessageWithAttributes ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::DataReceivedCallback [GOOD] Test command err: 2025-06-30T09:19:46.335935Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.335943Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.335946Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.336096Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.337456Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.337508Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.337621Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.337761Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.337817Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:19:46.337860Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.337868Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-30T09:19:46.338059Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.338062Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.338064Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.338128Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-30T09:19:46.338301Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.338341Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.338379Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.338430Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.338461Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:19:46.338488Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.338493Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-30T09:19:46.338691Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.338694Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.338696Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.338758Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.338891Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.338935Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.338958Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.339219Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.339290Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:19:46.339324Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.339332Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-30T09:19:46.339517Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.339520Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.339524Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.339575Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.339694Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.339719Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.339753Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.340372Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.340477Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:19:46.340515Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-06-30T09:19:46.340522Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-30T09:19:46.340719Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.340722Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.340724Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.340783Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.340955Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.340995Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.341020Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.341068Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.341101Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:19:46.341131Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.341136Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-30T09:19:46.341253Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.341256Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.341258Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.341301Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.341436Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.341452Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.341469Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.341505Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.341532Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:19:46.341555Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.341559Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-30T09:19:46.341699Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.341701Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.341703Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.341746Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-30T09:19:46.341854Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.341878Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.341917Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.342088Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.342125Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:19:46.342147Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.342152Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-30T09:19:46.342304Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.342306Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.342308Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:19:46.342367Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:19:46.342673Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:19:46.342700Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.342729Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:19:46.342907Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:19:46.342945Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:19:46.342953Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:19:46.342957Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-30T09:19:46.399982Z :ReadSession INFO: Random seed for debugging is 1751275186399973 2025-06-30T09:19:46.493163Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669653312498659:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:46.493189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:19:46.496195Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669653860677201:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:46.496213Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;p ... b6] [dc1] Commit offsets [2, 3). Partition stream id: 1 2025-06-30T09:19:59.576354Z :DEBUG: [/Root] [/Root] [a6b272d2-97d46f11-dd08bc3-cf9b9ab6] [dc1] The application data is transferred to the client. 
Number of messages 1, size 8 bytes 2025-06-30T09:19:59.576486Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 grpc read done: success# 1, data# { commit { cookies { assign_id: 1 partition_cookie: 3 } } } 2025-06-30T09:19:59.576564Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1438: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 commit request from client for 3 in TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) 2025-06-30T09:19:59.576577Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:129: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 commit request from 3 to 3 in TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) 2025-06-30T09:19:59.576596Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) committing to position 3 prev 2 end 3 by cookie 3 2025-06-30T09:19:59.576770Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:19:59.576791Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:19:59.576833Z node 2 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user offset is set to 3 (startOffset 0) session shared/user_1_1_2203034252587783000_v1 2025-06-30T09:19:59.576862Z node 2 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:19:59.577839Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 3 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:19:59.577865Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 3 2025-06-30T09:19:59.577868Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:19:59.577891Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:19:59.577975Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 3 } 2025-06-30T09:19:59.577994Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) commit done to position 3 endOffset 3 with cookie 3 2025-06-30T09:19:59.578009Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 replying for commits: assignId# 1, from# 3, to# 3, offset# 3 2025-06-30T09:19:59.578219Z :DEBUG: [/Root] [/Root] [a6b272d2-97d46f11-dd08bc3-cf9b9ab6] [dc1] Committed response: cookies { assign_id: 1 partition_cookie: 3 } 2025-06-30T09:19:59.673049Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ec32ba5c-306ff6f1-f15a3467-2114491d_0] Write session will now close 2025-06-30T09:19:59.673075Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ec32ba5c-306ff6f1-f15a3467-2114491d_0] Write session: aborting 2025-06-30T09:19:59.673344Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ec32ba5c-306ff6f1-f15a3467-2114491d_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:19:59.673355Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ec32ba5c-306ff6f1-f15a3467-2114491d_0] Write session: destroy 2025-06-30T09:19:59.673738Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message-group-id|ec32ba5c-306ff6f1-f15a3467-2114491d_0 grpc read done: success: 0 data: 2025-06-30T09:19:59.673758Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message-group-id|ec32ba5c-306ff6f1-f15a3467-2114491d_0 grpc read failed 2025-06-30T09:19:59.673777Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message-group-id|ec32ba5c-306ff6f1-f15a3467-2114491d_0 grpc closed 2025-06-30T09:19:59.673785Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message-group-id|ec32ba5c-306ff6f1-f15a3467-2114491d_0 is DEAD 2025-06-30T09:19:59.674273Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:19:59.674575Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [1:7521669709147076123:2550] destroyed 2025-06-30T09:19:59.674609Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:20:01.536336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs 2025-06-30T09:20:01.536369Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:02.269088Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:20:02.332985Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 5 from offset 3 2025-06-30T09:20:07.269500Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:20:09.579148Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 6 from offset 3 2025-06-30T09:20:09.674947Z :INFO: [/Root] [/Root] [a6b272d2-97d46f11-dd08bc3-cf9b9ab6] Closing read session. Close timeout: 0.000000s 2025-06-30T09:20:09.674989Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:3 2025-06-30T09:20:09.675004Z :INFO: [/Root] [/Root] [a6b272d2-97d46f11-dd08bc3-cf9b9ab6] Counters: { Errors: 0 CurrentSessionLifetimeMs: 16349 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:09.675026Z :NOTICE: [/Root] [/Root] [a6b272d2-97d46f11-dd08bc3-cf9b9ab6] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:20:09.675049Z :DEBUG: [/Root] [/Root] [a6b272d2-97d46f11-dd08bc3-cf9b9ab6] [dc1] Abort session to cluster 2025-06-30T09:20:09.675328Z :NOTICE: [/Root] [/Root] [a6b272d2-97d46f11-dd08bc3-cf9b9ab6] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:20:09.678864Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 grpc read done: success# 0, data# { } 2025-06-30T09:20:09.678884Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 grpc read failed 2025-06-30T09:20:09.678897Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 grpc closed 2025-06-30T09:20:09.678918Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_1_1_2203034252587783000_v1 is DEAD 2025-06-30T09:20:09.681671Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [1:7521669683377271886:2468] disconnected; active server actors: 1 2025-06-30T09:20:09.681687Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [1:7521669683377271886:2468] client user disconnected session shared/user_1_1_2203034252587783000_v1 2025-06-30T09:20:09.686200Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_1_1_2203034252587783000_v1 2025-06-30T09:20:09.686236Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [1:7521669683377271889:2471] destroyed 2025-06-30T09:20:09.686266Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_2203034252587783000_v1 2025-06-30T09:20:10.184583Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:10.184593Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:10.184598Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:20:10.184696Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:20:10.184846Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:20:10.184928Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:10.184992Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:20:10.185243Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:20:10.185281Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:20:10.185374Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-30T09:20:10.185391Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:20:10.185403Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:20:10.185412Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-30T09:20:10.185455Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 8 bytes 2025-06-30T09:20:10.185460Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes >> TestKinesisHttpProxy::MissingAction [GOOD] |80.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |80.4%| [LD] {RESULT} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::DatetimeConstantFold-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 62197, MsgBus: 28926 2025-06-30T09:20:03.635682Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669726390222044:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:03.635717Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a4b/r3tmp/tmp4CDCSX/pdisk_1.dat 2025-06-30T09:20:03.692480Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62197, node 1 2025-06-30T09:20:03.713588Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:03.713608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:03.713611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:03.713660Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:03.738048Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:03.738092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:28926 2025-06-30T09:20:03.739208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28926 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:20:03.783795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:04.027934Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669730685189941:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.027938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669730685189952:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.027959Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.028707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:04.030496Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669730685189955:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:20:04.094127Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669730685190006:2328] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:04.157571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.222330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.230628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.239511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.253446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.294295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.302608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.317734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.330532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.345587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.359891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.373134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.386703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.553706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.563137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.575782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675: ... 
714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.008773Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038478;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.008929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.009838Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.010003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038476;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.011242Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.011438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.012330Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.012476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038544;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.013328Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038544;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.013494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.014381Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.014523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.015319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038476;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.015505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038542;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-30T09:20:11.016434Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038542;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.016573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.017530Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.017671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.018598Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.018724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.019567Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.019736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.020637Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.020792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038510;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.021658Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038510;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.021791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.022668Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.023726Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.023914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038536;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.024816Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038536;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.024949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.025833Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.026001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.026987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.027915Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.028092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038500;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.028995Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038500;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.029140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.030075Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.030198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.031025Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.031279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.032260Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.035287Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.163156Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:11.163316Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:11.163581Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7521669734980163758:2859];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-30T09:20:11.163656Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> TestKinesisHttpProxy::PutRecordsWithLongExplicitHashKey >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin4-RemoveLimitOperator [GOOD] >> DataShardReadIteratorBatchMode::MultipleRanges [GOOD] >> DataShardReadIteratorBatchMode::SelectingColumns >> KqpIndexLookupJoin::JoinWithSubquery+StreamLookup [GOOD] >> KqpIndexLookupJoin::JoinWithSubquery-StreamLookup >> TestYmqHttpProxy::TestGetQueueUrlWithIAM [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin4-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 15831, MsgBus: 17182 2025-06-30T09:20:03.871820Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669724761343435:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:03.871851Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a40/r3tmp/tmp4kvwog/pdisk_1.dat 2025-06-30T09:20:03.940953Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:03.941069Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669724761343411:2080] 1751275203871611 != 1751275203871614 TServer::EnableGrpc on GrpcPort 15831, node 1 2025-06-30T09:20:03.951346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:03.951360Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:03.951362Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:03.951402Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17182 2025-06-30T09:20:03.974551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:03.974581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:03.975640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17182 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:04.020855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:04.231494Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669729056311342:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.232311Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669729056311333:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.232384Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.232595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:04.235845Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669729056311347:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:20:04.293517Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669729056311398:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:04.359017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.422708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.430409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.442258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.456206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.493085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.505047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.516651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.529637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.540976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.554715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.568311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.583201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.764667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.778044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20: ... 
TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.728240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.728961Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.729100Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.729104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.729245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.730185Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.730214Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.730339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.730364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.731378Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.731408Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.731537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.731551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.732470Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.732571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.732653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.732880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.733543Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.733690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.733703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.733844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.734523Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.734638Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.734772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.734805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.735647Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.735658Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.735781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.735782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.736658Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.736686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.736807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.736834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.737729Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.737729Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.737883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.737885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.738801Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.738801Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.738953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.739271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.739809Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.740089Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.743815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.744673Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.808362Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:11.808380Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:11.808630Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> TestKinesisHttpProxy::CreateStreamWithDifferentRetentions [GOOD] >> TBackupCollectionTests::CreateAbsolutePath >> TestYmqHttpProxy::TestGetQueueAttributes |80.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut >> TBackupCollectionTests::CreateAbsolutePath [GOOD] >> TBackupCollectionTests::Create |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |80.4%| [LD] {RESULT} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut >> TestYmqHttpProxy::TestCreateQueueWithEmptyName [GOOD] >> TestKinesisHttpProxy::CreateDeleteStream ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK [GOOD] Test command err: 2025-06-30T09:18:53.078036Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669426503715503:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:53.078055Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004031/r3tmp/tmpEVR1Rp/pdisk_1.dat 2025-06-30T09:18:53.230182Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:53.270903Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:53.273447Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669426503715487:2080] 1751275133077872 != 1751275133077875 TServer::EnableGrpc on GrpcPort 6236, node 1 
2025-06-30T09:18:53.302968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004031/r3tmp/yandexrCb11T.tmp 2025-06-30T09:18:53.302985Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004031/r3tmp/yandexrCb11T.tmp 2025-06-30T09:18:53.303070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004031/r3tmp/yandexrCb11T.tmp 2025-06-30T09:18:53.303121Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:53.308377Z INFO: TTestServer started on Port 1075 GrpcPort 6236 2025-06-30T09:18:53.321540Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:53.321571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:53.322337Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1075 PQClient connected to localhost:6236 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:53.380358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:53.387327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:18:53.396654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:18:53.475255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 
2025-06-30T09:18:53.678919Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669426503716260:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.678935Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669426503716273:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.678983Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.682095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:53.687052Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669426503716275:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:18:53.732263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.741495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.748154Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669426503716467:2510] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:53.763620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.766868Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669426503716477:2321], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:53.767734Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=MWE5YWVjNGItNjVlZDdhMDQtMjYzMjlhNDMtNDNmNmM3NjU=, ActorId: [1:7521669426503716258:2296], ActorState: ExecuteState, TraceId: 01jz023pq7ft35xrg18kv9a99f, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:53.768286Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669426503716640:2612] 2025-06-30T09:18:54.087097Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:58.078814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669426503715503:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:58.078863Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-30T09:18:59.050247Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-30T09:18:59.079155Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:18:59.082083Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521669452273520605:2683], Recipient [1:7521669426503715809:2146]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:59.082103Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:59.082106Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:18:59.082119Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521669452273520601:2680], Recipient [1:7521669426503715809:2146]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-30T09:18:59.082122Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:18:59.103604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTable ... r test-consumer session test-consumer_7_1_9066496331223293014_v1 grpc read done: success# 0, data# { } 2025-06-30T09:20:13.183875Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_7_1_9066496331223293014_v1 grpc read failed 2025-06-30T09:20:13.183881Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_7_1_9066496331223293014_v1 grpc closed 2025-06-30T09:20:13.183891Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_7_1_9066496331223293014_v1 is DEAD 2025-06-30T09:20:13.184550Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 269877764, Sender [7:7521669766540062283:3385], Recipient [7:7521669727885354979:2416]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:13.184554Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5326: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:13.184556Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2906: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:13.184563Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session test-consumer_7_1_9066496331223293014_v1 2025-06-30T09:20:13.184569Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [7:7521669766540062282:2847] destroyed 2025-06-30T09:20:13.184590Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [7:7521669766540062279:2844] disconnected; active server actors: 1 2025-06-30T09:20:13.184593Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][test-topic] pipe [7:7521669766540062279:2844] client test-consumer disconnected session test-consumer_7_1_9066496331223293014_v1 2025-06-30T09:20:13.184638Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 269877764, Sender 
[7:7521669766540062291:3388], Recipient [7:7521669757950127317:2756]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:13.184639Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5326: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:13.184641Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2906: [PQ: 72075186224037896] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:13.184644Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037896] Destroy direct read session test-consumer_7_1_9066496331223293014_v1 2025-06-30T09:20:13.184647Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [7:7521669766540062289:2849] destroyed 2025-06-30T09:20:13.184655Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 269877764, Sender [7:7521669766540062290:3387], Recipient [7:7521669757950127324:2757]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:13.184657Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5326: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:13.184659Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2906: [PQ: 72075186224037897] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:13.184663Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037897] Destroy direct read session test-consumer_7_1_9066496331223293014_v1 2025-06-30T09:20:13.184666Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037897] server disconnected, pipe [7:7521669766540062288:2848] destroyed 2025-06-30T09:20:13.184683Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_7_1_9066496331223293014_v1 2025-06-30T09:20:13.184686Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_7_1_9066496331223293014_v1 2025-06-30T09:20:13.184689Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_7_1_9066496331223293014_v1 2025-06-30T09:20:13.190876Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669727885354979:2416], Partition 0, Sender [0:0:0], Recipient [7:7521669727885355035:2419], Cookie: 0 2025-06-30T09:20:13.190900Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669727885355035:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.190908Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.190926Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:13.190959Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:13.190962Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:13.190969Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-30T09:20:13.255043Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669757950127317:2756], Partition 2, Sender [0:0:0], Recipient [7:7521669757950127391:2764], Cookie: 0 2025-06-30T09:20:13.255071Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669757950127391:2764]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.255078Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.255096Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:13.255128Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:13.255131Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:13.255139Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:13.255152Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669757950127324:2757], Partition 1, Sender [0:0:0], Recipient [7:7521669757950127394:2766], Cookie: 0 2025-06-30T09:20:13.255160Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669757950127394:2766]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.255162Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.255167Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:13.255174Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:13.255176Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:13.255181Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:13.293658Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669727885354979:2416], Partition 0, Sender [0:0:0], Recipient [7:7521669727885355035:2419], Cookie: 0 2025-06-30T09:20:13.293688Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669727885355035:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.293694Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.293713Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:13.293738Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:13.293741Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:13.293749Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:13.355757Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669757950127317:2756], Partition 2, Sender [0:0:0], Recipient [7:7521669757950127391:2764], Cookie: 0 2025-06-30T09:20:13.355789Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669757950127391:2764]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.355796Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.355816Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:13.355844Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:13.355848Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:13.355856Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:13.355871Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669757950127324:2757], Partition 1, Sender [0:0:0], Recipient [7:7521669757950127394:2766], Cookie: 0 2025-06-30T09:20:13.355882Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669757950127394:2766]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.355884Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:13.355889Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:13.355896Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:13.355898Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:13.355902Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 >> TestYmqHttpProxy::TestCreateQueueWithAllAttributes >> TBackupCollectionTests::Create [GOOD] >> TBackupCollectionTests::CreateTwice >> KqpJoinOrder::SortingsWithLookupJoin1+RemoveLimitOperator [GOOD] >> TBackupCollectionTests::HiddenByFeatureFlag >> TestYmqHttpProxy::TestCreateQueueWithWrongBody [GOOD] >> TestKinesisHttpProxy::TestConsumersEmptyNames [GOOD] >> KqpJoinOrder::TPCDS90-ColumnStore [GOOD] >> TBackupCollectionTests::CreateTwice [GOOD] >> TBackupCollectionTests::BackupAbsentCollection >> TestYmqHttpProxy::TestCreateQueueWithWrongAttribute >> TBackupCollectionTests::HiddenByFeatureFlag [GOOD] >> TBackupCollectionTests::DisallowedPath >> TestKinesisHttpProxy::TestRequestWithWrongRegion [GOOD] >> TestKinesisHttpProxy::DoubleCreateStream [GOOD] >> TBackupCollectionTests::BackupAbsentCollection [GOOD] >> TBackupCollectionTests::BackupDroppedCollection >> TxUsage::Sinks_Oltp_WriteToTopics_4_Table [GOOD] >> TestKinesisHttpProxy::TestListStreamConsumers >> KqpIndexLookupJoin::JoinWithSubquery-StreamLookup [GOOD] >> TestKinesisHttpProxy::GoodRequestGetRecords >> DataShardReadIteratorBatchMode::SelectingColumns [GOOD] >> DataShardReadIteratorBatchMode::ShouldHandleReadAck >> OlapEstimationRowsCorrectness::TPCH9 >> TestYmqHttpProxy::TestReceiveMessage [GOOD] >> TestYmqHttpProxy::TestSendMessageWithAttributes [GOOD] >> TBackupCollectionTests::DisallowedPath [GOOD] >> TBackupCollectionTests::ParallelCreate >> TestKinesisHttpProxy::TestRequestWithIAM >> TxUsage::Sinks_Oltp_WriteToTopics_4_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin1+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 20466, MsgBus: 13863 2025-06-30T09:20:04.396273Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669728292705247:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:04.396301Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a3d/r3tmp/tmpAibMQo/pdisk_1.dat 2025-06-30T09:20:04.457532Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669728292705228:2080] 1751275204396120 != 1751275204396123 2025-06-30T09:20:04.461307Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20466, node 1 2025-06-30T09:20:04.468202Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:04.468219Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:04.468221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:04.468296Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13863 TClient is connected to server localhost:13863 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:04.534798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:04.534827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:04.535893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:04.536533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:04.538807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:20:04.836049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669728292705853:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.836051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669728292705865:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.836073Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.836823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:04.839161Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669728292705867:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:20:04.933068Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669728292705918:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:04.989979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.052067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.062139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.074713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.086760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.124328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.132376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.142586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.156046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.170538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.184459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.199928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.212664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.398880Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:05.399682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.410406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__ope ... 
ablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.279957Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.280113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.280133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.281066Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.281072Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.281200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.281203Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.282382Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.282382Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.282533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.282534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.283659Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.283659Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.283831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.283831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.285013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.285013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.285165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.285165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.286124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.286254Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.286294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.286530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.287398Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.287581Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.287597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.287736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.288495Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.288627Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.288661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.288765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.289535Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.289627Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.289707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.289779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.290589Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.290794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.291155Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.291434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.291499Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.291783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:13.292597Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.292768Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:13.382707Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:13.382839Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:13.383023Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7521669754062532935:4724];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-30T09:20:13.383096Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716;
:7:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504
:7:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504 >> TBackupCollectionTests::BackupDroppedCollection [GOOD] >> TBackupCollectionTests::BackupAbsentDirs >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table [GOOD] >> KqpJoinOrder::UdfConstantFold-ColumnStore >> TestYmqHttpProxy::TestReceiveMessageWithAttributes >> TBackupCollectionTests::ParallelCreate [GOOD] >> TBackupCollectionTests::Drop ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS90-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 23085, MsgBus: 5040 2025-06-30T09:20:04.291516Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669728427914677:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:04.291591Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a3e/r3tmp/tmpGhowT5/pdisk_1.dat 2025-06-30T09:20:04.342310Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:04.342789Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669728427914503:2080] 1751275204287725 != 1751275204287728 TServer::EnableGrpc on GrpcPort 23085, node 1 2025-06-30T09:20:04.358150Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:04.358163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:04.358165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:04.358207Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5040 2025-06-30T09:20:04.389726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:04.389760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:04.390916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5040 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:20:04.426794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:04.727318Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669728427915137:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.727429Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669728427915126:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.727449Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.728215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:04.730426Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669728427915140:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:20:04.788367Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669728427915191:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:04.849424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.911001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.920736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.932420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.947208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.981599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:04.997959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.010417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.024427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.037237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.053460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.065533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.084004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.212530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.221750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05. ... 
714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.957067Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.957218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.957432Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.957554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.958137Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.958299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.958466Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.958580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.959207Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.959354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.959541Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.959666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.960277Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.960435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-30T09:20:11.960579Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.960689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.961330Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.961481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.961653Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.961766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.962401Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.962554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.962716Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.962864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.963460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.963601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.963809Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.963922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.964435Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.964558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.964799Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.965038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.965335Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.965460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.965949Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.966037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.966328Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.966501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038575;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.966933Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.967061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.967403Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038575;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.967553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:11.968013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:11.968468Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:12.125517Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:12.125551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:12.125621Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7521669749902767579:3950];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-30T09:20:12.125699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::JoinWithSubquery-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 27377, MsgBus: 12619 2025-06-30T09:20:12.400822Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669762351772000:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:12.402456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a2b/r3tmp/tmp7e0hNY/pdisk_1.dat 2025-06-30T09:20:12.458439Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 27377, node 1 2025-06-30T09:20:12.460317Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:12.471109Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:12.471126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:12.471128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:12.471171Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12619 2025-06-30T09:20:12.502963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:12.502991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:12.507116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Connecting -> Connected TClient is connected to server localhost:12619 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:12.561474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:12.564411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:20:12.575243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:12.657486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:12.686824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:12.713927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:12.901279Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669762351773563:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:12.901319Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:12.953230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:12.962276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:12.975774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:13.043501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:13.054403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:13.081976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:13.096714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:13.116606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669766646741514:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:13.116633Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:13.116788Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669766646741519:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:13.117839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:13.124420Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669766646741521:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:20:13.214819Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669766646741572:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:13.401429Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:13.447133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:13.459001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/r ... s response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:14.049420Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:14.049459Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:14.050017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:14.050470Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:20:14.080111Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:14.096709Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:14.129977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:14.162876Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:14.459362Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669771565003513:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:14.459393Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:14.466758Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:14.496067Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:14.525342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:14.537736Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:14.554412Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:14.569018Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:14.629383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:14.644815Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669771565004176:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:14.644847Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:14.644962Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669771565004181:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:14.646055Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:14.655215Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669771565004183:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:20:14.740767Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669771565004234:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:14.918007Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:14.926926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:14.948233Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:14.991370Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:15.000493Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:15.014166Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:15.027776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TestYmqHttpProxy::TestSetQueueAttributes >> TBackupCollectionTests::BackupAbsentDirs [GOOD] >> TBackupCollectionTests::BackupNonIncrementalCollection >> TestKinesisHttpProxy::PutRecordsWithLongExplicitHashKey [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query >> TBackupCollectionTests::Drop [GOOD] >> TBackupCollectionTests::DropTwice 
>> TestKinesisHttpProxy::PutRecordsWithIncorrectHashKey >> TBackupCollectionTests::BackupNonIncrementalCollection [GOOD] >> TBackupCollectionTests::DropTwice [GOOD] >> TBackupCollectionTests::TableWithSystemColumns >> TestYmqHttpProxy::TestGetQueueAttributes [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Query >> KqpJoinOrder::TPCDS16-ColumnStore ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection/unittest >> TBackupCollectionTests::BackupNonIncrementalCollection [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:14.514402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:14.514434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:14.514440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:14.514447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:14.514454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:14.514459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:14.514470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:14.514488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:14.514623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:14.514703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:14.531215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:14.531243Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:14.536264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:14.536365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:14.536407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:14.560527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:14.560656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:14.560807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:14.560926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:14.564296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:14.564385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:14.564773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:14.564791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:14.564821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:14.564831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:14.564839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:14.564868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:14.567236Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:14.593922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:14.594038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:14.594122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:14.594132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:14.594195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:14.594210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, 
but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:14.595418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:14.595475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:14.595563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:14.595591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:14.595599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:14.595605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:14.596329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:14.596346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:14.596355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:14.596825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:14.596837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:14.596844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:14.596852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:14.597651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:14.598178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:14.598225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 
State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:14.598457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:14.598488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:14.598497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:14.598586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:14.598595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:14.598638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:14.598653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:14.599271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:14.599283Z node 1 :FLAT_TX_SCHEMESHARD ... 
thDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-30T09:20:16.624221Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:1 2025-06-30T09:20:16.624223Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 105:1 2025-06-30T09:20:16.624241Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-30T09:20:16.624245Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-30T09:20:16.624639Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:16.624661Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [7:304:2293] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 105 at schemeshard: 72057594046678944 2025-06-30T09:20:16.624697Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-30T09:20:16.624704Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [7:529:2488] 2025-06-30T09:20:16.624738Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [7:531:2490], Recipient [7:128:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:16.624744Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:16.624749Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 TestModificationResults wait txId: 106 2025-06-30T09:20:16.624846Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [7:598:2555], Recipient [7:128:2152]: {TEvModifySchemeTransaction txid# 106 TabletId# 72057594046678944} 2025-06-30T09:20:16.624850Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:20:16.625631Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpBackupIncrementalBackupCollection BackupIncrementalBackupCollection { Name: ".backups/collections/MyCollection1" } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:16.625713Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-30T09:20:16.625746Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 4], parent name: MyCollection1, child name: 19700101000000Z_incremental, child id: [OwnerId: 72057594046678944, LocalPathId: 8], at schemeshard: 72057594046678944 
2025-06-30T09:20:16.625758Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 0 2025-06-30T09:20:16.625765Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 106:0 type: TxMkDir target path: [OwnerId: 72057594046678944, LocalPathId: 8] source path: 2025-06-30T09:20:16.625778Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 106:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:16.625793Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 106:1, explain: Incremental backup is disabled on this collection, at schemeshard: 72057594046678944 2025-06-30T09:20:16.625799Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 106:2, propose status:StatusInvalidParameter, reason: Incremental backup is disabled on this collection, at schemeshard: 72057594046678944 2025-06-30T09:20:16.626317Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:148: Abort operation: IgniteOperation fail to propose a part, opId: 106:1, at schemeshard: 72057594046678944, already accepted parts: 1, propose result status: StatusInvalidParameter, with reason: Incremental backup is disabled on this collection, tx message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpBackupIncrementalBackupCollection BackupIncrementalBackupCollection { Name: ".backups/collections/MyCollection1" } } TxId: 106 TabletId: 72057594046678944 2025-06-30T09:20:16.626372Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:275: MkDir AbortPropose, opId: 106:0, at schemeshard: 72057594046678944 2025-06-30T09:20:16.626411Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:20:16.627035Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Incremental backup is disabled on this collection" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:16.627089Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Incremental backup is disabled on this collection, operation: BACKUP INCREMENTAL, path: /MyRoot/.backups/collections/MyCollection1 2025-06-30T09:20:16.627098Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-30T09:20:16.627194Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-30T09:20:16.627203Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-30T09:20:16.627282Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [7:604:2561], Recipient [7:128:2152]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:16.627303Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-30T09:20:16.627308Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:20:16.627324Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124996, Sender [7:304:2293], Recipient [7:128:2152]: NKikimrScheme.TEvNotifyTxCompletion TxId: 106 2025-06-30T09:20:16.627329Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4973: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-30T09:20:16.627341Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-30T09:20:16.627362Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-30T09:20:16.627367Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [7:602:2559] 2025-06-30T09:20:16.627393Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [7:604:2561], Recipient [7:128:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:16.627399Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:16.627402Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 2025-06-30T09:20:16.627469Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [7:605:2562], Recipient [7:128:2152]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-30T09:20:16.627475Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:20:16.627489Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:20:16.627537Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1" took 43us result status StatusSuccess 2025-06-30T09:20:16.627664Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1" PathDescription { Self { Name: "MyCollection1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "19700101000000Z_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 105 CreateStep: 5000006 
ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "MyCollection1" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/Table1" } } Cluster { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery >> TBackupCollectionTests::TableWithSystemColumns [GOOD] >> DataShardReadTableSnapshots::ReadTableDropColumn >> TestYmqHttpProxy::TestDeleteQueue >> TestKinesisHttpProxy::CreateDeleteStream [GOOD] >> DataShardReadIteratorBatchMode::ShouldHandleReadAck [GOOD] |80.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumer ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection/unittest >> TBackupCollectionTests::TableWithSystemColumns [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:15.251395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:15.251418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:15.251422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:15.251427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:15.251432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:15.251435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type 
TxSplitTablePartition, limit 10000 2025-06-30T09:20:15.251442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:15.251455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:15.251545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:15.251601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:15.265234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:15.265264Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:15.270992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:15.271099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:15.271158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:15.276958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:15.277068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:15.277192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:15.277300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:15.278209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:15.278286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:15.278670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:15.278684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:15.278715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:15.278724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:15.278730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:15.278774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 
2025-06-30T09:20:15.280527Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:15.301600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:15.301689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:15.301763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:15.301772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:15.301822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:15.301837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:15.302755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:15.302797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:15.302872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:15.302897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:15.302923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:15.302929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:15.303461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:15.303472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:15.303481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:15.303851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:15.303860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:15.303865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:15.303872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:15.304515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:15.304914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:15.304958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:15.305182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:15.305209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:15.305217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:15.305296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:15.305304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:15.305341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:15.305353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:15.305936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:15.305951Z node 1 :FLAT_TX_SCHEMESHARD ... 
0:17.177156Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:17.177163Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:20:17.177193Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269551620, Sender [6:601:2556], Recipient [6:127:2151]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 601 RawX2: 25769806332 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-30T09:20:17.177198Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-30T09:20:17.177207Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 601 RawX2: 25769806332 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-30T09:20:17.177211Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409548, partId: 1 2025-06-30T09:20:17.177226Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 106:1, at schemeshard: 72057594046678944, message: Source { RawX1: 601 RawX2: 25769806332 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-30T09:20:17.177233Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 106:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-30T09:20:17.177242Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 106:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 601 RawX2: 25769806332 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-30T09:20:17.177253Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 106:1, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:17.177258Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-30T09:20:17.177263Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 106:1, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-30T09:20:17.177270Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 106:1 129 -> 240 2025-06-30T09:20:17.177291Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:20:17.177319Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 274137603, Sender [6:212:2212], Recipient [6:127:2151]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 8] Version: 3 } 2025-06-30T09:20:17.177323Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5044: 
StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-30T09:20:17.177329Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 3 PathOwnerId: 72057594046678944, cookie: 106 2025-06-30T09:20:17.177337Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 3 PathOwnerId: 72057594046678944, cookie: 106 2025-06-30T09:20:17.177343Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-30T09:20:17.177347Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 8], version: 3 2025-06-30T09:20:17.177351Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 4 2025-06-30T09:20:17.177380Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/2, is published: true 2025-06-30T09:20:17.177389Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:20:17.177960Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:17.177991Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:17.178007Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-30T09:20:17.178012Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:17.178434Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-30T09:20:17.178442Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:17.178462Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-30T09:20:17.178466Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:17.178479Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-30T09:20:17.178483Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:17.178488Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 106:1 2025-06-30T09:20:17.178505Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to 
actor: [6:601:2556] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 106 at schemeshard: 72057594046678944 2025-06-30T09:20:17.178558Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [6:127:2151], Recipient [6:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:20:17.178562Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:20:17.178567Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-30T09:20:17.178573Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 106:1 ProgressState 2025-06-30T09:20:17.178587Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:20:17.178591Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:1 progress is 2/2 2025-06-30T09:20:17.178596Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-06-30T09:20:17.178602Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:1 progress is 2/2 2025-06-30T09:20:17.178605Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-06-30T09:20:17.178610Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 106, ready parts: 2/2, is published: true 2025-06-30T09:20:17.178622Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [6:308:2297] message: TxId: 106 2025-06-30T09:20:17.178628Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-06-30T09:20:17.178635Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-30T09:20:17.178641Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 106:0 2025-06-30T09:20:17.178651Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-30T09:20:17.178655Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:1 2025-06-30T09:20:17.178657Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 106:1 2025-06-30T09:20:17.178668Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 3 2025-06-30T09:20:17.178716Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-30T09:20:17.178719Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:17.179072Z node 6 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:17.179090Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [6:308:2297] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 106 at schemeshard: 72057594046678944 2025-06-30T09:20:17.179127Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-30T09:20:17.179135Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [6:630:2577] 2025-06-30T09:20:17.179175Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [6:632:2579], Recipient [6:127:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:17.179181Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:17.179186Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 >> TestYmqHttpProxy::TestCreateQueueWithAllAttributes [GOOD] >> THeavyPerfTest::TTestLoadEverything [GOOD] >> THiveImplTest::BootQueueSpeed |80.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |80.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Table [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithWrongAttribute [GOOD] >> TestYmqHttpProxy::BillingRecordsForJsonApi ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIteratorBatchMode::ShouldHandleReadAck [GOOD] Test command err: 2025-06-30T09:19:34.333476Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:34.333585Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:34.333659Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:34.333711Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:19:34.333726Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:34.333756Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004303/r3tmp/tmprrE5a2/pdisk_1.dat 2025-06-30T09:19:34.445166Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:34.533468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:34.621996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:34.622029Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:34.623299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:34.623327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:34.635690Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:19:34.635872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:34.636044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:34.887190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:34.930578Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [2:1186:2342], Recipient [2:1211:2354]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:19:34.932760Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [2:1186:2342], Recipient [2:1211:2354]: NKikimr::TEvTablet::TEvRestored 
2025-06-30T09:19:34.932878Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1211:2354] 2025-06-30T09:19:34.932928Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:19:34.942875Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [2:1186:2342], Recipient [2:1211:2354]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:19:34.944732Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:19:34.944771Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:19:34.944904Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:19:34.944912Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:19:34.944918Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:19:34.944972Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:19:34.944990Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:19:34.945000Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:1235:2354] in generation 1 2025-06-30T09:19:34.956772Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:19:34.962145Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:19:34.962243Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:19:34.962272Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:1238:2371] 2025-06-30T09:19:34.962279Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:19:34.962285Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:19:34.962292Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:34.962398Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:1211:2354], Recipient [2:1211:2354]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:34.962408Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:34.962527Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:19:34.962559Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:19:34.962584Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:19:34.962594Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:34.962603Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:19:34.962610Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:19:34.962616Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:19:34.962623Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:19:34.962631Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:19:35.004982Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1242:2372], Recipient [2:1211:2354]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:35.005011Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:35.005020Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1195:2739], serverId# [2:1242:2372], sessionId# [0:0:0] 2025-06-30T09:19:35.005107Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:764:2428], Recipient [2:1242:2372] 2025-06-30T09:19:35.005112Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:19:35.005148Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:19:35.005204Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:19:35.005216Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:19:35.005249Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:19:35.005256Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:19:35.005260Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:19:35.005265Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:19:35.005268Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:35.005315Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:19:35.005319Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:19:35.005322Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:19:35.005324Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 
2025-06-30T09:19:35.005334Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:19:35.005337Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:19:35.005340Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:19:35.005342Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:19:35.005346Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-30T09:19:35.005748Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, ... 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-30T09:20:17.420804Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-30T09:20:17.420822Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-30T09:20:17.420827Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit CreateVolatileSnapshot 2025-06-30T09:20:17.420831Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3000:281474976715664] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-30T09:20:17.420835Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-30T09:20:17.420840Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is Executed 2025-06-30T09:20:17.420844Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-30T09:20:17.420849Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3000:281474976715664] at 72075186224037889 to execution unit CompleteOperation 2025-06-30T09:20:17.420854Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit CompleteOperation 2025-06-30T09:20:17.420883Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is DelayComplete 2025-06-30T09:20:17.420888Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit CompleteOperation 2025-06-30T09:20:17.420892Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3000:281474976715664] at 72075186224037889 to execution unit CompletedOperations 2025-06-30T09:20:17.420897Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit CompletedOperations 2025-06-30T09:20:17.420904Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is Executed 2025-06-30T09:20:17.420908Z node 16 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit CompletedOperations 2025-06-30T09:20:17.420912Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3000:281474976715664] at 72075186224037889 has finished 2025-06-30T09:20:17.420917Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:20:17.420921Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-30T09:20:17.420925Z node 16 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-30T09:20:17.420929Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-30T09:20:17.441663Z node 16 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3000} 2025-06-30T09:20:17.441713Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:20:17.441725Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3000:281474976715664] at 72075186224037889 on unit CompleteOperation 2025-06-30T09:20:17.441750Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3000 : 281474976715664] from 72075186224037889 at tablet 72075186224037889 send result to client [16:998:2784], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:20:17.441763Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:20:17.441855Z node 16 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3000} 2025-06-30T09:20:17.441879Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:20:17.441885Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3000:281474976715664] at 72075186224037888 on unit CompleteOperation 2025-06-30T09:20:17.441894Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3000 : 281474976715664] from 72075186224037888 at tablet 72075186224037888 send result to client [16:998:2784], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:20:17.441900Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:20:17.442305Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [16:556:2482], Recipient [16:629:2533]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 281474976715664 } ResultFormat: FORMAT_ARROW MaxRows: 1 Hints: 1 RangesSize: 1 2025-06-30T09:20:17.442340Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-30T09:20:17.442357Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CheckRead 2025-06-30T09:20:17.442383Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-30T09:20:17.442390Z 
node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:20:17.442397Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:20:17.442406Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:20:17.442417Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037888 2025-06-30T09:20:17.442424Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-30T09:20:17.442428Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:20:17.442433Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:20:17.442438Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:20:17.442458Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 281474976715664 } ResultFormat: FORMAT_ARROW MaxRows: 1 Hints: 1 } 2025-06-30T09:20:17.442467Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3000/281474976715664 2025-06-30T09:20:17.442506Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-30T09:20:17.442512Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:20:17.442517Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:20:17.442522Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:20:17.442537Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-30T09:20:17.442541Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:20:17.442546Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037888 has finished 2025-06-30T09:20:17.442552Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:20:17.442572Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-30T09:20:17.442720Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553236, Sender [16:1014:2798], Recipient [16:629:2533]: NKikimr::TEvDataShard::TEvReadScanStarted 2025-06-30T09:20:17.442885Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553218, Sender [16:556:2482], Recipient 
[16:629:2533]: NKikimrTxDataShard.TEvReadAck ReadId: 1 SeqNo: 1 MaxRows: 2 MaxBytes: 10000 2025-06-30T09:20:17.442898Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1193: 72075186224037888 forwarding NKikimr::TEvDataShard::TEvReadAck to scan actor [16:1014:2798] 2025-06-30T09:20:17.442990Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553218, Sender [16:556:2482], Recipient [16:629:2533]: NKikimrTxDataShard.TEvReadAck ReadId: 1 SeqNo: 2 MaxRows: 100 MaxBytes: 10000 2025-06-30T09:20:17.442997Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1193: 72075186224037888 forwarding NKikimr::TEvDataShard::TEvReadAck to scan actor [16:1014:2798] 2025-06-30T09:20:17.443063Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553237, Sender [16:1014:2798], Recipient [16:629:2533]: NKikimr::TEvDataShard::TEvReadScanFinished 2025-06-30T09:20:17.443097Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [16:629:2533], Recipient [16:629:2533]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:20:17.443104Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:20:17.443115Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:20:17.443124Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:20:17.443130Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-30T09:20:17.443137Z node 16 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:20:17.443141Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:20:17.443148Z node 16 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:20:17.443158Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession_ParentCommittedToEnd [GOOD] >> CommitOffset::Commit_WithSession_ToPastParentPartition >> TestYmqHttpProxy::TestCreateQueueWithTags >> TestKinesisHttpProxy::TestListStreamConsumers [GOOD] |80.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_backup_collection/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TestKinesisHttpProxy::TestRequestWithIAM [GOOD] |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH17 [GOOD] >> PersQueueSdkReadSessionTest::StopResumeReadingData [GOOD] >> ReadSessionImplTest::CreatePartitionStream [GOOD] >> ReadSessionImplTest::BrokenCompressedData [GOOD] >> ReadSessionImplTest::CommitOffsetTwiceIsError [GOOD] >> ReadSessionImplTest::CommonHandler [GOOD] >> TestKinesisHttpProxy::TestListStreamConsumersWithMaxResults >> TestKinesisHttpProxy::TestRequestNoAuthorization |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> TestYmqHttpProxy::TestReceiveMessageWithAttributes [GOOD] >> TestYmqHttpProxy::TestSetQueueAttributes [GOOD] |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::CommonHandler [GOOD] Test command err: 2025-06-30T09:19:46.399959Z :ReadSession INFO: Random seed for debugging is 1751275186399951 2025-06-30T09:19:46.497127Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669651002630893:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:46.497152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:19:46.500994Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669650537943921:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:46.501014Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00495a/r3tmp/tmpVu5hUG/pdisk_1.dat 2025-06-30T09:19:46.532929Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:19:46.537710Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:19:46.557265Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:46.565659Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 17082, node 1 2025-06-30T09:19:46.571768Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/00495a/r3tmp/yandexUjpm5j.tmp 2025-06-30T09:19:46.571784Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/00495a/r3tmp/yandexUjpm5j.tmp 2025-06-30T09:19:46.571918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/00495a/r3tmp/yandexUjpm5j.tmp 2025-06-30T09:19:46.571977Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:46.579018Z INFO: TTestServer started on Port 61139 GrpcPort 17082 TClient is connected to server localhost:61139 
PQClient connected to localhost:17082 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:19:46.598037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:46.598068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-30T09:19:46.602649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:46.630240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:46.630286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:46.631198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:46.632514Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:19:46.632852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... waiting... waiting... waiting... 2025-06-30T09:19:46.902776Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669651002631781:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:46.902813Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669651002631789:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:46.902830Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:46.902776Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669650537944179:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:46.902783Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669650537944166:2268], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:46.902812Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:46.903820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:46.904178Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669651002631827:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:46.904195Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:46.905134Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669650537944196:2121] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:46.908745Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669651002631796:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:19:46.908901Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669650537944195:2272], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:19:46.941422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:46.992151Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669651002631949:2716] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:47.009833Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669654832911581:2164] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:47.009829Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669651002631968:2312], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:19:47.010018Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YjljMDIwMTgtYTNkODFiYjYtNjk2OTdjNDAtMmY3NDZhMw==, ActorId: [1:7521669651002631779:2295], ActorState: ExecuteState, TraceId: 01jz025apm7bj6hbk5w2qsxytz, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:19:47.010629Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:1 ... message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:20:18.275934Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 6 Topic 'rt3.dc1--test-topic' partition 0 user user offset 2 count 1 size 211 endOffset 3 max time lag 0ms effective offset 2 2025-06-30T09:20:18.275967Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 6 added 1 blobs, size 176 count 1 last offset 2, current partition end offset: 3 2025-06-30T09:20:18.275973Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 6. Send blob request. 2025-06-30T09:20:18.276002Z node 8 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 2 partno 0 count 1 parts_count 0 source 1 size 176 accessed 1 times before, last time 2025-06-30T09:20:17.000000Z 2025-06-30T09:20:18.276014Z node 8 :PERSQUEUE DEBUG: read.h:121: Reading cookie 6. All 1 blobs are from cache. 2025-06-30T09:20:18.276032Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-30T09:20:18.276050Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 2 totakecount 1 count 1 size 156 from pos 0 cbcount 1 2025-06-30T09:20:18.276100Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-30T09:20:18.276493Z node 8 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' 2025-06-30T09:20:18.279004Z :DEBUG: [/Root] [/Root] [cae94cb2-c34cb0a0-dc058828-113f628d] [dc1] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.279718Z :DEBUG: [/Root] Decompression task done. Partition/PartitionSessionId: 0 (2-2) 2025-06-30T09:20:18.282184Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 0} (2-2) 2025-06-30T09:20:18.282211Z :DEBUG: [/Root] [/Root] [cae94cb2-c34cb0a0-dc058828-113f628d] [dc1] The application data is transferred to the client. Number of messages 1, size 8 bytes DataReceived { PartitionStreamId: 1 PartitionId: 0 Message { Data: ..8 bytes.. 
Partition stream id: 1 Cluster: "dc1". Topic: "test-topic" Partition: 0 PartitionKey: "" Information: { Offset: 2 SeqNo: 3 MessageGroupId: "test-message-group-id" CreateTime: 2025-06-30T09:20:17.165000Z WriteTime: 2025-06-30T09:20:17.166000Z Ip: "ipv6:[::1]:37538" UncompressedSize: 8 Meta: { "logtype": "unknown", "ident": "unknown", "server": "ipv6:[::1]:37538" } } } } 2025-06-30T09:20:18.282271Z :INFO: [/Root] [/Root] [cae94cb2-c34cb0a0-dc058828-113f628d] Closing read session. Close timeout: 3.000000s 2025-06-30T09:20:18.282281Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-06-30T09:20:18.282289Z :INFO: [/Root] [/Root] [cae94cb2-c34cb0a0-dc058828-113f628d] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1250 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:18.282509Z :INFO: [/Root] [/Root] [cae94cb2-c34cb0a0-dc058828-113f628d] Closing read session. Close timeout: 0.000000s 2025-06-30T09:20:18.282513Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-06-30T09:20:18.282517Z :INFO: [/Root] [/Root] [cae94cb2-c34cb0a0-dc058828-113f628d] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1250 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:18.282535Z :NOTICE: [/Root] [/Root] [cae94cb2-c34cb0a0-dc058828-113f628d] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:20:18.287280Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_7_1_11009835433550785182_v1 grpc read done: success# 1, data# { read { } } 2025-06-30T09:20:18.287306Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_7_1_11009835433550785182_v1 grpc closed 2025-06-30T09:20:18.287325Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_7_1_11009835433550785182_v1 is DEAD 2025-06-30T09:20:18.288283Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_7_1_11009835433550785182_v1 2025-06-30T09:20:18.288321Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [7:7521669786874187931:2474] destroyed 2025-06-30T09:20:18.288343Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_7_1_11009835433550785182_v1 2025-06-30T09:20:18.288633Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [7:7521669786874187928:2471] disconnected; active server actors: 1 2025-06-30T09:20:18.288645Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [7:7521669786874187928:2471] client user disconnected session shared/user_7_1_11009835433550785182_v1 2025-06-30T09:20:18.850414Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.850424Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.850430Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:20:18.850527Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:20:18.850667Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:20:18.850756Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.850850Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: 13. Commit offset: 31 2025-06-30T09:20:18.851162Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.851167Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.851171Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:20:18.851311Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:20:18.851483Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:20:18.851535Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.851575Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2025-06-30T09:20:18.851802Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-30T09:20:18.883723Z :INFO: Error decompressing data: (TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check) 2025-06-30T09:20:18.883763Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-3) 2025-06-30T09:20:18.886800Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:20:18.886819Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-30T09:20:18.886826Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-30T09:20:18.886844Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 3, size 16 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { DataDecompressionError: "(TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check)" Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-30T09:20:18.903419Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.903429Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.903433Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:20:18.903573Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:20:18.903780Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:20:18.903859Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.903917Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-30T09:20:18.904088Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.904168Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:20:18.904207Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:20:18.904216Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-30T09:20:18.904227Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 2). 
Partition stream id: 1 2025-06-30T09:20:18.904603Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.904607Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.904610Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:20:18.904717Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-30T09:20:18.904857Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-30T09:20:18.904941Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.905111Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:20:18.905146Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-30T09:20:18.905165Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-30T09:20:18.905184Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes >> TestKinesisHttpProxy::GoodRequestGetRecords [GOOD] >> TestKinesisHttpProxy::PutRecordsWithIncorrectHashKey [GOOD] >> DataShardReadTableSnapshots::ReadTableDropColumn [GOOD] >> DataShardReadTableSnapshots::CorruptedDyNumber >> TestYmqHttpProxy::TestReceiveMessageWithAttemptId >> TestYmqHttpProxy::TestTagQueue >> TestKinesisHttpProxy::ListShards >> TestKinesisHttpProxy::GoodRequestGetRecordsCbor >> BasicUsage::RecreateObserver [GOOD] |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BindQueue::Basic >> TAsyncIndexTests::SplitIndexWithReboots[TabletReboots] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH17 [GOOD] Test command err: Trying to start YDB, gRPC: 28738, MsgBus: 7788 2025-06-30T09:20:03.640399Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669727145422831:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:03.640447Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a54/r3tmp/tmp2t98pH/pdisk_1.dat 2025-06-30T09:20:03.718570Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28738, node 1 2025-06-30T09:20:03.728695Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:03.728712Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:03.728714Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:03.728764Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7788 TClient is connected to server localhost:7788 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:20:03.789319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:03.789351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:03.790466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:03.792317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:04.113607Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669731440390724:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.113621Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669731440390729:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.113640Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:04.114779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:04.117093Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669731440390738:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:20:04.200261Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669731440390789:2328] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:04.253331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:20:04.285591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521669731440391027:2309];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:20:04.285590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:20:04.285640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:20:04.285640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521669731440391027:2309];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:20:04.285715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:20:04.285736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521669731440391027:2309];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:20:04.285740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:20:04.285775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:20:04.285786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521669731440391027:2309];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:20:04.285814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:20:04.285815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521669731440391027:2309];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:20:04.285844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:20:04.285864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521669731440391027:2309];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:20:04.285887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:20:04.285899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521669731440391027:2309];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:20:04.285924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:20:04.285932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521669731440391027:2309];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:20:04.285954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:20:04.285954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521669731440391027:2309];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:20:04.285978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521669731440391027:2309];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:20:04.285984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:20:04.286010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669731440391058:2318];tablet_id=72075186224037888;process=TTxIn ... 
TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.720950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.721645Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.721837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.721906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.722051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.722826Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.722950Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.722976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.723099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.723737Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.723792Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.723848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.723919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.724557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.724674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.724774Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.724919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.725230Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.725317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.725509Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.725620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.725883Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.725981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.726426Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.726530Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.726560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.726650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.727240Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.727354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.727388Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.727512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.727926Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.728022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.728439Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.728584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.728720Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.728850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.729423Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.729497Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.729580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.729765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.730093Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.730200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:14.730384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.730673Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:14.802029Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:14.802035Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:14.802169Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheBeginning [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheEnd ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::RecreateObserver [GOOD] Test command err: 2025-06-30T09:18:36.977267Z :RetryDiscoveryWithCancel INFO: Random seed for debugging is 1751275116977259 2025-06-30T09:18:37.126201Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669358091475682:2084];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:37.126227Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:37.132249Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669357214917258:2095];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:37.139851Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d3e/r3tmp/tmpiocA9n/pdisk_1.dat 2025-06-30T09:18:37.224167Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:37.240599Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:37.289726Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22102, node 1 2025-06-30T09:18:37.306617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002d3e/r3tmp/yandexnTeUEN.tmp 2025-06-30T09:18:37.306636Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002d3e/r3tmp/yandexnTeUEN.tmp 2025-06-30T09:18:37.306752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002d3e/r3tmp/yandexnTeUEN.tmp 2025-06-30T09:18:37.306817Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:37.314394Z INFO: TTestServer started on Port 29800 GrpcPort 22102 2025-06-30T09:18:37.321679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:37.321731Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:37.323885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:37.323928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:37.328028Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:37.328085Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:37.328521Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29800 PQClient connected to localhost:22102 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:18:37.355893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:37.364484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:18:37.375387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 waiting... 
waiting... 2025-06-30T09:18:37.622005Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669357214917498:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.622031Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669357214917487:2268], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.622056Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.623775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:37.633358Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669357214917501:2272], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-30T09:18:37.742867Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669357214917529:2128] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:37.746013Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669358091476659:2302], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.746227Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=MjBkMzUwMjEtZjIzNzZkZC02NDkxOTEwNS1iNWQwZGM0OQ==, ActorId: [1:7521669358091476633:2295], ActorState: ExecuteState, TraceId: 01jz02373q48hbjwk56mjm6r6z, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.746800Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.747589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.748990Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669357214917544:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.749281Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=ODRlNWM2NjUtNzE3ZGYwNDYtYjJjNThhYzAtNTA0ZTlmNzk=, ActorId: [2:7521669357214917485:2267], ActorState: ExecuteState, TraceId: 01jz02371n0fp58y38bmch0rsc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.749430Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.834147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.862854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:22102", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-06-30T09:18:37.908909Z node ... 8Z node 3 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer shared/user session shared/user_3_2_1150484498984098484_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 0 SizeLag: 0 WriteTimestampEstimateMS: 1751275219102 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-30T09:20:19.141332Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 2 consumer shared/user session shared/user_3_2_1150484498984098484_v1 INIT DONE TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 0 readOffset 0 committedOffset 0 2025-06-30T09:20:19.141368Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 2 consumer shared/user session shared/user_3_2_1150484498984098484_v1 sending to client partition status >>> Got event: StartPartitionSession { Partition session id: 1 Topic: "test-topic" Partition: 0 Database name: dc1 Database path: /Root Database id: account-dc1 CommittedOffset: 0 EndOffset: 0 } 2025-06-30T09:20:19.141948Z :INFO: [/Root] [/Root] [494e317f-7507695a-80a21eec-74b13a57] Closing read session. 
Close timeout: 0.000000s 2025-06-30T09:20:19.141964Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:0:0 2025-06-30T09:20:19.141977Z :INFO: [/Root] [/Root] [494e317f-7507695a-80a21eec-74b13a57] Counters: { Errors: 0 CurrentSessionLifetimeMs: 11 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:19.141995Z :NOTICE: [/Root] [/Root] [494e317f-7507695a-80a21eec-74b13a57] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:20:19.142005Z :DEBUG: [/Root] [/Root] [494e317f-7507695a-80a21eec-74b13a57] [] Abort session to cluster 2025-06-30T09:20:19.142235Z :INFO: [/Root] [/Root] [1699d006-de444182-ebf25d8c-6fce6d0e] Closing read session. Close timeout: 0.000000s 2025-06-30T09:20:19.142252Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:20:19.142259Z :INFO: [/Root] [/Root] [1699d006-de444182-ebf25d8c-6fce6d0e] Counters: { Errors: 0 CurrentSessionLifetimeMs: 10 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:19.142272Z :NOTICE: [/Root] [/Root] [1699d006-de444182-ebf25d8c-6fce6d0e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:20:19.142279Z :DEBUG: [/Root] [/Root] [1699d006-de444182-ebf25d8c-6fce6d0e] [] Abort session to cluster 2025-06-30T09:20:19.142321Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_3_2_1150484498984098484_v1 grpc read done: success# 0, data# { } 2025-06-30T09:20:19.142337Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_3_2_1150484498984098484_v1 grpc read failed 2025-06-30T09:20:19.142343Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_3_2_1150484498984098484_v1 grpc closed 2025-06-30T09:20:19.142361Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_3_2_1150484498984098484_v1 is DEAD 2025-06-30T09:20:19.142411Z :INFO: [/Root] [/Root] [3c8d156-11c97b4b-3faaad6-e4a1f3ee] Closing read session. Close timeout: 0.000000s 2025-06-30T09:20:19.142421Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:20:19.142426Z :INFO: [/Root] [/Root] [3c8d156-11c97b4b-3faaad6-e4a1f3ee] Counters: { Errors: 0 CurrentSessionLifetimeMs: 10 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:19.142433Z :NOTICE: [/Root] [/Root] [3c8d156-11c97b4b-3faaad6-e4a1f3ee] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:20:19.142437Z :DEBUG: [/Root] [/Root] [3c8d156-11c97b4b-3faaad6-e4a1f3ee] [] Abort session to cluster 2025-06-30T09:20:19.142462Z :INFO: [/Root] [/Root] [3c8d156-11c97b4b-3faaad6-e4a1f3ee] Closing read session. Close timeout: 0.000000s 2025-06-30T09:20:19.142466Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:20:19.142468Z :INFO: [/Root] [/Root] [3c8d156-11c97b4b-3faaad6-e4a1f3ee] Counters: { Errors: 0 CurrentSessionLifetimeMs: 10 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:19.142473Z :NOTICE: [/Root] [/Root] [3c8d156-11c97b4b-3faaad6-e4a1f3ee] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:20:19.142498Z :INFO: [/Root] [/Root] [1699d006-de444182-ebf25d8c-6fce6d0e] Closing read session. Close timeout: 0.000000s 2025-06-30T09:20:19.142501Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-30T09:20:19.142504Z :INFO: [/Root] [/Root] [1699d006-de444182-ebf25d8c-6fce6d0e] Counters: { Errors: 0 CurrentSessionLifetimeMs: 10 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:19.142509Z :NOTICE: [/Root] [/Root] [1699d006-de444182-ebf25d8c-6fce6d0e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:20:19.142580Z :INFO: [/Root] [/Root] [494e317f-7507695a-80a21eec-74b13a57] Closing read session. Close timeout: 0.000000s 2025-06-30T09:20:19.142605Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:0:0 2025-06-30T09:20:19.142610Z :INFO: [/Root] [/Root] [494e317f-7507695a-80a21eec-74b13a57] Counters: { Errors: 0 CurrentSessionLifetimeMs: 12 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:19.142616Z :NOTICE: [/Root] [/Root] [494e317f-7507695a-80a21eec-74b13a57] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:20:19.142581Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer shared/user session shared/user_3_3_5540015765924766368_v1 grpc read done: success# 0, data# { } 2025-06-30T09:20:19.142605Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_3_2_1150484498984098484_v1 2025-06-30T09:20:19.142632Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7521669794922704055:2495] destroyed 2025-06-30T09:20:19.142595Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer shared/user session shared/user_3_3_5540015765924766368_v1 grpc read failed 2025-06-30T09:20:19.142656Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_2_1150484498984098484_v1 2025-06-30T09:20:19.142600Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer shared/user session shared/user_3_3_5540015765924766368_v1 grpc closed 2025-06-30T09:20:19.142606Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer shared/user session shared/user_3_3_5540015765924766368_v1 is DEAD 2025-06-30T09:20:19.142775Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_17867289953694373321_v1 grpc read done: success# 0, data# { } 2025-06-30T09:20:19.142784Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_3_1_17867289953694373321_v1 grpc read failed 2025-06-30T09:20:19.142789Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 1 consumer shared/user session shared/user_3_1_17867289953694373321_v1 closed 2025-06-30T09:20:19.142796Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669794922704050:2486] disconnected; active server actors: 1 2025-06-30T09:20:19.142800Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669794922704050:2486] client user disconnected session shared/user_3_2_1150484498984098484_v1 2025-06-30T09:20:19.142816Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-06-30T09:20:19.142825Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669794922704047:2487] disconnected; active server actors: 1 2025-06-30T09:20:19.142827Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669794922704047:2487] client user disconnected session shared/user_3_3_5540015765924766368_v1 2025-06-30T09:20:19.142837Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037893][rt3.dc1--test-topic] consumer user balancing. 
Sessions=1, Families=1, UnradableFamilies=1 [1 (0), ], RequireBalancing=0 [] 2025-06-30T09:20:19.142838Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_3_1_17867289953694373321_v1 is DEAD 2025-06-30T09:20:19.142850Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1302: [72075186224037893][rt3.dc1--test-topic] consumer user balancing family=1 (Status=Free, Partitions=[0]) for ReadingSession "shared/user_3_1_17867289953694373321_v1" (Sender=[3:7521669794922704037:2485], Pipe=[3:7521669794922704046:2485], Partitions=[], ActiveFamilyCount=0) 2025-06-30T09:20:19.142861Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037893][rt3.dc1--test-topic] consumer user family 1 status Active partitions [0] session "shared/user_3_1_17867289953694373321_v1" sender [3:7521669794922704037:2485] lock partition 0 for ReadingSession "shared/user_3_1_17867289953694373321_v1" (Sender=[3:7521669794922704037:2485], Pipe=[3:7521669794922704046:2485], Partitions=[], ActiveFamilyCount=1) generation 1 step 3 2025-06-30T09:20:19.142869Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037893][rt3.dc1--test-topic] consumer user start rebalancing. familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-30T09:20:19.142893Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037893][rt3.dc1--test-topic] consumer user balancing duration: 0.000052s 2025-06-30T09:20:19.142920Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669794922704046:2485] disconnected; active server actors: 1 2025-06-30T09:20:19.142931Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7521669794922704046:2485] client user disconnected session shared/user_3_1_17867289953694373321_v1 >> TopicAutoscaling::ReadFromTimestamp_PQv1 [GOOD] |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/dread_cache_service/ut/unittest |80.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |80.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/dread_cache_service/ut/unittest >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumer [GOOD] >> DataShardReadTableSnapshots::CorruptedDyNumber [GOOD] >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumerWithFlag |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest |80.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/test-results/unittest/{meta.json ... 
results_accumulator.log} |80.5%| [LD] {RESULT} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |80.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |80.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |80.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::CorruptedDyNumber [GOOD] Test command err: 2025-06-30T09:20:17.993713Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:17.993820Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:17.993840Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003801/r3tmp/tmpG3hcMu/pdisk_1.dat 2025-06-30T09:20:18.111636Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:20:18.112702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:18.135491Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:18.136809Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275217476293 != 1751275217476297 2025-06-30T09:20:18.181150Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:62:2109] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:20:18.181481Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:20:18.181633Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:18.181658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:18.192886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:18.269942Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:62:2109] Handle TEvProposeTransaction 2025-06-30T09:20:18.269975Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:62:2109] TxId# 281474976715657 ProcessProposeTransaction 2025-06-30T09:20:18.270018Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:62:2109] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:605:2513] 2025-06-30T09:20:18.298474Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:605:2513] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-30T09:20:18.298520Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:605:2513] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:20:18.298870Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:605:2513] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:20:18.298889Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:605:2513] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:20:18.298971Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:605:2513] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:20:18.299026Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:605:2513] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:20:18.299043Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:605:2513] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-30T09:20:18.299124Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:605:2513] txid# 281474976715657 HANDLE EvClientConnected 2025-06-30T09:20:18.299541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.299876Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:605:2513] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-30T09:20:18.299891Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:605:2513] txid# 281474976715657 SEND to# [1:557:2483] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-30T09:20:18.315699Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:18.315998Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:18.316097Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:20:18.316180Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:20:18.328109Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:18.328362Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:20:18.328393Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:20:18.328607Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:20:18.328618Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:20:18.328628Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:20:18.328705Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-30T09:20:18.328727Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:20:18.328739Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:20:18.339095Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:20:18.342825Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:20:18.342934Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:20:18.342965Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:20:18.342972Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:20:18.342977Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:20:18.342984Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:20:18.343067Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:20:18.343077Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:20:18.343194Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:20:18.343221Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:20:18.343235Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:20:18.343242Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:20:18.343250Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:20:18.343255Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:20:18.343261Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:20:18.343266Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:20:18.343272Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:20:18.343293Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:18.343299Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:18.343307Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], 
sessionId# [0:0:0] 2025-06-30T09:20:18.343418Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:20:18.343424Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:20:18.343447Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:20:18.343516Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:20:18.343529Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:20:18.343549Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:20:18.343559Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09: ... ng event TEvTxProcessing::TEvStreamClearancePending 2025-06-30T09:20:20.443893Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:704:2585], Recipient [2:629:2533]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715659 Cleared: true 2025-06-30T09:20:20.443899Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-30T09:20:20.443921Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:629:2533], Recipient [2:629:2533]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:20:20.443930Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:20:20.443940Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:20:20.443948Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:20:20.443956Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715659] at 72075186224037888 for WaitForStreamClearance 2025-06-30T09:20:20.443962Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit WaitForStreamClearance 2025-06-30T09:20:20.443969Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [0:281474976715659] at 72075186224037888 2025-06-30T09:20:20.443975Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-30T09:20:20.443980Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit WaitForStreamClearance 2025-06-30T09:20:20.443985Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit ReadTableScan 2025-06-30T09:20:20.443989Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit ReadTableScan 2025-06-30T09:20:20.444036Z node 2 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Continue 2025-06-30T09:20:20.444042Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:20:20.444047Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-30T09:20:20.444052Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:20:20.444057Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:20:20.444067Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:20:20.444198Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:734:2602], Recipient [2:629:2533]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-30T09:20:20.444205Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-30T09:20:20.444223Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:734:2602], Recipient [2:704:2585]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715659 ShardId: 72075186224037888 2025-06-30T09:20:20.444230Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:704:2585] TxId# 281474976715658] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-30T09:20:20.444296Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:703:2585], Recipient [2:704:2585]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715658 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-30T09:20:20.444305Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:704:2585] TxId# 281474976715658] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-30T09:20:20.444310Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:704:2585] TxId# 281474976715658] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-30T09:20:20.444323Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715659, MessageQuota: 1 2025-06-30T09:20:20.444355Z node 2 :TX_DATASHARD ERROR: read_table_scan.cpp:681: Got scan fatal error: Invalid DyNumber binary representation 2025-06-30T09:20:20.444363Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715659, MessageQuota: 1 2025-06-30T09:20:20.444395Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-30T09:20:20.444401Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715659, at: 72075186224037888 2025-06-30T09:20:20.444420Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287429, Sender [2:734:2602], Recipient [2:704:2585]: NKikimrTx.TEvStreamQuotaRelease TxId: 281474976715659 ShardId: 72075186224037888 2025-06-30T09:20:20.444426Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:704:2585] TxId# 281474976715658] Received TEvStreamQuotaRelease from ShardId# 72075186224037888 2025-06-30T09:20:20.444431Z node 2 :TX_PROXY DEBUG: 
read_table_impl.cpp:2188: [ReadTable [2:704:2585] TxId# 281474976715658] Released quota 1 reserved messages from ShardId# 72075186224037888 2025-06-30T09:20:20.444466Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:629:2533], Recipient [2:629:2533]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:20:20.444471Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:20:20.444481Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:20:20.444487Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-30T09:20:20.444493Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715659] at 72075186224037888 for ReadTableScan 2025-06-30T09:20:20.444498Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit ReadTableScan 2025-06-30T09:20:20.444505Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715659] at 72075186224037888 error: Invalid DyNumber binary representation, IsFatalError: 1 2025-06-30T09:20:20.444519Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-30T09:20:20.444524Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit ReadTableScan 2025-06-30T09:20:20.444529Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:20:20.444534Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit FinishPropose 2025-06-30T09:20:20.444544Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is DelayComplete 2025-06-30T09:20:20.444549Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:20:20.444554Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:20:20.444558Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:20:20.444575Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-30T09:20:20.444579Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:20:20.444584Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715659] at 72075186224037888 has finished 2025-06-30T09:20:20.444588Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:20:20.444592Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 
72075186224037888 2025-06-30T09:20:20.444596Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:20:20.444600Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:20:20.444611Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:20:20.444616Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715659] at 72075186224037888 on unit FinishPropose 2025-06-30T09:20:20.444624Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715659 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: EXEC_ERROR 2025-06-30T09:20:20.444633Z node 2 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715659 at tablet 72075186224037888 status: EXEC_ERROR errors: PROGRAM_ERROR (Invalid DyNumber binary representation) | 2025-06-30T09:20:20.444651Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:20:20.444727Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:629:2533], Recipient [2:704:2585]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037888 Status: EXEC_ERROR Error { Kind: PROGRAM_ERROR Reason: "Invalid DyNumber binary representation" } TxId: 281474976715659 Step: 0 OrderId: 281474976715659 ExecLatency: 0 ProposeLatency: 0 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037888 CpuTimeUsec: 103 } } CommitVersion { Step: 0 TxId: 281474976715659 } 2025-06-30T09:20:20.444735Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1921: [ReadTable [2:704:2585] TxId# 281474976715658] Received TEvProposeTransactionResult Status# EXEC_ERROR ShardId# 72075186224037888 2025-06-30T09:20:20.444750Z node 2 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [2:704:2585] TxId# 281474976715658] RESPONSE Status# ExecError shard: 72075186224037888 table: /Root/Table 2025-06-30T09:20:20.444831Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:704:2585], Recipient [2:629:2533]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1500 TxId: 281474976715658 >> THiveImplTest::BootQueueSpeed [GOOD] >> THiveImplTest::BalancerSpeedAndDistribution >> BindQueue::Basic [GOOD] >> TBlobStorageWardenTest::ObtainPDiskKeySamePin [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithTags [GOOD] |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates >> GroupReconfiguration::BsControllerConfigurationRequestIsFastEnough [GOOD] >> GroupReconfiguration::ReassignsDoNotCauseErrorMessagesMirror3dc >> TestKinesisHttpProxy::TestRequestNoAuthorization [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::ReadFromTimestamp_PQv1 [GOOD] Test command err: 2025-06-30T09:18:52.891346Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669419896621225:2158];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/004039/r3tmp/tmpwGg5NH/pdisk_1.dat 2025-06-30T09:18:52.931046Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:52.933464Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:52.973541Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30590, node 1 2025-06-30T09:18:52.980781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004039/r3tmp/yandexwXdfnF.tmp 2025-06-30T09:18:52.980797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004039/r3tmp/yandexwXdfnF.tmp 2025-06-30T09:18:52.980863Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004039/r3tmp/yandexwXdfnF.tmp 2025-06-30T09:18:52.980927Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:52.987145Z INFO: TTestServer started on Port 11107 GrpcPort 30590 TClient is connected to server localhost:11107 PQClient connected to localhost:30590 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:53.031500Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:53.031538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:53.032485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:53.034277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... waiting... waiting... 
2025-06-30T09:18:53.056014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:18:53.203285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:18:53.568172Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669424191589137:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.568247Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.568795Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669424191589150:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.569908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:53.573161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-30T09:18:53.573216Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669424191589152:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:18:53.621326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.651787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.659223Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669424191589342:2509] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:53.675470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.676481Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669424191589352:2321], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:53.676569Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NjYwZTgwMDYtNWZmNDkwMDEtNzg0ODRkY2ItYTJhZjVkNTk=, ActorId: [1:7521669424191589135:2296], ActorState: ExecuteState, TraceId: 01jz023pkxe3rw9rvrfvkw7gpe, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:53.677136Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669424191589508:2610] 2025-06-30T09:18:53.891396Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:57.889542Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669419896621225:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:57.889576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-30T09:18:58.919689Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-30T09:18:58.924290Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:18:58.924851Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521669445666426178:2682], Recipient [1:7521669419896621387:2143]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:58.924864Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:58.924867Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:18:58.924876Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521669445666426174:2679], Recipient [1:7521669419896621387:2143]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-30T09:18:58.924878Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:18:58.935378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { Po ... AD_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_7_1_2188824851398010067_v1 grpc read failed 2025-06-30T09:20:20.083179Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_7_1_2188824851398010067_v1 grpc closed 2025-06-30T09:20:20.083190Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_7_1_2188824851398010067_v1 is DEAD 2025-06-30T09:20:20.083428Z node 7 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:278: StateIdle, received event# 65543, Sender [7:7521669793892679391:2797], Recipient [7:7521669793892679393:2797]: NActors::TEvents::TEvPoison 2025-06-30T09:20:20.083442Z node 7 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=2) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:20:20.083496Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 269877764, Sender [7:7521669793892679428:3351], Recipient [7:7521669789597711832:2742]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083500Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5326: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083504Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2906: [PQ: 72075186224037896] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083509Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [7:7521669798187646801:2828] disconnected; active server actors: 1 2025-06-30T09:20:20.083512Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [7:7521669793892679427:2797] destroyed 2025-06-30T09:20:20.083512Z node 7 :PERSQUEUE_READ_BALANCER 
NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][test-topic] pipe [7:7521669798187646801:2828] client test-consumer disconnected session test-consumer_7_1_2188824851398010067_v1 2025-06-30T09:20:20.083519Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 269877764, Sender [7:7521669798187646812:3381], Recipient [7:7521669789597711832:2742]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083521Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5326: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083523Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2906: [PQ: 72075186224037896] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083527Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037896] Destroy direct read session test-consumer_7_1_2188824851398010067_v1 2025-06-30T09:20:20.083531Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [7:7521669798187646810:2832] destroyed 2025-06-30T09:20:20.083537Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 269877764, Sender [7:7521669798187646813:3382], Recipient [7:7521669789597711833:2743]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083539Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5326: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083541Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2906: [PQ: 72075186224037897] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083544Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037897] Destroy direct read session test-consumer_7_1_2188824851398010067_v1 2025-06-30T09:20:20.083547Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037897] server disconnected, pipe [7:7521669798187646811:2833] destroyed 2025-06-30T09:20:20.083554Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 269877764, Sender [7:7521669798187646805:3379], Recipient [7:7521669759532939550:2416]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083558Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5326: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083560Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2906: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:20.083563Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session test-consumer_7_1_2188824851398010067_v1 2025-06-30T09:20:20.083566Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [7:7521669798187646804:2831] destroyed 2025-06-30T09:20:20.083571Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188506 (NKikimr::TEvPQ::TEvPipeDisconnected), Tablet [7:7521669789597711832:2742], Partition 2, Sender [7:7521669789597711832:2742], Recipient [7:7521669789597711911:2749], Cookie: 0 2025-06-30T09:20:20.083579Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188506, Sender [7:7521669789597711832:2742], Recipient [7:7521669789597711911:2749]: NKikimr::TEvPQ::TEvPipeDisconnected 2025-06-30T09:20:20.083580Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_7_1_2188824851398010067_v1 2025-06-30T09:20:20.083583Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_7_1_2188824851398010067_v1 2025-06-30T09:20:20.083583Z 
node 7 :PERSQUEUE TRACE: partition.h:612: StateIdle, processing event TEvPQ::TEvPipeDisconnected 2025-06-30T09:20:20.083585Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_7_1_2188824851398010067_v1 2025-06-30T09:20:20.083591Z node 7 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:20:20.083596Z node 7 :PERSQUEUE TRACE: partition_write.cpp:910: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessChangeOwnerRequests. 2025-06-30T09:20:20.083604Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:20.083618Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:20.083621Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:20.083626Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:20.099827Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669759532939550:2416], Partition 0, Sender [0:0:0], Recipient [7:7521669759532939602:2419], Cookie: 0 2025-06-30T09:20:20.099852Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669759532939602:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:20.099858Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:20.099877Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:20.099907Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:20.099910Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:20.099918Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:20.169909Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669789597711833:2743], Partition 1, Sender [0:0:0], Recipient [7:7521669789597711914:2751], Cookie: 0 2025-06-30T09:20:20.169939Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669789597711914:2751]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:20.169945Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:20.169965Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:20.169998Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:20.170010Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:20.170017Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:20.170035Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669789597711832:2742], Partition 2, Sender [0:0:0], Recipient [7:7521669789597711911:2749], Cookie: 0 2025-06-30T09:20:20.170045Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669789597711911:2749]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:20.170047Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:20.170053Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:20.170066Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:20.170071Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:20.170075Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:20.200508Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669759532939550:2416], Partition 0, Sender [0:0:0], Recipient [7:7521669759532939602:2419], Cookie: 0 2025-06-30T09:20:20.200540Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669759532939602:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:20.200547Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:20.200566Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:20.200598Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:20.200602Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:20.200609Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest >> TestKinesisHttpProxy::TestListStreamConsumersWithMaxResults [GOOD] >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates [GOOD] >> TCertificateCheckerTest::CheckSubjectDns >> TestYmqHttpProxy::TestDeleteMessage >> TestKinesisHttpProxy::TestUnauthorizedPutRecords ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::ObtainPDiskKeySamePin [GOOD] Test command err: Disable nodeId# 90 Enable nodeId# 90 Pick Pick Delete nodeId# 94 Pick Pick Add nodeId# 101 Disable nodeId# 19 Enable nodeId# 19 Pick Disable nodeId# 53 Pick Pick Delete nodeId# 72 Disable nodeId# 31 Delete nodeId# 55 Enable nodeId# 53 Disable nodeId# 69 Disable nodeId# 83 Disable nodeId# 32 Disable nodeId# 66 Disable nodeId# 13 Delete nodeId# 34 Delete nodeId# 19 Add nodeId# 102 Delete nodeId# 7 Add nodeId# 103 Enable nodeId# 13 Pick Disable nodeId# 9 Add nodeId# 104 Disable nodeId# 6 Enable nodeId# 66 Add nodeId# 105 Delete nodeId# 1 Enable nodeId# 9 Delete nodeId# 25 Enable nodeId# 6 Pick Pick Disable nodeId# 20 Enable nodeId# 83 Delete nodeId# 13 Delete nodeId# 28 Add nodeId# 106 Add nodeId# 107 Pick Pick Delete nodeId# 2 Delete nodeId# 107 Add nodeId# 108 Pick Delete nodeId# 40 Delete nodeId# 23 Add nodeId# 109 Pick Delete nodeId# 39 Pick Delete nodeId# 27 Add nodeId# 110 Delete nodeId# 35 Pick Enable nodeId# 20 Add nodeId# 111 Add nodeId# 112 Delete nodeId# 51 Delete nodeId# 109 Disable nodeId# 93 Disable nodeId# 67 Add nodeId# 113 Pick Pick Disable nodeId# 86 Delete nodeId# 106 Add nodeId# 114 Delete nodeId# 113 Add nodeId# 115 Delete nodeId# 6 Disable nodeId# 74 Pick Pick Add nodeId# 116 Pick Add nodeId# 117 Delete nodeId# 116 Disable nodeId# 76 Pick Delete nodeId# 42 Disable nodeId# 5 Delete nodeId# 17 Disable nodeId# 15 Add nodeId# 118 Pick Delete nodeId# 46 Pick Pick Disable nodeId# 103 Add nodeId# 119 Enable nodeId# 15 Pick Add nodeId# 120 Disable nodeId# 111 Disable nodeId# 21 Add nodeId# 121 Delete nodeId# 104 Disable nodeId# 60 Pick Enable nodeId# 76 Add nodeId# 122 Disable nodeId# 112 Enable nodeId# 21 Pick Disable nodeId# 71 Enable nodeId# 5 Disable nodeId# 87 Disable nodeId# 41 Disable nodeId# 64 Pick Delete nodeId# 58 Add nodeId# 123 Pick Delete nodeId# 117 Enable nodeId# 111 Add nodeId# 124 Enable nodeId# 67 Disable nodeId# 90 Delete nodeId# 76 Delete nodeId# 12 Delete nodeId# 118 Enable nodeId# 74 Disable nodeId# 62 Pick Enable nodeId# 93 Delete nodeId# 73 Disable nodeId# 10 Disable nodeId# 63 Disable nodeId# 20 Add nodeId# 125 Enable nodeId# 20 Disable nodeId# 99 Disable nodeId# 9 Delete nodeId# 45 Delete nodeId# 32 Pick Delete nodeId# 65 Enable nodeId# 9 Disable nodeId# 101 Enable nodeId# 41 Delete nodeId# 59 Enable nodeId# 86 Delete nodeId# 84 Delete nodeId# 77 Enable nodeId# 103 Disable nodeId# 29 Enable nodeId# 90 Add nodeId# 126 Enable nodeId# 87 Delete nodeId# 98 Pick Disable nodeId# 108 Add nodeId# 127 Add nodeId# 128 Add nodeId# 129 Add nodeId# 130 Disable nodeId# 105 Delete nodeId# 68 Pick Add nodeId# 131 Add nodeId# 132 Add nodeId# 133 Disable nodeId# 130 Enable nodeId# 105 Enable nodeId# 10 Add nodeId# 134 Pick Delete nodeId# 61 Disable nodeId# 92 Pick Enable nodeId# 31 Pick Enable nodeId# 101 Pick Disable nodeId# 91 Enable nodeId# 63 Delete nodeId# 85 Pick Disable nodeId# 81 Disable nodeId# 66 Add nodeId# 135 Pick Delete nodeId# 69 Pick Enable nodeId# 62 Enable nodeId# 99 Delete nodeId# 99 Enable nodeId# 
130 Delete nodeId# 133 Delete nodeId# 16 Enable nodeId# 60 Pick Add nodeId# 136 Delete nodeId# 3 Disable nodeId# 10 Pick Disable nodeId# 126 Pick Disable nodeId# 31 Enable nodeId# 112 Disable nodeId# 20 Disable nodeId# 56 Disable nodeId# 83 Delete nodeId# 126 Add nodeId# 137 Delete nodeId# 64 Add nodeId# 138 Pick Pick Enable nodeId# 83 Pick Enable nodeId# 108 Add nodeId# 139 Delete nodeId# 49 Disable nodeId# 78 Delete nodeId# 30 Delete nodeId# 41 Disable nodeId# 95 Enable nodeId# 81 Add nodeId# 140 Enable nodeId# 56 Enable nodeId# 20 Delete nodeId# 135 Pick Disable nodeId# 110 Enable nodeId# 71 Add nodeId# 141 Delete nodeId# 138 Delete nodeId# 103 Enable nodeId# 92 Disable nodeId# 36 Pick Pick Enable nodeId# 110 Add nodeId# 142 Delete nodeId# 131 Enable nodeId# 95 Enable nodeId# 36 Delete nodeId# 142 Add nodeId# 143 Enable nodeId# 29 Pick Add nodeId# 144 Delete nodeId# 33 Pick Disable nodeId# 101 Delete nodeId# 124 Disable nodeId# 48 Delete nodeId# 29 Add nodeId# 145 Pick Delete nodeId# 21 Enable nodeId# 10 Disable nodeId# 105 Enable nodeId# 105 Delete nodeId# 100 Enable nodeId# 78 Delete nodeId# 134 Delete nodeId# 22 Delete nodeId# 108 Pick Delete nodeId# 95 Delete nodeId# 110 Pick Add nodeId# 146 Delete nodeId# 48 Disable nodeId# 141 Disable nodeId# 47 Enable nodeId# 47 Pick Add nodeId# 147 Disable nodeId# 52 Disable nodeId# 127 Disable nodeId# 82 Delete nodeId# 132 Pick Add nodeId# 148 Add nodeId# 149 Delete nodeId# 67 Disable nodeId# 54 Add nodeId# 150 Enable nodeId# 82 Delete nodeId# 31 Pick Delete nodeId# 66 Enable nodeId# 127 Delete nodeId# 8 Add nodeId# 151 Disable nodeId# 50 Disable nodeId# 140 Pick Disable nodeId# 24 Delete nodeId# 112 Add nodeId# 152 Delete nodeId# 130 Delete nodeId# 145 Enable nodeId# 50 Disable nodeId# 88 Pick Enable nodeId# 24 Disable nodeId# 20 Disable nodeId# 136 Pick Enable nodeId# 91 Disable nodeId# 14 Add nodeId# 153 Delete nodeId# 137 Enable nodeId# 14 Pick Pick Enable nodeId# 20 Disable nodeId# 4 Disable nodeId# 128 Disable nodeId# 119 Add nodeId# 154 Disable nodeId# 93 Enable nodeId# 88 Disable nodeId# 18 Disable nodeId# 79 Enable nodeId# 54 Add nodeId# 155 Delete nodeId# 20 Disable nodeId# 26 Delete nodeId# 74 Add nodeId# 156 Delete nodeId# 122 Enable nodeId# 18 Pick Pick Add nodeId# 157 Delete nodeId# 105 Pick Disable nodeId# 146 Add nodeId# 158 Add nodeId# 159 Delete nodeId# 157 Delete nodeId# 50 Pick Add nodeId# 160 Delete nodeId# 159 Pick Pick Enable nodeId# 141 Pick Pick Add nodeId# 161 Pick Add nodeId# 162 Disable nodeId# 71 Enable nodeId# 128 Add nodeId# 163 Disable nodeId# 139 Pick Add nodeId# 164 Pick Disable nodeId# 97 Add nodeId# 165 Enable nodeId# 136 Disable nodeId# 149 Pick Pick Add nodeId# 166 Disable nodeId# 148 Enable nodeId# 149 Disable nodeId# 162 Pick Add nodeId# 167 Delete nodeId# 90 Enable nodeId# 162 Enable nodeId# 101 Delete nodeId# 101 Disable nodeId# 92 Disable nodeId# 47 Enable nodeId# 97 Disable nodeId# 15 Add nodeId# 168 Pick Disable nodeId# 120 Delete nodeId# 87 Delete nodeId# 18 Delete nodeId# 148 Enable nodeId# 26 Delete nodeId# 155 Add nodeId# 169 Add nodeId# 170 Delete nodeId# 78 Delete nodeId# 57 Enable nodeId# 47 Delete nodeId# 161 Add nodeId# 171 Pick Pick Pick Pick Disable nodeId# 10 Pick Disable nodeId# 165 Disable nodeId# 154 Delete nodeId# 143 Delete nodeId# 125 Disable nodeId# 152 Enable nodeId# 93 Add nodeId# 172 Delete nodeId# 140 Pick Add nodeId# 173 Add nodeId# 174 Enable nodeId# 52 Add nodeId# 175 Enable nodeId# 10 Delete nodeId# 60 Pick Disable nodeId# 52 Enable nodeId# 52 Enable nodeId# 165 Delete 
nodeId# 139 Add nodeId# 176 Delete nodeId# 75 Disable nodeId# 167 Delete nodeId# 82 Enable nodeId# 79 Enable nodeId# 154 Delete nodeId# 175 Delete nodeId# 38 Enable nodeId# 92 Delete nodeId# 156 Enable nodeId# 167 Enable nodeId# 146 Add nodeId# 177 Disable nodeId# 111 Enable nodeId# 152 Disable nodeId# 47 Delete nodeId# 96 Disable nodeId# 9 Add nodeId# 178 Delete nodeId# 37 Disable nodeId# 128 Delete nodeId# 102 Delete nodeId# 11 Add nodeId# 179 Enable nodeId# 4 Pick Add nodeId# 180 Disable nodeId# 54 Add nodeId# 181 Disable nodeId# 123 Pick Enable nodeId# 111 Enable nodeId# 9 Enable nodeId# 120 Delete nodeId# 70 Enable nodeId# 15 Add nodeId# 182 Disable nodeId# 52 Pick Add nodeId# 183 Disable nodeId# 89 Delete nodeId# 14 Add nodeId# 184 Add nodeId# 185 Disable nodeId# 164 Pick Add nodeId# 186 Pick Delete nodeId# 146 Enable nodeId# 54 Delete nodeId# 71 Enable nodeId# 123 Add nodeId# 187 Delete nodeId# 147 Disable nodeId# 165 Disable nodeId# 170 Add nodeId# 188 Delete nodeId# 54 Pick Disable nodeId# 153 Pick Add nodeId# 189 Delete nodeId# 170 Delete nodeId# 186 Disable nodeId# 9 Pick Disable nodeId# 179 Add nodeId# 190 Add nodeId# 191 Enable nodeId# 9 Pick Pick Pick Enable nodeId# 47 Add nodeId# 192 Enable nodeId# 165 Delete nodeId# 92 Add nodeId# 193 Delete nodeId# 114 Disable nodeId# 177 Enable nodeId# 179 Delete nodeId# 167 Disable nodeId# 4 Disable nodeId# 86 Add nodeId# 194 Add nodeId# 195 Pick Enable nodeId# 153 Add nodeId# 196 Pick Enable nodeId# 52 Add nodeId# 197 Delete nodeId# 44 Enable nodeId# 4 Pick Disable nodeId# 169 Pick Add nodeId# 198 Delete nodeId# 80 Pick Disable nodeId# 168 Enable nodeId# 164 Enable nodeId# 168 Delete nodeId# 136 Delete nodeId# 163 Delete nodeId# 120 Add nodeId# 199 Add nodeId# 200 Disable nodeId# 172 Enable nodeId# 89 Disable nodeId# 62 Pick Delete nodeId# 191 Disable nodeId# 47 Pick Pick Pick Pick Pick Delete nodeId# 79 Enable nodeId# 172 Disable nodeId# 181 Enable nodeId# 47 Delete nodeId# 93 Enable nodeId# 169 Delete nodeId# 43 Delete nodeId# 183 Add nodeId# 201 Delete nodeId# 129 Pick Pick Disable nodeId# 165 Enable nodeId# 128 Add nodeId# 202 Pick Disable nodeId# 150 Disable nodeId# 192 Disable nodeId# 190 Add nodeId# 203 Delete nodeId# 53 Delete nodeId# 5 Pick Add nodeId# 204 Add nodeId# 205 Enable nodeId# 190 Add nodeId# 206 Delete nodeId# 184 Add nodeId# 207 Enable nodeId# 165 Delete nodeId# 181 Delete nodeId# 144 Delete nodeId# 207 Delete nodeId# 193 Add nodeId# 208 Disable nodeId# 24 Add nodeId# 209 Disable nodeId# 164 Delete nodeId# 171 Delete nodeId# 187 Delete nodeId# 111 Pick Delete nodeId# 47 Disable nodeId# 182 Disable nodeId# 198 Delete nodeId# 172 Delete nodeId# 206 Enable nodeId# 24 Add nodeId# 210 Enable nodeId# 192 Enable nodeId# 119 Pick Enable nodeId# 182 Enable nodeId# 198 Delete nodeId# 91 Delete nodeId# 201 Add nodeId# 211 Pick Disable nodeId# 199 Enable nodeId# 150 Delete nodeId# 168 Enable nodeId# 62 Delete nodeId# 198 Disable nodeId# 56 Disable nodeId# 151 Delete nodeId# 185 Add nodeId# 212 Disable nodeId# 203 Disable nodeId# 165 Disable nodeId# 15 Enable nodeId# 199 Enable nodeId# 203 Delete nodeId# 123 Disable nodeId# 158 Enable nodeId# 165 Add nodeId# 213 Pick Disable nodeId# 83 Delete nodeId# 127 Enable nodeId# 177 Pick Pick Disable nodeId# 200 Disable nodeId# 190 Enable nodeId# 56 Delete nodeId# 208 Disable nodeId# 81 Disable nodeId# 162 Disable nodeId# 176 Add nodeId# 214 Delete nodeId# 214 Disable nodeId# 197 Add nodeId# 215 Enable nodeId# 81 Disable nodeId# 24 Enable nodeId# 176 Enable nodeId# 24 Delete nodeId# 83 
Add nodeId# 216 Disable nodeId# 152 Add nodeId# 217 Delete nodeId# 196 Disable nodeId# 205 Pick Enable nodeId# 197 Enable nodeId# 190 Disable nodeId# 88 Disable nodeId# 217 Disable nodeId# 210 Pick Add nodeId# 218 Delete nodeId# 200 Disable nodeId# 150 Add nodeId# 219 Pick Delete nodeId# 204 Enable nodeId# 151 Disable nodeId# 52 Delete nodeId# 202 Pick Delete nodeId# 115 Pick Enable nodeId# 164 Disable nodeId# 81 Delete nodeId# 26 Disable nodeId# 151 Delete nodeId# 81 Add nodeId# 220 Enable nodeId# 150 Enable nodeId# 52 Add nodeId# 221 Enable nodeId# 217 Delete nodeId# 162 Delete nodeId# 166 Disable nodeId# 173 Delete nodeId# 216 Delete nodeId# 150 Enable nodeId# 15 Enable nodeId# 210 Add nodeId# 222 Enable nodeId# 158 Delete nodeId# 4 Disable nodeId# 180 Pick Pick Pick Enable nodeId# 152 Add nodeId# 223 Delete nodeId# 151 Enable nodeId# 205 Disable nodeId# 160 Delete nodeId# 173 Enable nodeId# 160 Add nodeId# 224 Delete nodeId# 212 Enable nodeId# 86 Pick Delete nodeId# 213 Enable nodeId# 180 Add nodeId# 225 Delete nodeId# 194 Pick Delete nodeId# 56 Pick Enable nodeId# 88 Delete nodeId# 52 Pick Delete nodeId# 62 Delete nodeId# 190 Disable nodeId# 209 Add nodeId# 226 Pick Pick Pick Disable nodeId# 160 Pick Add nodeId# 227 Enable nodeId# 160 Pick Enable nodeId# 209 Delete nodeId# 215 Delete nodeId# 188 Delete nodeId# 88 Pick Disable nodeId# 197 Delete nodeId# 178 Pick Enable nodeId# 197 Pick Disable nodeId# 153 Pick Add nodeId# 228 Delete nodeId# 228 Pick Disable nodeId# 119 Enable nodeId# 119 Enable nodeId# 153 Disable nodeId# 224 Disable nodeId# 197 Delete nodeId# 174 Delete nodeId# 164 Add nodeId# 229 Add nodeId# 230 Disable nodeId# 9 Pick Enable nodeId# 224 Add nodeId# 231 Disable nodeId# 158 Delete nodeId# 229 Add nodeId# 232 Enable nodeId# 197 Pick Enable nodeId# 158 Pick Pick Enable nodeId# 9 Pick Delete nodeId# 195 Disable nodeId# 121 Disable nodeId# 211 Add nodeId# 23 ... 
ble nodeId# 20127 Add nodeId# 20204 Delete nodeId# 20180 Add nodeId# 20205 Add nodeId# 20206 Pick Enable nodeId# 20102 Enable nodeId# 20162 Enable nodeId# 20148 Pick Disable nodeId# 20065 Delete nodeId# 20117 Disable nodeId# 20082 Enable nodeId# 20065 Pick Add nodeId# 20207 Enable nodeId# 20082 Add nodeId# 20208 Delete nodeId# 20135 Pick Delete nodeId# 20202 Delete nodeId# 20102 Add nodeId# 20209 Pick Delete nodeId# 20045 Add nodeId# 20210 Add nodeId# 20211 Delete nodeId# 20115 Pick Pick Delete nodeId# 20208 Pick Add nodeId# 20212 Add nodeId# 20213 Disable nodeId# 20210 Add nodeId# 20214 Delete nodeId# 20197 Add nodeId# 20215 Add nodeId# 20216 Disable nodeId# 20127 Disable nodeId# 19924 Pick Disable nodeId# 20138 Disable nodeId# 20214 Pick Pick Disable nodeId# 20160 Delete nodeId# 20176 Delete nodeId# 20150 Disable nodeId# 20188 Enable nodeId# 20188 Pick Disable nodeId# 19997 Enable nodeId# 19997 Enable nodeId# 20160 Enable nodeId# 20127 Enable nodeId# 19924 Delete nodeId# 20201 Add nodeId# 20217 Enable nodeId# 20210 Pick Disable nodeId# 20216 Enable nodeId# 20138 Delete nodeId# 20194 Disable nodeId# 20190 Add nodeId# 20218 Disable nodeId# 20035 Disable nodeId# 20168 Delete nodeId# 20184 Disable nodeId# 19968 Delete nodeId# 20149 Disable nodeId# 20193 Disable nodeId# 20110 Enable nodeId# 20193 Add nodeId# 20219 Enable nodeId# 20190 Pick Disable nodeId# 20034 Delete nodeId# 20181 Disable nodeId# 20138 Add nodeId# 20220 Enable nodeId# 20034 Delete nodeId# 20035 Add nodeId# 20221 Add nodeId# 20222 Enable nodeId# 20216 Pick Pick Pick Enable nodeId# 20138 Add nodeId# 20223 Add nodeId# 20224 Enable nodeId# 19968 Add nodeId# 20225 Delete nodeId# 20216 Pick Disable nodeId# 20221 Pick Delete nodeId# 20054 Pick Pick Add nodeId# 20226 Enable nodeId# 20214 Pick Add nodeId# 20227 Disable nodeId# 20139 Add nodeId# 20228 Enable nodeId# 20110 Pick Disable nodeId# 20199 Disable nodeId# 20224 Delete nodeId# 20200 Delete nodeId# 20192 Delete nodeId# 20226 Disable nodeId# 20085 Pick Pick Enable nodeId# 20085 Delete nodeId# 20195 Enable nodeId# 20139 Disable nodeId# 20214 Disable nodeId# 20083 Enable nodeId# 20224 Enable nodeId# 20221 Add nodeId# 20229 Enable nodeId# 20168 Disable nodeId# 20082 Disable nodeId# 20205 Disable nodeId# 20211 Delete nodeId# 20210 Delete nodeId# 20209 Add nodeId# 20230 Add nodeId# 20231 Enable nodeId# 20211 Enable nodeId# 20205 Enable nodeId# 20214 Disable nodeId# 20153 Disable nodeId# 20159 Enable nodeId# 20083 Add nodeId# 20232 Pick Enable nodeId# 20199 Delete nodeId# 20204 Disable nodeId# 20205 Enable nodeId# 20153 Pick Add nodeId# 20233 Enable nodeId# 20082 Enable nodeId# 20159 Disable nodeId# 20133 Add nodeId# 20234 Enable nodeId# 20133 Pick Enable nodeId# 20205 Pick Pick Delete nodeId# 20220 Add nodeId# 20235 Delete nodeId# 20191 Add nodeId# 20236 Disable nodeId# 20139 Delete nodeId# 20099 Pick Disable nodeId# 20055 Delete nodeId# 20163 Delete nodeId# 20188 Pick Disable nodeId# 20132 Disable nodeId# 20215 Enable nodeId# 20215 Enable nodeId# 20139 Disable nodeId# 20147 Add nodeId# 20237 Delete nodeId# 20189 Enable nodeId# 20132 Pick Delete nodeId# 20173 Enable nodeId# 20147 Enable nodeId# 20055 Disable nodeId# 20139 Delete nodeId# 20206 Add nodeId# 20238 Pick Enable nodeId# 20139 Pick Add nodeId# 20239 Disable nodeId# 20153 Add nodeId# 20240 Delete nodeId# 20167 Enable nodeId# 20153 Delete nodeId# 20141 Pick Disable nodeId# 20144 Delete nodeId# 20164 Pick Add nodeId# 20241 Pick Enable nodeId# 20144 Pick Add nodeId# 20242 Disable nodeId# 20058 Disable nodeId# 20231 Pick Pick 
Pick Delete nodeId# 20230 Enable nodeId# 20058 Add nodeId# 20243 Pick Add nodeId# 20244 Enable nodeId# 20231 Disable nodeId# 20234 Pick Delete nodeId# 20152 Pick Add nodeId# 20245 Add nodeId# 20246 Enable nodeId# 20234 Add nodeId# 20247 Add nodeId# 20248 Pick Delete nodeId# 20228 Pick Delete nodeId# 20196 Disable nodeId# 20086 Pick Disable nodeId# 20179 Add nodeId# 20249 Pick Pick Pick Pick Add nodeId# 20250 Enable nodeId# 20179 Disable nodeId# 20190 Enable nodeId# 20086 Pick Pick Delete nodeId# 20222 Add nodeId# 20251 Disable nodeId# 20215 Pick Disable nodeId# 19924 Disable nodeId# 19997 Enable nodeId# 20215 Enable nodeId# 19924 Delete nodeId# 20239 Disable nodeId# 20243 Enable nodeId# 20190 Enable nodeId# 19997 Disable nodeId# 20247 Disable nodeId# 20083 Pick Disable nodeId# 20160 Delete nodeId# 20250 Disable nodeId# 20034 Disable nodeId# 20178 Delete nodeId# 20144 Enable nodeId# 20247 Add nodeId# 20252 Enable nodeId# 20243 Pick Pick Delete nodeId# 20218 Delete nodeId# 20127 Disable nodeId# 20132 Enable nodeId# 20160 Pick Pick Disable nodeId# 20245 Pick Delete nodeId# 20241 Delete nodeId# 20083 Delete nodeId# 20212 Enable nodeId# 20245 Delete nodeId# 20182 Enable nodeId# 20132 Pick Enable nodeId# 20178 Delete nodeId# 20252 Pick Enable nodeId# 20034 Delete nodeId# 20243 Disable nodeId# 20205 Enable nodeId# 20205 Delete nodeId# 20158 Disable nodeId# 20217 Add nodeId# 20253 Enable nodeId# 20217 Add nodeId# 20254 Pick Delete nodeId# 20248 Pick Pick Pick Pick Disable nodeId# 20148 Add nodeId# 20255 Disable nodeId# 20235 Enable nodeId# 20148 Delete nodeId# 20082 Disable nodeId# 20162 Disable nodeId# 20190 Delete nodeId# 20159 Enable nodeId# 20190 Enable nodeId# 20162 Add nodeId# 20256 Delete nodeId# 20147 Delete nodeId# 20256 Add nodeId# 20257 Enable nodeId# 20235 Add nodeId# 20258 Pick Add nodeId# 20259 Disable nodeId# 20236 Add nodeId# 20260 Add nodeId# 20261 Add nodeId# 20262 Delete nodeId# 20257 Delete nodeId# 20207 Disable nodeId# 20171 Enable nodeId# 20236 Delete nodeId# 20168 Pick Disable nodeId# 20179 Pick Add nodeId# 20263 Disable nodeId# 20139 Enable nodeId# 20179 Pick Disable nodeId# 20245 Add nodeId# 20264 Disable nodeId# 20086 Enable nodeId# 20245 Enable nodeId# 20086 Delete nodeId# 20229 Pick Delete nodeId# 20085 Pick Add nodeId# 20265 Delete nodeId# 20162 Disable nodeId# 20255 Add nodeId# 20266 Add nodeId# 20267 Pick Disable nodeId# 20178 Pick Disable nodeId# 19968 Disable nodeId# 20199 Add nodeId# 20268 Delete nodeId# 20114 Enable nodeId# 19968 Add nodeId# 20269 Pick Delete nodeId# 20190 Pick Delete nodeId# 20213 Add nodeId# 20270 Enable nodeId# 20178 Delete nodeId# 20203 Disable nodeId# 19924 Pick Enable nodeId# 20199 Add nodeId# 20271 Disable nodeId# 20138 Enable nodeId# 19924 Pick Enable nodeId# 20138 Delete nodeId# 20259 Pick Pick Enable nodeId# 20139 Add nodeId# 20272 Enable nodeId# 20171 Pick Delete nodeId# 20264 Add nodeId# 20273 Disable nodeId# 20245 Delete nodeId# 20233 Delete nodeId# 20121 Pick Delete nodeId# 20219 Disable nodeId# 20260 Delete nodeId# 20255 Enable nodeId# 20245 Add nodeId# 20274 Disable nodeId# 20193 Enable nodeId# 20260 Pick Add nodeId# 20275 Pick Delete nodeId# 20245 Enable nodeId# 20193 Delete nodeId# 20260 Add nodeId# 20276 Delete nodeId# 20104 Disable nodeId# 20034 Add nodeId# 20277 Disable nodeId# 19997 Add nodeId# 20278 Disable nodeId# 20205 Enable nodeId# 20205 Disable nodeId# 20231 Enable nodeId# 19997 Pick Disable nodeId# 20058 Disable nodeId# 20145 Disable nodeId# 20065 Pick Add nodeId# 20279 Disable nodeId# 20223 Enable nodeId# 20231 
Delete nodeId# 20258 Disable nodeId# 20225 Enable nodeId# 20223 Add nodeId# 20280 Delete nodeId# 20275 Delete nodeId# 20266 Add nodeId# 20281 Pick Enable nodeId# 20034 Delete nodeId# 19968 Disable nodeId# 20223 Enable nodeId# 20225 Pick Delete nodeId# 20179 Add nodeId# 20282 Add nodeId# 20283 Pick Disable nodeId# 20278 Disable nodeId# 19924 Disable nodeId# 20211 Pick Pick Pick Disable nodeId# 20273 Add nodeId# 20284 Pick Pick Disable nodeId# 20237 Disable nodeId# 20116 Disable nodeId# 20236 Add nodeId# 20285 Enable nodeId# 19924 Add nodeId# 20286 Pick Enable nodeId# 20278 Enable nodeId# 20065 Pick Disable nodeId# 20249 Pick Pick Enable nodeId# 20116 Pick Enable nodeId# 20145 Disable nodeId# 20278 Delete nodeId# 20240 Add nodeId# 20287 Enable nodeId# 20249 Pick Enable nodeId# 20273 Add nodeId# 20288 Pick Disable nodeId# 20262 Disable nodeId# 20214 Delete nodeId# 20285 Disable nodeId# 20251 Pick Enable nodeId# 20251 Enable nodeId# 20262 Disable nodeId# 20232 Enable nodeId# 20232 Disable nodeId# 19997 Disable nodeId# 20261 Delete nodeId# 20274 Enable nodeId# 20278 Enable nodeId# 20214 Delete nodeId# 20227 Enable nodeId# 20211 Disable nodeId# 20281 Add nodeId# 20289 Disable nodeId# 20276 Delete nodeId# 20278 Delete nodeId# 20193 Enable nodeId# 19997 Enable nodeId# 20237 Disable nodeId# 20224 Add nodeId# 20290 Add nodeId# 20291 Enable nodeId# 20224 Enable nodeId# 20058 Pick Pick Pick Pick Disable nodeId# 20058 Enable nodeId# 20281 Add nodeId# 20292 Pick Pick Disable nodeId# 20254 Pick Delete nodeId# 20286 Delete nodeId# 20269 Delete nodeId# 20138 Disable nodeId# 20132 Enable nodeId# 20223 Pick Enable nodeId# 20254 Disable nodeId# 20244 Disable nodeId# 20263 Enable nodeId# 20276 Disable nodeId# 20215 Disable nodeId# 20292 Delete nodeId# 20055 Enable nodeId# 20263 Enable nodeId# 20292 Disable nodeId# 20238 Disable nodeId# 20145 Enable nodeId# 20261 Disable nodeId# 20034 Disable nodeId# 20280 Pick Disable nodeId# 20148 Delete nodeId# 20205 Add nodeId# 20293 Pick Delete nodeId# 20232 Enable nodeId# 20034 Add nodeId# 20294 Disable nodeId# 20223 Disable nodeId# 20289 Delete nodeId# 20214 Pick Delete nodeId# 20116 Enable nodeId# 20215 Enable nodeId# 20236 Pick Pick Pick Add nodeId# 20295 Add nodeId# 20296 Enable nodeId# 20148 Pick Delete nodeId# 20283 Delete nodeId# 20292 Add nodeId# 20297 Disable nodeId# 19924 Disable nodeId# 20295 Delete nodeId# 20244 Pick Delete nodeId# 20178 Enable nodeId# 20058 Enable nodeId# 20223 Enable nodeId# 20145 Disable nodeId# 20160 Delete nodeId# 20284 Delete nodeId# 20132 Add nodeId# 20298 Enable nodeId# 20160 Enable nodeId# 20289 Disable nodeId# 20290 Enable nodeId# 20280 Pick Delete nodeId# 20263 Pick Delete nodeId# 20094 Disable nodeId# 20145 Disable nodeId# 20297 Delete nodeId# 20268 Add nodeId# 20299 Delete nodeId# 20225 Delete nodeId# 20294 Enable nodeId# 20290 Disable nodeId# 20282 Delete nodeId# 20273 Pick Delete nodeId# 20291 Add nodeId# 20300 Delete nodeId# 20215 Add nodeId# 20301 Disable nodeId# 20234 Delete nodeId# 20110 Disable nodeId# 20157 Pick Add nodeId# 20302 Delete nodeId# 20088 Pick Delete nodeId# 20246 Add nodeId# 20303 Add nodeId# 20304 Disable nodeId# 20235 Enable nodeId# 20235 Enable nodeId# 19924 Disable nodeId# 20034 Add nodeId# 20305 Disable nodeId# 20287 Enable nodeId# 20295 Enable nodeId# 20234 Disable nodeId# 20234 Delete nodeId# 20199 Add nodeId# 20306 Disable nodeId# 20279 Pick Disable nodeId# 20223 Pick Disable nodeId# 20302 Pick Enable nodeId# 20034 Delete nodeId# 20280 Pick Delete nodeId# 20247 Pick Pick Enable nodeId# 20297 Enable 
nodeId# 20145 Disable nodeId# 20303 Pick Disable nodeId# 20299 Enable nodeId# 20238 Delete nodeId# 20223 Add nodeId# 20307 Disable nodeId# 20217 Delete nodeId# 20300 Add nodeId# 20308 Delete nodeId# 20301 Disable nodeId# 20221 Enable nodeId# 20282 Disable nodeId# 20272 Enable nodeId# 20299 Enable nodeId# 20221 Add nodeId# 20309 Pick Pick Enable nodeId# 20217 Delete nodeId# 20224 Pick Pick Delete nodeId# 20065 Enable nodeId# 20234 Disable nodeId# 20262 Add nodeId# 20310 Delete nodeId# 20305 Add nodeId# 20311 Delete nodeId# 20272 Disable nodeId# 20086 Enable nodeId# 20279 Add nodeId# 20312 Add nodeId# 20313 Enable nodeId# 20262 Pick Add nodeId# 20314 Enable nodeId# 20302 Pick Delete nodeId# 20293 Delete nodeId# 20148 Pick Enable nodeId# 20287 Disable nodeId# 20279 Delete nodeId# 20058 Pick Disable nodeId# 20267 Delete nodeId# 20306 Add nodeId# 20315 Disable nodeId# 20281 Pick Disable nodeId# 20277 Delete nodeId# 20276 Disable nodeId# 20315 Add nodeId# 20316 Add nodeId# 20317 Pick Delete nodeId# 20160 Pick Enable nodeId# 20303 Delete nodeId# 20310 Pick Add nodeId# 20318 Enable nodeId# 20157 Disable nodeId# 20157 Add nodeId# 20319 Add nodeId# 20320 Enable nodeId# 20277 Delete nodeId# 20287 Pick Enable nodeId# 20315 Delete nodeId# 20253 Add nodeId# 20321 Delete nodeId# 20298 Disable nodeId# 20242 Add nodeId# 20322 Add nodeId# 20323 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitIndexWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:18:38.403449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:38.403475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:38.403479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:38.403483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:38.403487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:38.403490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:38.403497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:38.403510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:38.403604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:38.403665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:38.418426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:18:38.418454Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:38.418570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:18:38.422560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:38.422687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:38.422724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:38.424481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:38.424530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:38.424639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:38.424718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:38.425266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:38.425306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:38.425527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:38.425535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:38.425557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:38.425563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, 
domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:38.425568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:38.425590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:18:38.426920Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:18:38.440962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:38.441036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.441086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:38.441092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:38.441127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:38.441137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:38.441853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:38.441884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:38.441937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.441945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:38.441961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:38.441967Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:38.442514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.442522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:38.442525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:38.442793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.442800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:38.442804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:38.442809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:38.443279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:38.443623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:38.443655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:38.443807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:38.443824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... 
ntToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false 
BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409547 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:20.057584Z node 91 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:20:20.057697Z node 91 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 137us result status StatusSuccess 2025-06-30T09:20:20.057944Z node 91 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: 
EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\004\000\000\0002\000\000\000\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TestKinesisHttpProxy::TestListStreamConsumersWithToken |80.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |80.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates [GOOD] |80.5%| [LD] {RESULT} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest >> TestKinesisHttpProxy::GoodRequestGetRecordsCbor [GOOD] >> TestKinesisHttpProxy::ListShards [GOOD] >> TestYmqHttpProxy::TestReceiveMessageWithAttemptId [GOOD] >> TestYmqHttpProxy::TestTagQueue [GOOD] >> TestKinesisHttpProxy::ListShardsEmptyFields >> TestYmqHttpProxy::TestListQueues >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query [GOOD] >> TestKinesisHttpProxy::GoodRequestGetRecordsLongStreamName >> TestYmqHttpProxy::TestUntagQueue >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table >> CommitOffset::DistributedTxCommit_CheckOffsetCommitForDifferentCases [GOOD] >> CommitOffset::DistributedTxCommit_Flat_CheckOffsetCommitForDifferentCases >> TCertificateCheckerTest::CheckSubjectDns [GOOD] >> KqpJoinOrder::UdfConstantFold-ColumnStore [GOOD] >> THiveImplTest::BalancerSpeedAndDistribution [GOOD] >> THiveImplTest::TestShortTabletTypes [GOOD] >> THiveImplTest::TestStDev [GOOD] >> THiveImplTest::BootQueueConfigurePriorities [GOOD] >> THiveTest::TestBlockCreateTablet >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumerWithFlag [GOOD] |80.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest >> TCertificateCheckerTest::CheckSubjectDns [GOOD] >> THiveTest::TestBlockCreateTablet [GOOD] >> THiveTest::DrainWithHiveRestart |80.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut >> TestKinesisHttpProxy::BadRequestUnknownMethod >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Query |80.5%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |80.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::UdfConstantFold-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 4364, MsgBus: 29807 2025-06-30T09:20:16.275618Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669781740301083:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:16.275839Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a1e/r3tmp/tmpHiTOom/pdisk_1.dat 2025-06-30T09:20:16.332723Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:16.332827Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669781740301049:2080] 1751275216275405 != 1751275216275408 TServer::EnableGrpc on GrpcPort 4364, node 1 2025-06-30T09:20:16.347778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:16.347790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:16.347792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:16.347835Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29807 2025-06-30T09:20:16.410504Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:16.410530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:16.411661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29807 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:20:16.439514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:16.645773Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669781740301677:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:16.645783Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669781740301671:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:16.645806Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:16.646775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:16.649203Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669781740301685:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:20:16.726011Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669781740301736:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:16.779711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:16.848063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:16.864095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:16.877763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:16.932905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:16.968736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:16.980158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:16.993639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17.007450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17.062042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17.070338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17.084438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17.098398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17.192672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17.200878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17 ... 
714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.249723Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.249876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.250240Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.250356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.251237Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.251382Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.251383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.251577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.252240Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.252351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.252553Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.252690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.253468Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.253633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-30T09:20:22.253825Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.254007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.254557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.254682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.254976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.255116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.255530Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.255642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.256374Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.256593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.256634Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.256946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.257493Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.257614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.257846Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.257999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.258779Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.258944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.259012Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.259190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.259808Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.259970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.260114Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.260272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.260912Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.261060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.261261Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.261447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:22.261971Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.262400Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:22.334362Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:22.334476Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:22.334551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7521669794625217243:3725];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-30T09:20:22.334613Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> TExportToS3Tests::CancelUponTransferringSingleShardTableShouldSucceed >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed >> TExportToS3Tests::RebootDuringCompletion >> TPQTest::TestStorageRetention [GOOD] >> TPQTest::TestSetClientOffset >> TExportToS3Tests::ShouldCheckQuotasExportsLimited >> TExportToS3Tests::DropSourceTableBeforeTransferring >> TExportToS3Tests::ShouldOmitNonStrictStorageSettings >> TExportToS3Tests::CheckItemProgress >> TExportToS3Tests::ShouldSucceedOnConcurrentTxs >> TExportToS3Tests::ExportPartitioningSettings >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed [GOOD] >> TestKinesisHttpProxy::TestUnauthorizedPutRecords [GOOD] >> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed >> TestYmqHttpProxy::TestDeleteMessage [GOOD] >> TestKinesisHttpProxy::TestWrongStream >> TestKinesisHttpProxy::TestListStreamConsumersWithToken [GOOD] >> TExportToS3Tests::RebootDuringCompletion [GOOD] >> TExportToS3Tests::ShouldOmitNonStrictStorageSettings [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentTxs [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery [GOOD] >> TExportToS3Tests::ShouldCheckQuotasExportsLimited [GOOD] >> TExportToS3Tests::ExportPartitioningSettings [GOOD] >> TExportToS3Tests::DropSourceTableBeforeTransferring [GOOD] >> BasicUsage::TWriteSession_WriteEncoded >> TExportToS3Tests::ShouldPreserveIncrBackupFlag >> TExportToS3Tests::ShouldSucceedOnConcurrentExport >> TestKinesisHttpProxy::TestCounters >> TKeyValueTest::TestBasicWriteRead >> TExportToS3Tests::RebootDuringAbortion >> TestYmqHttpProxy::TestDeleteMessageBatch >> TExportToS3Tests::ShouldCheckQuotasChildrenLimited >> TExportToS3Tests::DropCopiesBeforeTransferring1 >> TExportToS3Tests::ExportIndexTablePartitioningSettings >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOk >> TestKinesisHttpProxy::ListShardsEmptyFields [GOOD] >> TExportToS3Tests::ShouldPreserveIncrBackupFlag [GOOD] >> TExportToS3Tests::DropCopiesBeforeTransferring1 [GOOD] >> 
KqpJoinOrder::ShuffleEliminationManyKeysJoinPredicate+EnableSeparationComputeActorsFromRead [GOOD] >> TestYmqHttpProxy::TestUntagQueue [GOOD] >> TExportToS3Tests::ShouldCheckQuotasChildrenLimited [GOOD] >> TExportToS3Tests::SchemaMappingEncryption >> TExportToS3Tests::ExportIndexTablePartitioningSettings [GOOD] >> TExportToS3Tests::ShouldExcludeBackupTableFromStats >> TExportToS3Tests::DropCopiesBeforeTransferring2 >> TestKinesisHttpProxy::GoodRequestGetRecordsLongStreamName [GOOD] >> TestKinesisHttpProxy::ListShardsExclusiveStartShardId >> TExportToS3Tests::RebootDuringAbortion [GOOD] >> TExportToS3Tests::EnableChecksumsPersistance >> TExportToS3Tests::CheckItemProgress [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentExport [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentImport >> TestYmqHttpProxy::TestListQueues [GOOD] >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight >> TExportToS3Tests::ExportStartTime >> TExportToS3Tests::CompletedExportEndTime >> TestKinesisHttpProxy::ErroneousRequestGetRecords >> TExportToS3Tests::SchemaMappingEncryption [GOOD] >> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey >> TExportToS3Tests::EnableChecksumsPersistance [GOOD] >> TExportToS3Tests::DropCopiesBeforeTransferring2 [GOOD] >> TExportToS3Tests::CorruptedDyNumber >> TExportToS3Tests::EncryptedExport >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCreateClean [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestReboot >> TestYmqHttpProxy::TestPurgeQueue >> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentImport [GOOD] >> TestYmqHttpProxy::BillingRecordsForJsonApi [GOOD] >> LocalPartition::Restarts [GOOD] >> LocalPartition::WithoutPartitionWithRestart >> TExportToS3Tests::ExportStartTime [GOOD] >> TExportToS3Tests::CorruptedDyNumber [GOOD] >> TExportToS3Tests::DisableAutoDropping >> TExportToS3Tests::ShouldRetryAtFinalStage >> TestYmqHttpProxy::TestChangeMessageVisibility >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationManyKeysJoinPredicate+EnableSeparationComputeActorsFromRead [GOOD] Test command err: Trying to start YDB, gRPC: 18098, MsgBus: 30333 2025-06-30T09:20:12.313689Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669761931272006:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:12.313727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a2a/r3tmp/tmp1uMmqe/pdisk_1.dat 2025-06-30T09:20:12.402182Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:12.402471Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669761931271982:2080] 1751275212313538 != 1751275212313541 TServer::EnableGrpc on GrpcPort 18098, node 1 2025-06-30T09:20:12.416695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:12.416721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-30T09:20:12.417827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:12.417849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:12.417852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:12.417854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:12.417910Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30333 TClient is connected to server localhost:30333 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:12.480745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:12.483310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:20:12.768321Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669761931272608:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:12.768342Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669761931272620:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:12.768349Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:12.769104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:12.771282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:20:12.771362Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669761931272622:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:20:12.868208Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669761931272674:2328] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:12.911625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:20:12.941213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:20:12.941268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:20:12.941316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:20:12.941335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:20:12.941358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:20:12.941378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:20:12.941399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:20:12.941423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:20:12.941441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:20:12.941460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:20:12.941472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:20:12.941484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669761931272907:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:20:12.944013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669761931272905:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:20:12.944053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669761931272905:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:20:12.944112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669761931272905:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:20:12.944143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669761931272905:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:20:12.944166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669761931272905:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:20:12.944189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669761931272905:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:20:12.944209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669761931272905:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:20:12.944232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669761931272905:2308];tablet_id=72075186224037900;proc ... 
714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.227810Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.227994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.228091Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.228251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.228943Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.229064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.229528Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.229685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.229908Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.230012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.230713Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.230886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.230887Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.231000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-30T09:20:23.239694Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.239958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.240797Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.240931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.241639Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.241796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.242780Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.242897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.243792Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.244106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.245280Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.245406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.247639Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.247853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.247947Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.248135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.251548Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.251773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.252599Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.252712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.253468Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.253799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.254600Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.254711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.255360Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.255562Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.256579Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.256745Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.256820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:23.257458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:23.343779Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:23.343917Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:23.344491Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7521669800586023500:7289];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-30T09:20:23.344615Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> TExportToS3Tests::SchemaMapping >> KqpJoinOrder::TPCDS16-ColumnStore [GOOD] >> TExportToS3Tests::EncryptedExport [GOOD] >> TExportToS3Tests::CompletedExportEndTime [GOOD] >> TExportToS3Tests::Checksums >> TExportToS3Tests::DisableAutoDropping [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table >> TestKinesisHttpProxy::BadRequestUnknownMethod [GOOD] >> THiveTest::DrainWithHiveRestart [GOOD] >> THiveTest::TestCheckSubHiveForwarding >> TExportToS3Tests::SchemaMapping [GOOD] >> TExportToS3Tests::Checksums [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:24.648770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:24.648794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.648800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:24.648805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:24.648816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-30T09:20:24.648820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:24.648829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.648842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:24.648970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:24.649033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:24.663053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:24.663071Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:24.665991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:24.666029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:24.666061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:24.667647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:24.667709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:24.667819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.667866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:24.668436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.668470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:24.668684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.668693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.668722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:24.668730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:24.668736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:24.668752Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.670046Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:24.691771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:24.691848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.691906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:24.691913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:24.691976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:24.691991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:24.692602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.692643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:24.692682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.692692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:24.692697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:24.692701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:24.693100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.693112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:24.693118Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:24.693437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.693447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.693453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.693462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:24.694132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:24.694530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:24.694600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:24.694798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.694826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:24.694833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.694885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:24.694892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.694922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:24.694935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:24.695344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-30T09:20:24.695352Z node 1 :FLAT_TX_SCHEMESHARD ... hemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710758, at schemeshard: 72057594046678944 2025-06-30T09:20:26.227288Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710758:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710758 msg type: 269090816 2025-06-30T09:20:26.227312Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976710758, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:26.227349Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 FAKE_COORDINATOR: Add transaction: 281474976710758 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710758 at step: 5000005 2025-06-30T09:20:26.227425Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:26.227446Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710758 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 17179871343 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:26.227452Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710758:0, step: 5000005, at schemeshard: 72057594046678944 2025-06-30T09:20:26.227474Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710758:0, at schemeshard: 72057594046678944 2025-06-30T09:20:26.227483Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710758:0 progress is 1/1 2025-06-30T09:20:26.227487Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-30T09:20:26.227493Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710758:0 progress is 1/1 2025-06-30T09:20:26.227497Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-30T09:20:26.227504Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:20:26.227516Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:20:26.227522Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710758, ready parts: 1/1, is published: false 2025-06-30T09:20:26.227528Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-30T09:20:26.227533Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and 
all the parts is done, operation id: 281474976710758:0 2025-06-30T09:20:26.227537Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710758:0 2025-06-30T09:20:26.227545Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:20:26.227550Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710758, publications: 2, subscribers: 1 2025-06-30T09:20:26.227555Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-30T09:20:26.227559Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:20:26.227859Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-06-30T09:20:26.228152Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:26.228163Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:26.228196Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:20:26.228221Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:26.228226Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:212:2212], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 1 2025-06-30T09:20:26.228231Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:212:2212], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710758 2025-06-30T09:20:26.228366Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-30T09:20:26.228377Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-30T09:20:26.228382Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710758 2025-06-30T09:20:26.228389Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: 
[OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-30T09:20:26.228394Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:20:26.228484Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-30T09:20:26.228494Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-30T09:20:26.228498Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710758 2025-06-30T09:20:26.228501Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:20:26.228505Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:20:26.228515Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710758, subscribers: 1 2025-06-30T09:20:26.228520Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:128:2152] 2025-06-30T09:20:26.228569Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:20:26.228575Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:20:26.228585Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:20:26.229114Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-06-30T09:20:26.229184Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-06-30T09:20:26.229198Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710758 2025-06-30T09:20:26.229209Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710758 2025-06-30T09:20:26.229216Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-30T09:20:26.229220Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710758 
2025-06-30T09:20:26.229225Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710758, id# 103, itemIdx# 4294967295 2025-06-30T09:20:26.229278Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:20:26.229551Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 103 2025-06-30T09:20:26.229597Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-30T09:20:26.229604Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-30T09:20:26.229665Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-30T09:20:26.229681Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:20:26.229685Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:555:2513] TestWaitNotification: OK eventTxId 103 >> HttpProxyInsideYdb::TestIfEnvVariableSet [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Query [GOOD] >> TExportToS3Tests::ChecksumsWithCompression ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS16-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 14899, MsgBus: 29184 2025-06-30T09:20:17.285051Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669784300765237:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:17.285080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a1c/r3tmp/tmpRDLARe/pdisk_1.dat TServer::EnableGrpc on GrpcPort 14899, node 1 2025-06-30T09:20:17.347834Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:17.349381Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:17.349392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:17.349394Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:17.349427Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29184 2025-06-30T09:20:17.387272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:17.387299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:17.388440Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29184 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:17.403457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:17.716009Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669784300765839:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:17.716014Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669784300765850:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:17.716029Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:17.716841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:17.718905Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669784300765853:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-30T09:20:17.806820Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669784300765904:2327] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:17.866036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17.935447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17.946494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17.960456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:17.974077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.019808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.037445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.048686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.064267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.072848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.132552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.143712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.157637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.289836Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:18.351115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.361622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:18.372286Z node 1 :FLAT_TX_SCHEMESHA ... 
714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.361022Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.361130Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.361218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.361264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.362242Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.362320Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.362409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.362517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.363349Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.363519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.363527Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.363690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.364507Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.364662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-30T09:20:23.364868Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.365024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.365660Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.365802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.366287Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.366486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.366699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.366887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.367699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.367780Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.367827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038453;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.367929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.368749Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038453;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.368868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.368986Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.369150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038575;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.369720Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.369825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.370111Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038575;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.370287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.370536Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.371089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.371195Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.371382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.371850Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.372018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.372322Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.372519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-30T09:20:23.372731Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.373415Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-30T09:20:23.510861Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-30T09:20:23.510954Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7521669792890706857:2872];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-30T09:20:23.511091Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-30T09:20:23.511117Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::EncryptedExport [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:24.747809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:24.747829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.747833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:24.747837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:24.747846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:24.747848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:24.747855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.747867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:24.747962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:24.748013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:24.758187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:24.758205Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:24.760519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:24.760555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:24.760587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:24.761840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:24.761917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:24.762025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.762071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:24.762606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.762635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:24.762896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.762905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.762925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:24.762931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:24.762935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:24.762947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.763964Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:24.778292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:24.778373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.778450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:24.778458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:24.778519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:24.778536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:24.779244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.779290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:24.779338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.779348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:24.779354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:24.779362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:24.779754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.779766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:24.779773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:24.780152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.780161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.780167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.780174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:24.780855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:24.781246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:24.781297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:24.781455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.781479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:24.781485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.781540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:24.781546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.781585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:24.781598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:24.782004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.782012Z node 1 :FLAT_TX_SCHEMESHARD ... 
9:20:26.632498Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-30T09:20:26.632501Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-30T09:20:26.632505Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:20:26.632515Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-30T09:20:26.632748Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:26.632883Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-30T09:20:26.632887Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-30T09:20:26.632891Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-30T09:20:26.632992Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710763:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710763 msg type: 269090816 2025-06-30T09:20:26.633013Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976710763, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:26.633052Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 FAKE_COORDINATOR: Add transaction: 281474976710763 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710763 at step: 5000010 2025-06-30T09:20:26.633318Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:26.633340Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710763 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 17179871343 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:26.633345Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710763:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-30T09:20:26.633364Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710763:0, at schemeshard: 72057594046678944 2025-06-30T09:20:26.633373Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-30T09:20:26.633376Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-30T09:20:26.633380Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-30T09:20:26.633382Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-30T09:20:26.633389Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:20:26.633395Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:20:26.633399Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 1/1, is published: false 2025-06-30T09:20:26.633404Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-30T09:20:26.633408Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710763:0 2025-06-30T09:20:26.633410Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710763:0 2025-06-30T09:20:26.633417Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:20:26.633421Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710763, publications: 2, subscribers: 1 2025-06-30T09:20:26.633424Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-30T09:20:26.633427Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:20:26.633537Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.633894Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:26.633906Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:26.633945Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:20:26.633973Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:26.633978Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:212:2212], at 
schemeshard: 72057594046678944, txId: 281474976710763, path id: 1 2025-06-30T09:20:26.633983Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:212:2212], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710763 2025-06-30T09:20:26.634130Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.634140Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.634143Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-30T09:20:26.634147Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-30T09:20:26.634150Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:20:26.634207Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.634213Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.634215Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-30T09:20:26.634218Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:20:26.634220Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:20:26.634226Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710763, subscribers: 1 2025-06-30T09:20:26.634229Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:128:2152] 2025-06-30T09:20:26.634772Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.634829Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.634840Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710763 2025-06-30T09:20:26.634850Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710763 2025-06-30T09:20:26.634855Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-30T09:20:26.634859Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763 2025-06-30T09:20:26.634863Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763, id# 103, itemIdx# 4294967295 2025-06-30T09:20:26.635166Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:26.635181Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:20:26.635186Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:1142:3019] TestWaitNotification: OK eventTxId 103 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> HttpProxyInsideYdb::TestIfEnvVariableSet [GOOD] Test command err: 2025-06-30T09:20:04.683390Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669729744079516:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:04.683418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004af4/r3tmp/tmp2WpkNz/pdisk_1.dat 2025-06-30T09:20:04.764064Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:04.766848Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669729744079494:2080] 1751275204683228 != 1751275204683231 TServer::EnableGrpc on GrpcPort 28541, node 1 2025-06-30T09:20:04.791426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:04.791471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:04.804997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:04.811067Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:04.811083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:04.811087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:04.811141Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14891 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:04.854634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:14891 2025-06-30T09:20:04.875573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:20:04.877480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-30T09:20:04.883839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:04.948838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:04.971671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:20:04.999075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715665, at schemeshard: 72057594046644480 2025-06-30T09:20:05.004009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.015779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.027166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.038284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.056177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.120094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.177372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.207391Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669734039048159:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.207408Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669734039048148:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.207431Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.208461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:05.211297Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669734039048162:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715673 completed, doublechecking } 2025-06-30T09:20:05.263684Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669734039048213:2856] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:05.324327Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz025wjp86rjfje681sbrmxw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2Q1Y2FlNjYtZWMwMzI2NWMtMjU0N2JiYTktZjIzMTU2NWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:20:05.347466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.356734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchem ... bletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:25.738689Z node 7 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 3ms 2025-06-30T09:20:25.738799Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } 
Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:25.738813Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-06-30T09:20:25.738832Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 3ms 2025-06-30T09:20:25.738846Z node 7 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:25.738851Z node 7 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Attempt 1 execution duration: 3ms 2025-06-30T09:20:25.738892Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:25.738907Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-06-30T09:20:25.738919Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 3ms 2025-06-30T09:20:25.738929Z node 7 :SQS TRACE: 
queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:25.738998Z node 7 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:25.761906Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7521669820638415294:2402]: Pool not found 2025-06-30T09:20:25.761969Z node 7 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-06-30T09:20:25.775553Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7521669820638415281:2397]: Pool not found 2025-06-30T09:20:25.775627Z node 7 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 
2025-06-30T09:20:25.776292Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521669820638415400:2416], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:25.776310Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7521669820638415401:2417], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-06-30T09:20:25.776313Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:25.803103Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7521669820638415398:2415]: Pool not found 2025-06-30T09:20:25.803185Z node 7 :SQS DEBUG: cleanup_queue_data.cpp:138: [cleanup removed queues] there are no queues to delete 2025-06-30T09:20:26.733054Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:56968) incoming connection opened 2025-06-30T09:20:26.733096Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:56968) -> (POST /Root, 3 bytes) 2025-06-30T09:20:26.733150Z node 7 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [98ed:abbb:a313:0:80ed:abbb:a313:0] request [UnknownMethodName] url [/Root] database [/Root] requestId: 8aee922c-a9c943f4-b935ee84-a4810d36 2025-06-30T09:20:26.733205Z node 7 :HTTP_PROXY INFO: http_req.cpp:1211: http request [UnknownMethodName] requestId [8aee922c-a9c943f4-b935ee84-a4810d36] reply with status: UNSUPPORTED message: Missing method name UnknownMethodName 2025-06-30T09:20:26.733243Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:56968) <- (400 InvalidAction, 76 bytes) 2025-06-30T09:20:26.733254Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:289: (#37,[::1]:56968) Request: POST /Root HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.UnknownMethodName X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked { } 2025-06-30T09:20:26.733261Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:56968) Response: HTTP/1.1 400 InvalidAction Connection: close x-amzn-requestid: 8aee922c-a9c943f4-b935ee84-a4810d36 x-amz-crc32: 139748724 Content-Type: application/x-amz-json-1.1 Content-Length: 76 2025-06-30T09:20:26.733307Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:56968) connection closed Http output full {"__type":"InvalidAction","message":"Missing method name UnknownMethodName"} 400 {"__type":"InvalidAction","message":"Missing method name UnknownMethodName"} >> TSchemeShardExtSubDomainTest::Fake [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed-SystemNamesProtection-true >> TExportToS3Tests::ChecksumsWithCompression [GOOD] >> TestKinesisHttpProxy::TestWrongStream [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Table >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheEnd [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheMiddle >> THiveTest::TestCheckSubHiveForwarding [GOOD] >> THiveTest::TestCheckSubHiveDrain >> TExportToS3Tests::Changefeeds ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::DisableAutoDropping [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:24.629267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:24.629294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.629300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:24.629306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:24.629316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:24.629321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:24.629330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.629343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:24.629451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:24.629527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:24.642792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:24.642817Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:24.646079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:24.646120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:24.646164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:24.647522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:24.647566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:24.647638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.647678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:24.648226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.648254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:24.648454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.648461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-30T09:20:24.648481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:24.648486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:24.648490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:24.648504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.649472Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:24.664056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:24.664123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.664175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:24.664183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:24.664226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:24.664237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:24.664807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.664847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:24.664881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.664890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:24.664896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts 
opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:24.664901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:24.665321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.665332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:24.665341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:24.665666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.665674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.665691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.665699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:24.666388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:24.666773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:24.666812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:24.666966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.666991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:24.666997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.667045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:24.667052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.667081Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:24.667093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:24.667481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.667489Z node 1 :FLAT_TX_SCHEMESHARD ... T09:20:26.728746Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-30T09:20:26.728751Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-30T09:20:26.728756Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:20:26.728769Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-30T09:20:26.729119Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:26.729181Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-30T09:20:26.729187Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-30T09:20:26.729194Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-30T09:20:26.729320Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710761:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710761 msg type: 269090816 2025-06-30T09:20:26.729347Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976710761, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:26.729385Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000007 2025-06-30T09:20:26.729593Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:26.729619Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 21474838639 } } Step: 5000007 
MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:26.729626Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000007, at schemeshard: 72057594046678944 2025-06-30T09:20:26.729654Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-30T09:20:26.729663Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-30T09:20:26.729669Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:20:26.729675Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-30T09:20:26.729680Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:20:26.729689Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:20:26.729698Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:20:26.729704Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2025-06-30T09:20:26.729712Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:20:26.729717Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-30T09:20:26.729721Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710761:0 2025-06-30T09:20:26.729730Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:20:26.729736Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-06-30T09:20:26.729742Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-30T09:20:26.729746Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-30T09:20:26.729934Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:26.730254Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:26.730264Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 
281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:26.730302Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:20:26.730326Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:26.730332Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-06-30T09:20:26.730338Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 3 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-30T09:20:26.730509Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:26.730521Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:26.730527Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-30T09:20:26.730533Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-30T09:20:26.730538Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:20:26.730662Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:26.730674Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:26.730678Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-30T09:20:26.730683Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-30T09:20:26.730688Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:20:26.730697Z node 5 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-06-30T09:20:26.730703Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [5:127:2151] 2025-06-30T09:20:26.731283Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:26.731370Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:26.731386Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-30T09:20:26.731398Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710761 2025-06-30T09:20:26.731406Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-30T09:20:26.731412Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-30T09:20:26.731418Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-30T09:20:26.731824Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:26.731843Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:20:26.731850Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:618:2572] TestWaitNotification: OK eventTxId 102 >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed-SystemNamesProtection-true [GOOD] >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-false >> TestKinesisHttpProxy::TestWrongStream2 >> TestKinesisHttpProxy::TestCounters [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanWithRetry [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanManyTables >> TestKinesisHttpProxy::TestEmptyHttpBody >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDrop [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDropIndex ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::SchemaMapping [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:24.504303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:24.504329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.504335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:24.504339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:24.504349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:24.504352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:24.504360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.504372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:24.504473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:24.504534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:24.515558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:24.515582Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:24.518963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:24.519024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:24.519071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:24.520676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:24.520746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:24.520860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.520923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:24.521545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.521580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:24.521833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.521843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-30T09:20:24.521884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:24.521893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:24.521899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:24.521917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.526536Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:24.550134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:24.550224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.550296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:24.550305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:24.550355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:24.550370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:24.551619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.551683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:24.551758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.551773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:24.551779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts 
opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:24.551786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:24.552507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.552523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:24.552529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:24.552945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.552956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.552963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.552972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:24.553825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:24.554341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:24.554396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:24.554603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.554636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:24.554645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.554712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:24.554720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.554777Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:24.554794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:24.555340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.555351Z node 1 :FLAT_TX_SCHEMESHARD ... 09:20:26.933976Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-30T09:20:26.933981Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-30T09:20:26.933987Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:20:26.934016Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-30T09:20:26.934184Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:26.934258Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-30T09:20:26.934268Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-30T09:20:26.934274Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-30T09:20:26.934629Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710763:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710763 msg type: 269090816 2025-06-30T09:20:26.934663Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976710763, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:26.934689Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 FAKE_COORDINATOR: Add transaction: 281474976710763 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710763 at step: 5000010 2025-06-30T09:20:26.934818Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:26.934846Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710763 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 17179871343 } } Step: 5000010 
MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:26.934853Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710763:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-30T09:20:26.934881Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710763:0, at schemeshard: 72057594046678944 2025-06-30T09:20:26.934890Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-30T09:20:26.934894Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-30T09:20:26.934900Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-30T09:20:26.934904Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-30T09:20:26.934913Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:20:26.934924Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:20:26.934930Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 1/1, is published: false 2025-06-30T09:20:26.934937Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-30T09:20:26.934941Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710763:0 2025-06-30T09:20:26.934946Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710763:0 2025-06-30T09:20:26.934958Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:20:26.934964Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710763, publications: 2, subscribers: 1 2025-06-30T09:20:26.934969Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-30T09:20:26.934973Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:20:26.935236Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.935488Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:26.935495Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 
281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:26.935523Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:20:26.935542Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:26.935546Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:212:2212], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 1 2025-06-30T09:20:26.935550Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:212:2212], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710763 2025-06-30T09:20:26.935703Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.935715Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.935719Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-30T09:20:26.935722Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-30T09:20:26.935726Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:20:26.935796Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.935803Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.935808Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-30T09:20:26.935811Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:20:26.935814Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:20:26.935821Z node 4 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710763, subscribers: 1 2025-06-30T09:20:26.935824Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:128:2152] 2025-06-30T09:20:26.936291Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.936343Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-30T09:20:26.936354Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710763 2025-06-30T09:20:26.936361Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710763 2025-06-30T09:20:26.936367Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-30T09:20:26.936370Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763 2025-06-30T09:20:26.936374Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763, id# 103, itemIdx# 4294967295 2025-06-30T09:20:26.936627Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:26.936640Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:20:26.936646Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:856:2783] TestWaitNotification: OK eventTxId 103 >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::Drop >> TestYmqHttpProxy::TestDeleteMessageBatch [GOOD] >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-true >> TExportToS3Tests::Changefeeds [GOOD] >> TestKinesisHttpProxy::ListShardsExclusiveStartShardId [GOOD] >> TSchemeShardExtSubDomainTest::Drop [GOOD] >> TSchemeShardExtSubDomainTest::Drop-ExternalHive >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::Create >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-false ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestDeleteMessageBatch [GOOD] Test command err: 2025-06-30T09:20:05.459641Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669733939448438:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:05.459684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004adf/r3tmp/tmp0bBAuJ/pdisk_1.dat 
2025-06-30T09:20:05.542659Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24536, node 1 2025-06-30T09:20:05.554448Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:05.554465Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:05.554467Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:05.554510Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:05.562330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:05.562362Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:05.563514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5839 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:05.590899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:05.599060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:5839 2025-06-30T09:20:05.641084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-30T09:20:05.647467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-30T09:20:05.648561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-30T09:20:05.656805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:20:05.661254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.706868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:05.731182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-06-30T09:20:05.734940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:05.808073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.823186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:20:05.840192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.856664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.872928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.896694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.922961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.020159Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669738234417063:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.020192Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.020309Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669738234417075:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.021590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:06.024386Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669738234417077:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-30T09:20:06.100190Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669738234417128:2856] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:06.177772Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jz025xc3crebnezb2c8mge1w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzEyODdlMmMtM2YzZmU4NDctYzE3NjMyNmYtYzkxNWMyN2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:20:06.188483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594 ... 47: Request [df506c11-4a767424-32d718b4-b07a2c17] Sending reply from proxy actor: { DeleteMessageBatch { RequestId: "df506c11-4a767424-32d718b4-b07a2c17" Entries { Id: "Id-0" } Entries { Id: "Id-1" } } RequestId: "df506c11-4a767424-32d718b4-b07a2c17" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false } 2025-06-30T09:20:28.002030Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [DeleteMessageBatch] requestId [df506c11-4a767424-32d718b4-b07a2c17] Got succesfult GRPC response. 2025-06-30T09:20:28.002068Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DeleteMessageBatch] requestId [df506c11-4a767424-32d718b4-b07a2c17] reply ok 2025-06-30T09:20:28.002107Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [DeleteMessageBatch] requestId [df506c11-4a767424-32d718b4-b07a2c17] Send metering event. 
HttpStatusCode: 200 IsFifo: 0 FolderId: folder4 RequestSizeInBytes: 716 ResponseSizeInBytes: 222 SourceAddress: 5867:bafc:9571:0:4067:bafc:9571:0 ResourceId: 000000000000000101v0 Action: DeleteMessageBatch 2025-06-30T09:20:28.002175Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:41332) <- (200 , 44 bytes) 2025-06-30T09:20:28.002238Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:41332) connection closed Http output full {"Successful":[{"Id":"Id-0"},{"Id":"Id-1"}]} 2025-06-30T09:20:28.002365Z node 7 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] HandleResponse { Status: 48 TxId: 281474976715712 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "messages" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "SentTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } } } Value { Struct { Optional { } } } } } 2025-06-30T09:20:28.002379Z node 7 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Attempt 1 execution duration: 1ms 2025-06-30T09:20:28.002409Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Sending mkql execution result: { Status: 48 TxId: 281474976715712 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "messages" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "SentTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } } } Value { Struct { Optional { } } } } } 2025-06-30T09:20:28.002414Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Minikql data response: {"messages": []} 2025-06-30T09:20:28.002424Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] execution duration: 1ms 2025-06-30T09:20:28.002485Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [] Sending executed reply 2025-06-30T09:20:28.002499Z node 7 :SQS DEBUG: queue_leader.cpp:1913: Handle oldest timestamp metrics for [cloud4/000000000000000101v0/0] 2025-06-30T09:20:28.002663Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:41340) incoming connection opened 2025-06-30T09:20:28.002706Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:41340) -> (POST /Root, 106 bytes) 2025-06-30T09:20:28.002778Z node 7 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [5867:bafc:9571:0:4067:bafc:9571:0] request [ReceiveMessage] url [/Root] database [/Root] requestId: aba54c83-46144d09-e81c5265-59ac3cd4 2025-06-30T09:20:28.002877Z node 7 :HTTP_PROXY INFO: http_req.cpp:520: http request [ReceiveMessage] requestId [aba54c83-46144d09-e81c5265-59ac3cd4] got new request from [5867:bafc:9571:0:4067:bafc:9571:0] 2025-06-30T09:20:28.002949Z node 7 :HTTP_PROXY 
DEBUG: http_req.cpp:454: http request [ReceiveMessage] requestId [aba54c83-46144d09-e81c5265-59ac3cd4] Got cloud auth response. FolderId: folder4 CloudId: cloud4 UserSid: fake_user_sid@as 2025-06-30T09:20:28.002952Z node 7 :HTTP_PROXY INFO: http_req.cpp:280: http request [ReceiveMessage] requestId [aba54c83-46144d09-e81c5265-59ac3cd4] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-30T09:20:28.003013Z node 7 :SQS DEBUG: ymq_proxy.cpp:148: Got new request in YMQ proxy. FolderId: folder4, CloudId: cloud4, UserSid: fake_user_sid@as, RequestId: aba54c83-46144d09-e81c5265-59ac3cd4 2025-06-30T09:20:28.003029Z node 7 :SQS DEBUG: proxy_actor.cpp:263: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Proxy actor: used user_name='cloud4', queue_name='000000000000000101v0', folder_id='folder4' 2025-06-30T09:20:28.003031Z node 7 :SQS DEBUG: proxy_actor.cpp:78: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Request proxy started 2025-06-30T09:20:28.003098Z node 7 :SQS DEBUG: service.cpp:761: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Answer configuration for queue [cloud4/000000000000000101v0] without leader 2025-06-30T09:20:28.003130Z node 7 :SQS DEBUG: proxy_actor.cpp:97: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Get configuration duration: 0ms 2025-06-30T09:20:28.003172Z node 7 :SQS DEBUG: proxy_service.cpp:246: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Send get leader node request to sqs service for cloud4/000000000000000101v0 2025-06-30T09:20:28.003178Z node 7 :SQS DEBUG: service.cpp:581: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Leader node for queue [cloud4/000000000000000101v0] is 7 2025-06-30T09:20:28.003182Z node 7 :SQS DEBUG: proxy_service.cpp:170: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Got leader node for queue response. Node id: 7. Status: 0 2025-06-30T09:20:28.003206Z node 7 :SQS TRACE: proxy_service.cpp:303: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Sending request from proxy to leader node 7: ReceiveMessage { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000101v0" } RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" 2025-06-30T09:20:28.003216Z node 7 :SQS DEBUG: proxy_service.cpp:70: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Received Sqs Request: ReceiveMessage { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000101v0" } RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" 2025-06-30T09:20:28.003230Z node 7 :SQS DEBUG: action.h:133: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Request started. Actor: [7:7521669830706006125:3680] 2025-06-30T09:20:28.003236Z node 7 :SQS TRACE: service.cpp:1472: Inc local leader ref for actor [7:7521669830706006125:3680] 2025-06-30T09:20:28.003240Z node 7 :SQS DEBUG: service.cpp:754: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Forward configuration request to queue [cloud4/000000000000000101v0] leader 2025-06-30T09:20:28.003246Z node 7 :SQS DEBUG: action.h:627: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Get configuration duration: 0ms 2025-06-30T09:20:28.003249Z node 7 :SQS TRACE: action.h:647: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Got configuration. Root url: http://ghrun-x4qzxsyz2q.auto.internal:8771, Shards: 4, Fail: 0 2025-06-30T09:20:28.003253Z node 7 :SQS TRACE: action.h:662: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Got configuration. 
Attributes: { ContentBasedDeduplication: 0 DelaySeconds: 0.000000s FifoQueue: 0 MaximumMessageSize: 262144 MessageRetentionPeriod: 345600.000000s ReceiveMessageWaitTime: 0.000000s VisibilityTimeout: 30.000000s } 2025-06-30T09:20:28.003256Z node 7 :SQS TRACE: action.h:427: Request [aba54c83-46144d09-e81c5265-59ac3cd4] DoRoutine 2025-06-30T09:20:28.003268Z node 7 :SQS TRACE: queue_leader.cpp:2424: Increment active message requests for [cloud4/000000000000000101v0/2]. ActiveMessageRequests: 1 2025-06-30T09:20:28.003273Z node 7 :SQS DEBUG: queue_leader.cpp:938: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Received empty result from shard 2 infly. Infly capacity: 0. Messages count: 0 2025-06-30T09:20:28.003276Z node 7 :SQS DEBUG: queue_leader.cpp:1162: Request [aba54c83-46144d09-e81c5265-59ac3cd4] No known messages in this shard. Skip attempt to add messages to infly 2025-06-30T09:20:28.003278Z node 7 :SQS DEBUG: queue_leader.cpp:1168: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Already tried to add messages to infly 2025-06-30T09:20:28.003286Z node 7 :SQS TRACE: queue_leader.cpp:2434: Decrement active message requests for [[cloud4/000000000000000101v0/2]. ActiveMessageRequests: 0 2025-06-30T09:20:28.003301Z node 7 :SQS TRACE: action.h:264: Request [aba54c83-46144d09-e81c5265-59ac3cd4] SendReplyAndDie from action actor { ReceiveMessage { RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" } } 2025-06-30T09:20:28.003317Z node 7 :SQS TRACE: proxy_service.h:35: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Sending sqs response: { ReceiveMessage { RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" } RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false } 2025-06-30T09:20:28.003325Z node 7 :SQS TRACE: service.cpp:1483: Dec local leader ref for actor [7:7521669830706006125:3680]. Found: 1 2025-06-30T09:20:28.003349Z node 7 :SQS DEBUG: queue_leader.cpp:384: Request ReceiveMessage working duration: 0ms 2025-06-30T09:20:28.003364Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse ReceiveMessage { RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" } RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false 2025-06-30T09:20:28.003369Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7521669830706006124:2500]: ReceiveMessage { RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" } RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false 2025-06-30T09:20:28.003407Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [aba54c83-46144d09-e81c5265-59ac3cd4] HandleResponse: { ReceiveMessage { RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" } RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false }, status: OK 2025-06-30T09:20:28.003415Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [aba54c83-46144d09-e81c5265-59ac3cd4] Sending reply from proxy actor: { ReceiveMessage { RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" } RequestId: "aba54c83-46144d09-e81c5265-59ac3cd4" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false } Http output full {} 2025-06-30T09:20:28.003538Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [ReceiveMessage] requestId [aba54c83-46144d09-e81c5265-59ac3cd4] Got succesfult GRPC response. 
2025-06-30T09:20:28.003548Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ReceiveMessage] requestId [aba54c83-46144d09-e81c5265-59ac3cd4] reply ok 2025-06-30T09:20:28.003569Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [ReceiveMessage] requestId [aba54c83-46144d09-e81c5265-59ac3cd4] Send metering event. HttpStatusCode: 200 IsFifo: 0 FolderId: folder4 RequestSizeInBytes: 526 ResponseSizeInBytes: 179 SourceAddress: 5867:bafc:9571:0:4067:bafc:9571:0 ResourceId: 000000000000000101v0 Action: ReceiveMessage 2025-06-30T09:20:28.003589Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:41340) <- (200 , 2 bytes) 2025-06-30T09:20:28.003645Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:41340) connection closed >> TestKinesisHttpProxy::ErroneousRequestGetRecords [GOOD] >> RetryPolicy::TWriteSession_TestPolicy [GOOD] >> RetryPolicy::TWriteSession_TestBrokenPolicy >> TExportToS3Tests::CancelUponTransferringSingleShardTableShouldSucceed [GOOD] >> TestKinesisHttpProxy::ListShardsTimestamp >> TSchemeShardExtSubDomainTest::CreateAndAlter >> TExportToS3Tests::CancelUponTransferringMultiShardTableShouldSucceed >> TSchemeShardExtSubDomainTest::Create [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain >> TestKinesisHttpProxy::GoodRequestCreateStream >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx >> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::Drop-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst >> THiveTest::TestCheckSubHiveDrain [GOOD] >> THiveTest::PipeAlivenessOfDeadTablet >> TExportToS3Tests::AuditCompletedExport >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-ExternalHive >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed-SystemNamesProtection-false >> TSchemeShardExtSubDomainTest::CreateAndAlter [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-ExternalHive ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::Changefeeds [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:24.624055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:24.624080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.624088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:24.624093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:24.624104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:24.624107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:24.624115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.624126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:24.624198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:24.624258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:24.637906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:24.637930Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:24.641266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:24.641318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:24.641379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:24.643209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:24.643280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:24.643413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.643478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:24.644144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.644179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:24.644404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.644413Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.644432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:24.644438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:24.644442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:24.644456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.645595Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:24.659889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:24.659967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.660032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:24.660041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:24.660092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:24.660106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:24.660757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.660796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:24.660844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.660853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-30T09:20:24.660859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:24.660865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:24.661249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.661259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:24.661268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:24.661585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.661593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.661600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.661607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:24.662275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:24.662673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:24.662713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:24.662902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.662927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:24.662935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.662985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:24.662993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.663024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:24.663036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:24.663422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.663431Z node 1 :FLAT_TX_SCHEMESHARD ... 09:20:28.297302Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-30T09:20:28.297305Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 7 2025-06-30T09:20:28.297308Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-30T09:20:28.297314Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-30T09:20:28.297671Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:28.297705Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-30T09:20:28.297708Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-30T09:20:28.297712Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-30T09:20:28.297802Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710761:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710761 msg type: 269090816 2025-06-30T09:20:28.297823Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976710761, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000010 2025-06-30T09:20:28.298033Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.298051Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 21474838639 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 
2025-06-30T09:20:28.298056Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-30T09:20:28.298075Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.298084Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-30T09:20:28.298087Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:20:28.298090Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-30T09:20:28.298093Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:20:28.298098Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:20:28.298105Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-30T09:20:28.298109Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2025-06-30T09:20:28.298114Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:20:28.298117Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-30T09:20:28.298119Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710761:0 2025-06-30T09:20:28.298125Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-30T09:20:28.298129Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-06-30T09:20:28.298132Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 12 2025-06-30T09:20:28.298135Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 9], 18446744073709551615 2025-06-30T09:20:28.298229Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:28.298243Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:28.298497Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:28.298504Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:28.298532Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 9] 2025-06-30T09:20:28.298551Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:28.298554Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-06-30T09:20:28.298557Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 9 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-30T09:20:28.298689Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:28.298696Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:28.298701Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-30T09:20:28.298704Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 12 2025-06-30T09:20:28.298707Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:20:28.298822Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:28.298830Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:28.298833Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-30T09:20:28.298836Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 18446744073709551615 2025-06-30T09:20:28.298839Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount 
reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-30T09:20:28.298846Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-06-30T09:20:28.298849Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [5:127:2151] 2025-06-30T09:20:28.299363Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:28.299416Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:20:28.299428Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-30T09:20:28.299436Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710761 2025-06-30T09:20:28.299441Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-30T09:20:28.299445Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-30T09:20:28.299448Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 105, itemIdx# 4294967295 2025-06-30T09:20:28.299738Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:28.299754Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-30T09:20:28.299758Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [5:1388:3175] TestWaitNotification: OK eventTxId 105 >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive >> TxUsage::WriteToTopic_Demo_11_Query [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst-ExternalHive >> THiveTest::PipeAlivenessOfDeadTablet [GOOD] >> THiveTest::TestBootProgress >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed-SystemNamesProtection-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table [GOOD] >> 
TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlter-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst >> TExportToS3Tests::AuditCompletedExport [GOOD] >> TGRpcCmsTest::SimpleTenantsTest >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TxUsage::WriteToTopic_Demo_12_Query >> TExportToS3Tests::AuditCancelledExport >> TxUsage::Sinks_Oltp_WriteToTopics_4_Query [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-true >> TestYmqHttpProxy::TestPurgeQueue [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query >> THiveTest::TestBootProgress [GOOD] >> THiveTest::TestBridgeCreateTablet >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-false >> OlapEstimationRowsCorrectness::TPCH9 [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive |80.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |80.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |80.5%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-ExternalHive >> TestYmqHttpProxy::TestChangeMessageVisibility [GOOD] >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TestYmqHttpProxy::TestSendMessageBatch >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-false [GOOD] >> 
TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-true >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true >> TestKinesisHttpProxy::TestWrongStream2 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:27.455282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:27.455318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:27.455325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:27.455331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:27.455349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:27.455354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:27.455365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:27.455385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:27.455527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:27.455634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:27.475256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:27.475306Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:27.479879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:27.479997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:27.480068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 
72057594046678944 2025-06-30T09:20:27.488138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:27.488283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:27.488460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:27.488561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:27.491431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:27.491513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:27.491950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:27.491966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:27.492003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:27.492015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:27.492024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:27.492056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:27.494983Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:27.538232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:27.538336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:27.538430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:27.538441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:27.538499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:27.538516Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:27.540262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:27.540329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:27.540407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:27.540422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:27.540430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:27.540437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:27.541593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:27.541616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:27.541624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:27.543722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:27.543741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:27.543749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:27.543759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:27.544611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:27.551317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:27.551428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:27.551791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:27.551852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:27.551881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:27.552015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:27.552027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:27.552078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:27.552096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:27.552968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:27.552982Z node 1 :FLAT_TX_SCHEMESHARD ... 
: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 Forgetting tablet 72075186234409547 2025-06-30T09:20:29.626142Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-30T09:20:29.626166Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:20:29.626237Z node 6 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186234409546 2025-06-30T09:20:29.626281Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:20:29.626308Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:20:29.626527Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Forgetting tablet 72075186234409546 2025-06-30T09:20:29.627515Z node 6 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186234409548 2025-06-30T09:20:29.627616Z node 6 :TX_DATASHARD ERROR: datashard.cpp:3573: Datashard's schemeshard pipe destroyed while no messages to sent at 72075186234409549 2025-06-30T09:20:29.627622Z node 6 :TX_DATASHARD ERROR: datashard.cpp:3573: Datashard's schemeshard pipe destroyed while no messages to sent at 72075186234409550 Forgetting tablet 72075186234409548 2025-06-30T09:20:29.627785Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-30T09:20:29.627833Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:20:29.628112Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:20:29.628171Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:20:29.628175Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:20:29.628197Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:20:29.628260Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:20:29.628264Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:20:29.628277Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:29.629186Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-30T09:20:29.629210Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-30T09:20:29.629237Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-30T09:20:29.629242Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186234409547 2025-06-30T09:20:29.629475Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-30T09:20:29.629486Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186234409546 2025-06-30T09:20:29.629508Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-30T09:20:29.629515Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186234409548 2025-06-30T09:20:29.629644Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:20:29.629665Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-30T09:20:29.629765Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-30T09:20:29.629775Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-30T09:20:29.629889Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-30T09:20:29.629917Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-30T09:20:29.629925Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [6:795:2700] TestWaitNotification: OK eventTxId 105 2025-06-30T09:20:29.630031Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:20:29.630083Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/USER_0/dir/table_1" took 67us result status StatusPathDoesNotExist 2025-06-30T09:20:29.630131Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/dir/table_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/USER_0/dir/table_1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:20:29.630223Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:20:29.630243Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 23us result status StatusPathDoesNotExist 2025-06-30T09:20:29.630265Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:20:29.630330Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:20:29.630364Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 35us result status StatusSuccess 2025-06-30T09:20:29.630465Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive >> TestYmqHttpProxy::TestDeleteQueue [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool |80.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TestKinesisHttpProxy::TestEmptyHttpBody [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TExportToS3Tests::AuditCancelledExport [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true [GOOD] >> TestKinesisHttpProxy::TestWrongRequest >> TestYmqHttpProxy::TestListDeadLetterSourceQueues ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:28.607309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:28.607338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-30T09:20:28.607345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:28.607350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:28.607364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:28.607369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:28.607379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:28.607394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:28.607527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:28.607626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:28.619355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:28.619381Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:28.622982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:28.623039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:28.623096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:28.624649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:28.624714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:28.624831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.624901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:28.625630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:28.625681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:28.626042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:28.626060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:28.626093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-06-30T09:20:28.626103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:28.626109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:28.626134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.627545Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:28.647490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:28.647568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.647628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:28.647637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:28.647694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:28.647711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:28.648431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.648467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:28.648516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.648525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:28.648529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:28.648534Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:28.648942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.648952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:28.648956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:28.649335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.649345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.649351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.649356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:28.650132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:28.650567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:28.650613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:28.650813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.650847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:28.650868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.650944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:28.650953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.650994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:28.651025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:28.651527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:28.651538Z node 1 :FLAT_TX_SCHEMESHARD ... p Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:30.310706Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 30064773229 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:30.310714Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 103:0, at tablet# 72057594046678944 2025-06-30T09:20:30.310812Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 103:0 128 -> 240 2025-06-30T09:20:30.310821Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 103:0, at tablet# 72057594046678944 2025-06-30T09:20:30.310856Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-30T09:20:30.310912Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:577: Send TEvUpdateTenantSchemeShard, to actor: [7:393:2362], msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 }, at schemeshard: 72057594046678944 2025-06-30T09:20:30.311487Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5913: Handle TEvUpdateTenantSchemeShard, at schemeshard: 72075186234409546, msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } 2025-06-30T09:20:30.311521Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:79: TTxUpdateTenant DoExecute, msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 
MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 }, at schemeshard: 72075186234409546 2025-06-30T09:20:30.311579Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:590: Cannot publish paths for unknown operation id#0 FAKE_COORDINATOR: Erasing txId 103 2025-06-30T09:20:30.311659Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:30.311666Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:20:30.311706Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:30.311712Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:210:2210], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-30T09:20:30.311734Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:20:30.311741Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 103:0, ProgressState, NeedSyncHive: 0 2025-06-30T09:20:30.311747Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 103:0 240 -> 240 2025-06-30T09:20:30.311979Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:20:30.311993Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:20:30.312002Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-30T09:20:30.312008Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 6 2025-06-30T09:20:30.312014Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-30T09:20:30.312033Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-30T09:20:30.312616Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5900: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 2 TabletID: 72075186234409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 4 
UserAttributesVersion: 1 TenantHive: 72075186233409546 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-06-30T09:20:30.312635Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:20:30.312655Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[7:393:2362], EffectiveACLVersion: 0, SubdomainVersion: 4, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 4, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:20:30.312706Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186234409546 2025-06-30T09:20:30.312712Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186234409546, txId: 0, path id: [OwnerId: 72075186234409546, LocalPathId: 1] 2025-06-30T09:20:30.312738Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186234409546 2025-06-30T09:20:30.312743Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:486:2427], at schemeshard: 72075186234409546, txId: 0, path id: 1 2025-06-30T09:20:30.312897Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186234409546, msg: Owner: 72075186234409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186234409546, cookie: 0 2025-06-30T09:20:30.312944Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:20:30.312955Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-30T09:20:30.313005Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:20:30.313017Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:20:30.313024Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:20:30.313028Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:20:30.313033Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-30T09:20:30.313040Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 
2025-06-30T09:20:30.313046Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-30T09:20:30.313051Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:0 2025-06-30T09:20:30.313065Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-30T09:20:30.313125Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:20:30.313137Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 104 TestModificationResults wait txId: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 2025-06-30T09:20:30.313561Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-30T09:20:30.313572Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-30T09:20:30.313645Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-30T09:20:30.313666Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:20:30.313672Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:571:2510] TestWaitNotification: OK eventTxId 103 >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-true >> TExportToS3Tests::AutoDropping >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight [GOOD] >> TGRpcCmsTest::SimpleTenantsTest [GOOD] >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-false >> TExportToS3Tests::AutoDropping [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] 
recipient: [1:112:2142] 2025-06-30T09:20:28.650781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:28.650810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:28.650816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:28.650822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:28.650838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:28.650843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:28.650853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:28.650869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:28.651019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:28.651106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:28.667463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:28.667490Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:28.671090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:28.671156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:28.671203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:28.672916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:28.672989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:28.673114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.673175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:28.673830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:28.673882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:28.674160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:28.674172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:28.674196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:28.674204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:28.674210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:28.674233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.675717Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:28.700542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:28.700643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.700716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:28.700725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:28.700778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:28.700795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:28.701787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.701839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:28.701927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.701939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:28.701945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:28.701951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:28.702510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.702525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:28.702532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:28.702908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.702918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.702924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.702932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:28.703724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:28.705050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:28.705108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:28.705297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.705329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:28.705353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-30T09:20:28.705427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:28.705436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.705473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:28.705486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:28.706055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:28.706066Z node 1 :FLAT_TX_SCHEMESHARD ... D DEBUG: schemeshard__operation_common_subdomain.cpp:120: NSubDomainState::TConfigureParts operationId# 102:0 Got OK TEvConfigureStatus from tablet# 72075186233409549 shardIdx# 72057594046678944:4 at schemeshard# 72057594046678944 2025-06-30T09:20:30.650225Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 3 -> 128 2025-06-30T09:20:30.651478Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:20:30.651546Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:20:30.651554Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:20:30.651560Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 102:0, at tablet# 72057594046678944 2025-06-30T09:20:30.651567Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 102 ready parts: 1/1 2025-06-30T09:20:30.651604Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:30.652076Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-30T09:20:30.652123Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-30T09:20:30.652215Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 
72057594046678944 2025-06-30T09:20:30.652241Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 34359740524 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:30.652249Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:0, at tablet# 72057594046678944 2025-06-30T09:20:30.652336Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 128 -> 240 2025-06-30T09:20:30.652348Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:0, at tablet# 72057594046678944 2025-06-30T09:20:30.652378Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-30T09:20:30.652405Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[8:373:2346], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 72075186233409549, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:20:30.652925Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:30.652940Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:20:30.653002Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:30.653008Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [8:214:2214], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:20:30.653120Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:20:30.653130Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 102:0, ProgressState, NeedSyncHive: 0 2025-06-30T09:20:30.653136Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 240 -> 240 2025-06-30T09:20:30.653259Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:20:30.653278Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:20:30.653284Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:20:30.653290Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-30T09:20:30.653297Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-30T09:20:30.653315Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-30T09:20:30.654316Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:20:30.654332Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:20:30.654349Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:20:30.654355Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:20:30.654365Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:20:30.654370Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:20:30.654376Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:20:30.654390Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [8:312:2301] message: TxId: 102 2025-06-30T09:20:30.654399Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:20:30.654407Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:20:30.654413Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:20:30.654456Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-30T09:20:30.655160Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:20:30.655475Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:20:30.655489Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [8:517:2454] TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 
2025-06-30T09:20:30.656495Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "USER_0" ExternalStatisticsAggregator: false } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:30.656542Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 103:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "USER_0" ExternalStatisticsAggregator: false } 2025-06-30T09:20:30.656550Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 103:0, path /MyRoot/USER_0 2025-06-30T09:20:30.656585Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 103:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, at schemeshard: 72057594046678944 2025-06-30T09:20:30.656594Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 103:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, at schemeshard: 72057594046678944 2025-06-30T09:20:30.657131Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 103, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:30.657187Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, operation: ALTER DATABASE, path: /MyRoot/USER_0 TestModificationResult got TxId: 103, wait until txId: 103 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH9 [GOOD] Test command err: Trying to start YDB, gRPC: 29520, MsgBus: 14616 2025-06-30T09:20:15.750932Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669775893811449:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:15.751167Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a28/r3tmp/tmpYvlzoK/pdisk_1.dat 2025-06-30T09:20:15.884249Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:15.886010Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669775893811409:2080] 1751275215742894 != 1751275215742897 TServer::EnableGrpc on GrpcPort 29520, node 1 2025-06-30T09:20:15.943923Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:15.943938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:15.943942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:15.943992Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:15.947098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:15.947129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:15.950370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14616 TClient is connected to server localhost:14616 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:16.104079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:16.107847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:20:16.353307Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669780188779328:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:16.353316Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669780188779340:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:16.353344Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:16.354289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:16.357432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:20:16.357520Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669780188779342:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:20:16.438922Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669780188779393:2328] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:16.501917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:20:16.524621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:20:16.524676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:20:16.524751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:20:16.524777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:20:16.524797Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:20:16.524814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:20:16.524837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:20:16.524838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669780188779629:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:20:16.524854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:20:16.524855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037900;self_id=[1:7521669780188779629:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:20:16.524891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:20:16.524926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:20:16.524932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669780188779629:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:20:16.524952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:20:16.524959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669780188779629:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:20:16.524974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521669780188779628:2307];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:20:16.524978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669780188779629:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:20:16.524998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669780188779629:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:20:16.525025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669780188779629:2308];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:20:16.525062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7521669780188779629:2308];tablet_id=72075186224037900;proc ... 
TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.779327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.779641Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.779707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.780205Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.780244Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.780380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.780397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.780885Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.780973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.781263Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.781406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.781421Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.781493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.782107Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.782216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.782326Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.782479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.782723Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.782816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.783341Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.783411Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.783418Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.783534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.783921Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.784012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.784425Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.784503Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.784556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.784569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.785144Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.785209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.785523Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.785697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.785709Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.785776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.786362Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.786446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.786583Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.786848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.786932Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.787022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.787510Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.787612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-30T09:20:26.787721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.788090Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-30T09:20:26.843547Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:26.843556Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-30T09:20:26.843656Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:28.920522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:28.920550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:28.920556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:28.920563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:28.920582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:28.920587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:28.920616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, 
InflightLimit# 10 2025-06-30T09:20:28.920636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:28.920768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:28.920849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:28.933917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:28.933946Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:28.937142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:28.937199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:28.937236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:28.938973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:28.939062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:28.939190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.939257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:28.940016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:28.940094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:28.940442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:28.940459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:28.940487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:28.940498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:28.940504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:28.940527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.942150Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 
2025-06-30T09:20:28.963238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:28.963323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.963393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:28.963402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:28.963452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:28.963469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:28.964170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.964209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:28.964263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.964271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:28.964276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:28.964280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:28.964735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.964745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:28.964749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:28.965113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.965122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.965127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.965133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:28.965663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:28.966090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:28.966133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:28.966285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.966308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:28.966327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.966386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:28.966392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.966419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:28.966428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:28.967051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:28.967084Z node 1 :FLAT_TX_SCHEMESHARD ... 
ion: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:20:30.663441Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-30T09:20:30.663446Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-30T09:20:30.663453Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-30T09:20:30.663463Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-30T09:20:30.663964Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72075186233409546 at ss 72057594046678944 2025-06-30T09:20:30.664000Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72075186233409546 at ss 72057594046678944 2025-06-30T09:20:30.664004Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72075186233409546 at ss 72057594046678944 2025-06-30T09:20:30.664007Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72075186233409546 at ss 72057594046678944 2025-06-30T09:20:30.664061Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:20:30.664070Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-30T09:20:30.664081Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:20:30.664085Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:20:30.664090Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:20:30.664092Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:20:30.664096Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-30T09:20:30.664100Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:20:30.664105Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-30T09:20:30.664108Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:0 2025-06-30T09:20:30.664162Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-30T09:20:30.664555Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-30T09:20:30.664619Z 
node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:30.664669Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-30T09:20:30.664775Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186234409547 2025-06-30T09:20:30.664860Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186234409546 Forgetting tablet 72075186234409547 2025-06-30T09:20:30.664902Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-30T09:20:30.664927Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:20:30.665113Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:30.666194Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186234409548 2025-06-30T09:20:30.666306Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:20:30.666361Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186234409546 2025-06-30T09:20:30.666548Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-30T09:20:30.666567Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186234409548 2025-06-30T09:20:30.666869Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:20:30.666934Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:20:30.666990Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:20:30.666998Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:20:30.667032Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:20:30.667111Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:20:30.667118Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:20:30.667132Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:30.667615Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-30T09:20:30.667631Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-30T09:20:30.667652Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-30T09:20:30.667661Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186234409547 2025-06-30T09:20:30.668134Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-30T09:20:30.668146Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186234409546 2025-06-30T09:20:30.668163Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-30T09:20:30.668169Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186234409548 2025-06-30T09:20:30.668205Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:20:30.668222Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-30T09:20:30.668284Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-30T09:20:30.668290Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-30T09:20:30.668350Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-30T09:20:30.668367Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:20:30.668371Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:582:2521] TestWaitNotification: OK eventTxId 103 2025-06-30T09:20:30.668456Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme 
DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:20:30.668497Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 60us result status StatusPathDoesNotExist 2025-06-30T09:20:30.668531Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TestKinesisHttpProxy::ListShardsTimestamp [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::TestEmptyHttpBody [GOOD] Test command err: 2025-06-30T09:20:05.729194Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669732288257692:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:05.729215Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004adc/r3tmp/tmp2TgCrg/pdisk_1.dat TServer::EnableGrpc on GrpcPort 3181, node 1 2025-06-30T09:20:05.807710Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:05.807727Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:05.807729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:05.807777Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:05.807834Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:05.812522Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669732288257673:2080] 1751275205729008 != 1751275205729011 TClient is connected to server localhost:65377 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:20:05.833744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:05.833784Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-30T09:20:05.835173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:05.849605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:05.855044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:65377 2025-06-30T09:20:05.903893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:20:05.911330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-30T09:20:05.915394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 
2025-06-30T09:20:05.923305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-30T09:20:05.936535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.009069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:06.023516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:20:06.025058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.046391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.064662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.075395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.140274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:06.150240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.209895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:06.230673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:06.269806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669736583226328:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.269856Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.269999Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669736583226340:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.270955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:06.274758Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669736583226342:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-30T09:20:06.371897Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669736583226393:2857] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:06.462391Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jz025xkvd09n18n43edx2rtx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTcyYmNmMTItYzE3MjFlOGQtZmE3YTEzNTItOWU3ZGFmMTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:20:06.491851Z node 1 :FLAT_TX_ ... 9.677763Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:29.677784Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-06-30T09:20:29.677806Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 3ms 2025-06-30T09:20:29.677939Z node 8 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:29.677963Z node 8 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: 
Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:29.677965Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 4ms 2025-06-30T09:20:29.678021Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:29.678028Z node 8 :SQS 
TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-06-30T09:20:29.678040Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 4ms 2025-06-30T09:20:29.678136Z node 8 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:29.701452Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7521669838362811648:2399]: Pool not found 2025-06-30T09:20:29.701533Z node 8 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-06-30T09:20:29.724659Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7521669838362811666:2400]: Pool not found 2025-06-30T09:20:29.724750Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-06-30T09:20:29.725544Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521669838362811774:2416], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:29.725548Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [8:7521669838362811775:2417], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-06-30T09:20:29.725581Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:29.759829Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7521669838362811772:2415]: Pool not found 2025-06-30T09:20:29.759949Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:138: [cleanup removed queues] there are no queues to delete Http output full {"__type":"MissingParameter","message":"ydb/core/http_proxy/json_proto_conversion.h:395: Top level of json value is not a map"} 2025-06-30T09:20:30.675087Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:44396) incoming connection opened 2025-06-30T09:20:30.675130Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:44396) -> (POST /Root, 4 bytes) 2025-06-30T09:20:30.675204Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [580a:297d:f873:0:400a:297d:f873:0] request [CreateStream] url [/Root] database [/Root] requestId: f739d613-19f51b49-1cb1e75-ead28d9b 2025-06-30T09:20:30.675407Z node 8 :HTTP_PROXY INFO: http_req.cpp:1211: http request [CreateStream] requestId [f739d613-19f51b49-1cb1e75-ead28d9b] reply with status: BAD_REQUEST message: ydb/core/http_proxy/json_proto_conversion.h:395: Top level of json value is not a map 2025-06-30T09:20:30.675478Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:44396) <- (400 MissingParameter, 127 bytes) 2025-06-30T09:20:30.675489Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:289: (#37,[::1]:44396) Request: POST /Root HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.CreateStream X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked null 2025-06-30T09:20:30.675494Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:44396) Response: HTTP/1.1 400 MissingParameter Connection: close x-amzn-requestid: f739d613-19f51b49-1cb1e75-ead28d9b x-amz-crc32: 851558042 Content-Type: application/x-amz-json-1.1 Content-Length: 127 2025-06-30T09:20:30.675598Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:44396) connection closed >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-true >> TestKinesisHttpProxy::GoodRequestCreateStream [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::SimpleTenantsTest [GOOD] Test command err: 2025-06-30T09:20:29.901536Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669835890885876:2154];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00489e/r3tmp/tmpJ9WZjc/pdisk_1.dat 2025-06-30T09:20:29.945572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:20:29.974915Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:29.996988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:29.997015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:29.999291Z node 1 
:BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:20:29.999725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12554, node 1 2025-06-30T09:20:30.011001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:30.011015Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:30.011018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:30.011070Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16785 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:30.055069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:20:30.087676Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7521669840185853785:2274], Recipient [1:7521669835890886224:2200]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:48194" } 2025-06-30T09:20:30.087706Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-30T09:20:30.087717Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:30.087722Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:30.087763Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:48194" 2025-06-30T09:20:30.087831Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1751275230086386) 2025-06-30T09:20:30.087964Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1751275230086386 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-30T09:20:30.088038Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-30T09:20:30.089604Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-30T09:20:30.089877Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275230086386&action=1" } } } 2025-06-30T09:20:30.089930Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:30.089965Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-30T09:20:30.090012Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-30T09:20:30.090235Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-30T09:20:30.090269Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-30T09:20:30.094525Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got 
config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-30T09:20:30.094560Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:30.094582Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7521669840185853790:2200], Recipient [1:7521669835890886224:2200]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:30.094587Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:30.094594Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:30.094597Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:30.094611Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-30T09:20:30.094620Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-30T09:20:30.094638Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-30T09:20:30.097038Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7521669840185853793:2275], Recipient [1:7521669835890886224:2200]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275230086386&action=1" } UserToken: "" } 2025-06-30T09:20:30.097058Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-30T09:20:30.097146Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275230086386&action=1" } } 2025-06-30T09:20:30.099503Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-30T09:20:30.099524Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:30.099527Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:30.099529Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:30.099559Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-30T09:20:30.099569Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1751275230086386 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:30.100726Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-30T09:20:30.100773Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:30.100790Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-30T09:20:30.100792Z node 1 :CMS_TENANTS 
DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-30T09:20:30.101975Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-30T09:20:30.102677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:20:30.103922Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 2025-06-30T09:20:30.103953Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxC ... 5660 2025-06-30T09:20:30.468234Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715660 2025-06-30T09:20:30.468249Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-30T09:20:30.468265Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:30.468298Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7521669840185854594:2200], Recipient [1:7521669835890886224:2200]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:30.468302Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:30.468309Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:30.468312Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:30.468341Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-30T09:20:30.468348Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1751275230449461 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:30.468367Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1751275230449461 issue= 2025-06-30T09:20:30.469841Z node 3 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72075186224037888 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found - using supplied 72075186224037888 2025-06-30T09:20:30.478862Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 
2025-06-30T09:20:30.478901Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-30T09:20:30.478906Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:30.478981Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7521669835890886079:2199], Recipient [1:7521669835890886224:2200]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-30T09:20:30.478985Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-30T09:20:30.478992Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:30.478995Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:30.479001Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-30T09:20:30.479009Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1751275230449461 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:30.480370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:39: Got DeleteTabletReply with Forward response from Hive 72075186224037888 to Hive 72057594037968897 shardIdx 72057594046644480:1 2025-06-30T09:20:30.482441Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037897 not found 2025-06-30T09:20:30.482461Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-30T09:20:30.482465Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-30T09:20:30.482467Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037894 not found 2025-06-30T09:20:30.482469Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-30T09:20:30.482472Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-30T09:20:30.482475Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037895 not found 2025-06-30T09:20:30.482477Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-30T09:20:30.482482Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037896 not found 2025-06-30T09:20:30.484124Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-30T09:20:30.484149Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:30.484160Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-30T09:20:30.484214Z node 1 :CMS_TENANTS DEBUG: 
console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-30T09:20:30.484940Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-30T09:20:30.484963Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-30T09:20:30.488521Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-30T09:20:30.488551Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7521669840185854672:2200], Recipient [1:7521669835890886224:2200]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-30T09:20:30.488568Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-30T09:20:30.488573Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:30.488575Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:30.488585Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-30T09:20:30.488590Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-30T09:20:30.490073Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-30T09:20:30.490088Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:30.490090Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:30.490092Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:30.490115Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1751275230449461 2025-06-30T09:20:30.490118Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1751275230449461 issue= 2025-06-30T09:20:30.490121Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1751275230449461 issue= 2025-06-30T09:20:30.490122Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-30T09:20:30.490149Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1751275230449461 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:30.495341Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-30T09:20:30.495390Z node 1 
:CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:30.501434Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-30T09:20:30.507669Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7521669840185854713:2422], Recipient [1:7521669835890886224:2200]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275230449461&action=2" } UserToken: "" } 2025-06-30T09:20:30.507686Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-30T09:20:30.507770Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275230449461&action=2" ready: true status: SUCCESS } } 2025-06-30T09:20:30.508750Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7521669840185854716:2424], Recipient [1:7521669835890886224:2200]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "" PeerName: "ipv6:[::1]:48194" } 2025-06-30T09:20:30.508762Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-30T09:20:30.508803Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3377: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: NOT_FOUND issues { message: "Unknown tenant /Root/users/user-1" severity: 1 } } } 2025-06-30T09:20:30.510038Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285123, Sender [1:7521669840185854719:2425], Recipient [1:7521669835890886224:2200]: NKikimr::NConsole::TEvConsole::TEvListTenantsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:48194" } 2025-06-30T09:20:30.510049Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:967: StateWork, processing event TEvConsole::TEvListTenantsRequest 2025-06-30T09:20:30.510127Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3421: Send TEvConsole::TEvListTenantsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.ListDatabasesResult] { } } } } 2025-06-30T09:20:30.511278Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-30T09:20:30.511354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected >> TestKinesisHttpProxy::ListShardsToken >> TGRpcCmsTest::DescribeOptionsTest >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-true [GOOD] >> THiveTest::TestBridgeCreateTablet [GOOD] >> THiveTest::TestBridgeDisconnect ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 
72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:28.789044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:28.789082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:28.789089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:28.789096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:28.789115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:28.789120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:28.789132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:28.789152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:28.789298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:28.789404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:28.802805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:28.802838Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:28.806689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:28.806793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:28.806856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:28.808884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:28.808966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:28.809115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.809192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:28.810014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 
2025-06-30T09:20:28.810066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:28.810391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:28.810405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:28.810433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:28.810442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:28.810449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:28.810471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.812093Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:28.830886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:28.830982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.831062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:28.831072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:28.831124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:28.831141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:28.832063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.832110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:28.832169Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.832180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:28.832186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:28.832192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:28.832620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.832630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:28.832636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:28.832947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.832956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.832963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.832970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:28.833679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:28.834170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:28.834228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:28.834447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.834477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:28.834499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: 
NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.834576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:28.834583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.834621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:28.834633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:28.835122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:28.835131Z node 1 :FLAT_TX_SCHEMESHARD ... 46678944, LocalPathId: 2], 7 2025-06-30T09:20:31.348831Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5913: Handle TEvUpdateTenantSchemeShard, at schemeshard: 72075186234409546, msg: TabletId: 72057594046678944 Generation: 2 UserAttributes { Key: "user__attr_1" Value: "value" } UserAttributesVersion: 2 2025-06-30T09:20:31.348854Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:79: TTxUpdateTenant DoExecute, msg: TabletId: 72057594046678944 Generation: 2 UserAttributes { Key: "user__attr_1" Value: "value" } UserAttributesVersion: 2, at schemeshard: 72075186234409546 2025-06-30T09:20:31.348896Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:590: Cannot publish paths for unknown operation id#0 2025-06-30T09:20:31.348931Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:31.348936Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:20:31.348968Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:31.348973Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:210:2210], at schemeshard: 72057594046678944, txId: 104, path id: 2 FAKE_COORDINATOR: Erasing txId 104 2025-06-30T09:20:31.349166Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:20:31.349182Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:20:31.349188Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:20:31.349194Z node 7 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-30T09:20:31.349201Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-30T09:20:31.349219Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-30T09:20:31.349792Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5900: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 2 TabletID: 72075186234409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 4 UserAttributesVersion: 2 TenantHive: 72075186233409546 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-06-30T09:20:31.349814Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:20:31.349835Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[7:393:2362], EffectiveACLVersion: 0, SubdomainVersion: 4, UserAttributesVersion: 2, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 4, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 2, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:20:31.349869Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186234409546 2025-06-30T09:20:31.349874Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186234409546, txId: 0, path id: [OwnerId: 72075186234409546, LocalPathId: 1] 2025-06-30T09:20:31.349898Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186234409546 2025-06-30T09:20:31.349903Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:486:2427], at schemeshard: 72075186234409546, txId: 0, path id: 1 2025-06-30T09:20:31.350228Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186234409546, msg: Owner: 72075186234409546 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72075186234409546, cookie: 0 2025-06-30T09:20:31.350257Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:20:31.350269Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 
2], at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-30T09:20:31.350328Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-30T09:20:31.350337Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-30T09:20:31.350413Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-30T09:20:31.350433Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-30T09:20:31.350438Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [7:589:2528] TestWaitNotification: OK eventTxId 104 2025-06-30T09:20:31.350520Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:20:31.350552Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 40us result status StatusSuccess 2025-06-30T09:20:31.350649Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 2 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } StoragePools { Name: "pool-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "user__attr_1" Value: "value" } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:31.350732Z node 7 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186234409546 2025-06-30T09:20:31.350780Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186234409546 describe path "/MyRoot/USER_0" took 49us result status StatusSuccess 2025-06-30T09:20:31.350851Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "MyRoot/USER_0" PathId: 1 SchemeshardId: 72075186234409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 2 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } StoragePools { Name: "pool-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/USER_0" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "user__attr_1" Value: "value" } } PathId: 1 PathOwnerId: 72075186234409546, at schemeshard: 72075186234409546 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight [GOOD] Test command err: 2025-06-30T09:20:06.120028Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669736367279954:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:06.120235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004ace/r3tmp/tmpw7vS0h/pdisk_1.dat 2025-06-30T09:20:06.191291Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:06.191530Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669736367279857:2080] 1751275206119166 != 
1751275206119169 TServer::EnableGrpc on GrpcPort 18063, node 1 2025-06-30T09:20:06.217113Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:06.217129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:06.217131Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:06.217181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:06.221326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:06.221367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:06.222462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28810 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:06.271866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:28810 2025-06-30T09:20:06.300467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:20:06.302416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:20:06.303151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 
2025-06-30T09:20:06.316125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:06.351067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.367906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:06.400793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.421003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.436121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.450110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.462119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:06.478229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.508640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.701427Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669736367281209:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.701468Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.701681Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669736367281221:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.702633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:06.705844Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669736367281223:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715673 completed, doublechecking } 2025-06-30T09:20:06.803116Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669736367281274:2856] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:06.871577Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz025y1c4khwt5qdt95864xd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTNiOTBmOGMtYzJlMGIxN2YtZGViN2EzNGUtZWViM2I0MTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:20:06.892000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.908311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchem ... ', folder_id='folder4' 2025-06-30T09:20:31.045735Z node 7 :SQS DEBUG: proxy_actor.cpp:78: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Request proxy started 2025-06-30T09:20:31.045764Z node 7 :SQS DEBUG: service.cpp:761: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Answer configuration for queue [cloud4/000000000000000301v0] without leader 2025-06-30T09:20:31.045785Z node 7 :SQS DEBUG: proxy_actor.cpp:97: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Get configuration duration: 0ms 2025-06-30T09:20:31.045808Z node 7 :SQS DEBUG: proxy_service.cpp:246: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Send get leader node request to sqs service for cloud4/000000000000000301v0 2025-06-30T09:20:31.045821Z node 7 :SQS DEBUG: service.cpp:581: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Leader node for queue [cloud4/000000000000000301v0] is 7 2025-06-30T09:20:31.045832Z node 7 :SQS DEBUG: proxy_service.cpp:170: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Got leader node for queue response. Node id: 7. Status: 0 2025-06-30T09:20:31.045874Z node 7 :SQS TRACE: proxy_service.cpp:303: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Sending request from proxy to leader node 7: ListQueueTags { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000301v0" } RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" 2025-06-30T09:20:31.045893Z node 7 :SQS DEBUG: proxy_service.cpp:70: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Received Sqs Request: ListQueueTags { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000301v0" } RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" 2025-06-30T09:20:31.045924Z node 7 :SQS DEBUG: action.h:133: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Request started. 
Actor: [7:7521669846052840452:5452] 2025-06-30T09:20:31.045938Z node 7 :SQS TRACE: service.cpp:1472: Inc local leader ref for actor [7:7521669846052840452:5452] 2025-06-30T09:20:31.045949Z node 7 :SQS DEBUG: service.cpp:754: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Forward configuration request to queue [cloud4/000000000000000301v0] leader 2025-06-30T09:20:31.049109Z node 7 :SQS TRACE: executor.cpp:286: Request [cc8b64a4-c1b98ad1-9dafa86e-70e2da5] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] HandleResponse { Status: 48 TxId: 281474976715933 Step: 1751275231097 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "attrs" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "ContentBasedDeduplication" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "DelaySeconds" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "DlqArn" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "MaxReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MaximumMessageSize" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MessageRetentionPeriod" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveMessageWaitTime" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: false } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct { Optional { Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{\"k46\":\"v\"}" } } } } } 2025-06-30T09:20:31.049127Z node 7 :SQS DEBUG: executor.cpp:287: Request [cc8b64a4-c1b98ad1-9dafa86e-70e2da5] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Attempt 1 execution duration: 4ms 2025-06-30T09:20:31.049221Z node 7 :SQS TRACE: executor.cpp:325: Request [cc8b64a4-c1b98ad1-9dafa86e-70e2da5] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Sending mkql execution result: { Status: 48 TxId: 281474976715933 Step: 1751275231097 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "attrs" Type { Kind: Optional Optional { Item { Kind: 
Optional Optional { Item { Kind: Struct Struct { Member { Name: "ContentBasedDeduplication" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "DelaySeconds" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "DlqArn" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "MaxReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MaximumMessageSize" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MessageRetentionPeriod" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveMessageWaitTime" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: false } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct { Optional { Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{\"k46\":\"v\"}" } } } } } 2025-06-30T09:20:31.049245Z node 7 :SQS TRACE: executor.cpp:327: Request [cc8b64a4-c1b98ad1-9dafa86e-70e2da5] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Minikql data response: {"attrs": {"ContentBasedDeduplication": false, "DelaySeconds": 0, "DlqArn": "", "DlqName": "", "FifoQueue": true, "MaxReceiveCount": 0, "MaximumMessageSize": 262144, "MessageRetentionPeriod": 345600000, "ReceiveMessageWaitTime": 0, "ShowDetailedCountersDeadline": null, "VisibilityTimeout": 30000}, "queueExists": true, "tags": "{\"k46\":\"v\"}"} 2025-06-30T09:20:31.049287Z node 7 :SQS DEBUG: executor.cpp:401: Request [cc8b64a4-c1b98ad1-9dafa86e-70e2da5] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] execution duration: 4ms 2025-06-30T09:20:31.049304Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [cc8b64a4-c1b98ad1-9dafa86e-70e2da5] Sending executed reply 2025-06-30T09:20:31.049424Z node 7 :SQS DEBUG: action.h:627: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Get configuration duration: 3ms 2025-06-30T09:20:31.049435Z node 7 :SQS TRACE: action.h:647: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Got configuration. 
Root url: http://ghrun-x4qzxsyz2q.auto.internal:8771, Shards: 1, Fail: 0 2025-06-30T09:20:31.049442Z node 7 :SQS TRACE: action.h:427: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] DoRoutine 2025-06-30T09:20:31.049475Z node 7 :SQS TRACE: action.h:264: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] SendReplyAndDie from action actor { ListQueueTags { RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" Tags { Key: "k46" Value: "v" } } } 2025-06-30T09:20:31.049510Z node 7 :SQS TRACE: proxy_service.h:35: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Sending sqs response: { ListQueueTags { RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" Tags { Key: "k46" Value: "v" } } RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k46" Value: "v" } } 2025-06-30T09:20:31.049539Z node 7 :SQS TRACE: service.cpp:1483: Dec local leader ref for actor [7:7521669846052840452:5452]. Found: 1 2025-06-30T09:20:31.049541Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse ListQueueTags { RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" Tags { Key: "k46" Value: "v" } } RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k46" Value: "v" } 2025-06-30T09:20:31.049555Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7521669846052840451:2730]: ListQueueTags { RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" Tags { Key: "k46" Value: "v" } } RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k46" Value: "v" } 2025-06-30T09:20:31.049625Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] HandleResponse: { ListQueueTags { RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" Tags { Key: "k46" Value: "v" } } RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k46" Value: "v" } }, status: OK 2025-06-30T09:20:31.049647Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Sending reply from proxy actor: { ListQueueTags { RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" Tags { Key: "k46" Value: "v" } } RequestId: "b2a20a5d-7e961ff1-d3808ae5-bade61e5" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k46" Value: "v" } } 2025-06-30T09:20:31.049722Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [ListQueueTags] requestId [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Got succesfult GRPC response. 2025-06-30T09:20:31.049772Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListQueueTags] requestId [b2a20a5d-7e961ff1-d3808ae5-bade61e5] reply ok 2025-06-30T09:20:31.049817Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [ListQueueTags] requestId [b2a20a5d-7e961ff1-d3808ae5-bade61e5] Send metering event. 
HttpStatusCode: 200 IsFifo: 1 FolderId: folder4 RequestSizeInBytes: 530 ResponseSizeInBytes: 197 SourceAddress: 5860:387c:f970:0:4060:387c:f970:0 ResourceId: 000000000000000301v0 Action: ListQueueTags 2025-06-30T09:20:31.049875Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:36830) <- (200 , 20 bytes) 2025-06-30T09:20:31.049944Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:36830) connection closed Http output full {"Tags":{"k46":"v"}} |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::AutoDropping [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:24.501446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:24.501476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.501482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:24.501487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:24.501499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:24.501503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:24.501513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.501529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:24.501643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:24.501713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:24.516783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:24.516808Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:24.520521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:24.520579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:24.520622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:24.522598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:24.522666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:24.522798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.522858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:24.523588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.523627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:24.523854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.523864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.523888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:24.523895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:24.523901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:24.523918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.525211Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:24.550363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:24.550435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.550493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:24.550501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:24.550543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, 
reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:24.550556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:24.551463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.551521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:24.551598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.551610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:24.551616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:24.551621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:24.552255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.552274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:24.552295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:24.552800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.552814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.552822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.552830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:24.553559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:24.554063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:24.554113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation 
RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:24.554296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.554327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:24.554336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.554397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:24.554405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.554441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:24.554455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:24.554940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.554951Z node 1 :FLAT_TX_SCHEMESHARD ... 
74976710761 2025-06-30T09:20:31.229468Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-30T09:20:31.229475Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-30T09:20:31.229480Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-30T09:20:31.229921Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:31.229959Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:20:31.229966Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:478:2437] TestWaitNotification: OK eventTxId 102 2025-06-30T09:20:31.230157Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:20:31.230186Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 47us result status StatusSuccess 2025-06-30T09:20:31.230304Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 
72057594046678944, at schemeshard: 72057594046678944 desc: 1 2025-06-30T09:20:31.230363Z node 5 :EXPORT DEBUG: schemeshard_export__forget.cpp:79: TExport::TTxForget, dropping export tables, info: { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Done WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2025-06-30T09:20:31.230918Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-30T09:20:31.230936Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:739: TExport::TTxProgress: Resume: id# 102 2025-06-30T09:20:31.230944Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:537: TExport::TTxProgress: Allocate txId: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2025-06-30T09:20:31.230953Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:31.230970Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 102, at schemeshard: 72057594046678944 2025-06-30T09:20:31.230974Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-30T09:20:31.230978Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:859: TExport::TTxProgress: OnAllocateResult: txId# 281474976710762, id# 102 2025-06-30T09:20:31.230983Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:529: TExport::TTxProgress: Drop propose: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, txId# 281474976710762 2025-06-30T09:20:31.231000Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:31.231683Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "export-102" } Internal: true } TxId: 281474976710762 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:31.231710Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:28: TRmDir Propose, path: /MyRoot/export-102, pathId: 0, opId: 281474976710762:0, at schemeshard: 72057594046678944 2025-06-30T09:20:31.231736Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976710762:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-102', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:36, at schemeshard: 72057594046678944 2025-06-30T09:20:31.232315Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976710762, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 
72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:36" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761, at schemeshard: 72057594046678944 2025-06-30T09:20:31.232370Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710762, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-102', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:36, operation: DROP DIRECTORY, path: /MyRoot/export-102 2025-06-30T09:20:31.232412Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6788: Handle: TEvModifySchemeTransactionResult: txId# 281474976710762, status# StatusPathDoesNotExist 2025-06-30T09:20:31.232424Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6790: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:36" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761 2025-06-30T09:20:31.232435Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-30T09:20:31.232440Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:920: TExport::TTxProgress: OnModifyResult: txId# 281474976710762, status# StatusPathDoesNotExist 2025-06-30T09:20:31.232445Z node 5 :EXPORT TRACE: schemeshard_export__create.cpp:921: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:36" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761 2025-06-30T09:20:31.232479Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:1102: TExport::TTxProgress: Wait for completion: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, itemIdx# 4294967295, txId# 281474976710761 2025-06-30T09:20:31.232844Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-30T09:20:31.232868Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-30T09:20:31.232883Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-30T09:20:31.232892Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710761 2025-06-30T09:20:31.232895Z node 5 :EXPORT DEBUG: 
schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-30T09:20:31.232899Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-30T09:20:31.232902Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-30T09:20:31.233306Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 102 2025-06-30T09:20:31.233366Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:20:31.233374Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:20:31.233453Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:20:31.233467Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:20:31.233473Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:694:2648] TestWaitNotification: OK eventTxId 102 |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true >> TBlobStorageStoragePoolMonTest::SizeClassCalcTest [GOOD] >> CommitOffset::Commit_WithSession_ToPastParentPartition [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::GoodRequestCreateStream [GOOD] Test command err: 2025-06-30T09:20:05.828773Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669732070067632:2150];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:05.828828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004ad7/r3tmp/tmpuLHkL3/pdisk_1.dat 2025-06-30T09:20:05.908953Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26907, node 1 2025-06-30T09:20:05.930108Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:05.930121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:05.930123Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:05.930172Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:05.934508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:05.934550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:05.935652Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30770 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:05.983495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:05.988032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 TClient is connected to server localhost:30770 2025-06-30T09:20:06.011344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:20:06.013057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:20:06.013744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-30T09:20:06.016608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:20:06.017935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:06.084947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.110451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:06.130425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.145362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.158642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.228648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.250323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.263109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:06.276155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:06.360387Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669736365036170:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.360422Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.360637Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669736365036182:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:06.361653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:06.364800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715673, at schemeshard: 72057594046644480 2025-06-30T09:20:06.364982Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669736365036184:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715673 completed, doublechecking } 2025-06-30T09:20:06.437772Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669736365036235:2858] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:06.540008Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz025xpq9eba8erc9rbh35mb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTBhNDAwZDQtMzZmZWZiMDEtMTU3MTAxZjUtYzZjMGVjMTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:20:06.556222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 720575 ... _impl.cpp:2898: [PQ: 72075186224037909] server connected, pipe [8:7521669846222804088:2481], now have 1 active actors on pipe 2025-06-30T09:20:31.528995Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037910] server connected, pipe [8:7521669846222804089:2482], now have 1 active actors on pipe 2025-06-30T09:20:31.529002Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037911] server connected, pipe [8:7521669846222804090:2483], now have 1 active actors on pipe 2025-06-30T09:20:31.529793Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStream] requestId [8a4e2801-a17eb7e4-e7d595c3-e4562ae9] reply ok 2025-06-30T09:20:31.529938Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:34684) <- (200 , 1672 bytes) 2025-06-30T09:20:31.530025Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:34684) connection closed 2025-06-30T09:20:31.530164Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037911] server disconnected, pipe [8:7521669846222804090:2483] destroyed 2025-06-30T09:20:31.530334Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037907] server disconnected, pipe [8:7521669846222804086:2479] destroyed 2025-06-30T09:20:31.530349Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037908] server disconnected, pipe [8:7521669846222804087:2480] destroyed 2025-06-30T09:20:31.530356Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037909] server disconnected, pipe [8:7521669846222804088:2481] destroyed 2025-06-30T09:20:31.530362Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037910] server disconnected, pipe [8:7521669846222804089:2482] destroyed Http output full 
{"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1751275232,"StorageLimitMb":0,"StreamName":"testtopic"}} 200 {"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1751275232,"StorageLimitMb":0,"StreamName":"testtopic"}} 2025-06-30T09:20:31.531354Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:34686) incoming connection opened 2025-06-30T09:20:31.531395Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: 
(#37,[::1]:34686) -> (POST /Root, 30 bytes) 2025-06-30T09:20:31.531466Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [58d7:7efc:4151:0:40d7:7efc:4151:0] request [DescribeStreamSummary] url [/Root] database [/Root] requestId: 5dd4a421-1974983-fe9b7ff-b50fb69a 2025-06-30T09:20:31.531610Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [DescribeStreamSummary] requestId [5dd4a421-1974983-fe9b7ff-b50fb69a] got new request from [58d7:7efc:4151:0:40d7:7efc:4151:0] database '/Root' stream 'testtopic' 2025-06-30T09:20:31.531768Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [DescribeStreamSummary] requestId [5dd4a421-1974983-fe9b7ff-b50fb69a] [auth] Authorized successfully 2025-06-30T09:20:31.531795Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [DescribeStreamSummary] requestId [5dd4a421-1974983-fe9b7ff-b50fb69a] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-30T09:20:31.532175Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStreamSummary] requestId [5dd4a421-1974983-fe9b7ff-b50fb69a] reply ok 2025-06-30T09:20:31.532258Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:34686) <- (200 , 239 bytes) 2025-06-30T09:20:31.532316Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:34686) connection closed Http output full {"StreamDescriptionSummary":{"RetentionPeriodHours":24,"OpenShardCount":5,"StreamArn":"testtopic","ConsumerCount":0,"KeyId":"","StreamStatus":"ACTIVE","EncryptionType":"NONE","StreamCreationTimestamp":1751275.231,"StreamName":"testtopic"}} 200 {"StreamDescriptionSummary":{"RetentionPeriodHours":24,"OpenShardCount":5,"StreamArn":"testtopic","ConsumerCount":0,"KeyId":"","StreamStatus":"ACTIVE","EncryptionType":"NONE","StreamCreationTimestamp":1751275.231,"StreamName":"testtopic"}} 2025-06-30T09:20:31.532636Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:34688) incoming connection opened 2025-06-30T09:20:31.532661Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:34688) -> (POST /Root, 30 bytes) 2025-06-30T09:20:31.532725Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [d8d4:b3fa:4151:0:c0d4:b3fa:4151:0] request [DescribeStream] url [/Root] database [/Root] requestId: c63cbc55-38c1533b-19f8b779-ddc68ee8 2025-06-30T09:20:31.532801Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [DescribeStream] requestId [c63cbc55-38c1533b-19f8b779-ddc68ee8] got new request from [d8d4:b3fa:4151:0:c0d4:b3fa:4151:0] database '/Root' stream 'testtopic' 2025-06-30T09:20:31.532904Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [DescribeStream] requestId [c63cbc55-38c1533b-19f8b779-ddc68ee8] [auth] Authorized successfully 2025-06-30T09:20:31.532925Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [DescribeStream] requestId [c63cbc55-38c1533b-19f8b779-ddc68ee8] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-30T09:20:31.533143Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037907] server connected, pipe [8:7521669846222804113:2491], now have 1 active actors on pipe 2025-06-30T09:20:31.533150Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037910] server connected, pipe [8:7521669846222804116:2494], now have 1 active actors on pipe 2025-06-30T09:20:31.533164Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037911] server connected, pipe [8:7521669846222804117:2495], now have 1 active actors on pipe 2025-06-30T09:20:31.533178Z node 8 
:PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037908] server connected, pipe [8:7521669846222804114:2492], now have 1 active actors on pipe 2025-06-30T09:20:31.533193Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037909] server connected, pipe [8:7521669846222804115:2493], now have 1 active actors on pipe 2025-06-30T09:20:31.533427Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStream] requestId [c63cbc55-38c1533b-19f8b779-ddc68ee8] reply ok 2025-06-30T09:20:31.533520Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037910] server disconnected, pipe [8:7521669846222804116:2494] destroyed 2025-06-30T09:20:31.533520Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037907] server disconnected, pipe [8:7521669846222804113:2491] destroyed 2025-06-30T09:20:31.533524Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037908] server disconnected, pipe [8:7521669846222804114:2492] destroyed 2025-06-30T09:20:31.533528Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037909] server disconnected, pipe [8:7521669846222804115:2493] destroyed 2025-06-30T09:20:31.533531Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037911] server disconnected, pipe [8:7521669846222804117:2495] destroyed 2025-06-30T09:20:31.533538Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:34688) <- (200 , 1672 bytes) Http output full {"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1751275232,"StorageLimitMb":0,"StreamName":"testtopic"}} 2025-06-30T09:20:31.533573Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:34688) connection closed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 
72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:29.317259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:29.317290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:29.317297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:29.317302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:29.317317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:29.317322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:29.317333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:29.317349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:29.317482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:29.317569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:29.331056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:29.331087Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:29.333956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:29.334010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:29.334048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:29.336119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:29.336198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:29.336323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:29.336391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:29.337035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:29.337074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:29.337363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:29.337373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:29.337398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:29.337408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:29.337414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:29.337433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:29.338858Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:29.358757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:29.358842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:29.358911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:29.358920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:29.358971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:29.358987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:29.359897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:29.359948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: 
StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:29.359998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:29.360010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:29.360017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:29.360023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:29.360547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:29.360562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:29.360573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:29.361043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:29.361057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:29.361064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:29.361071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:29.361949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:29.362714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:29.362821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:29.363067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:29.363104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:29.363127Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:29.363207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:29.363218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:29.363251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:29.363263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:29.364148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:29.364174Z node 1 :FLAT_TX_SCHEMESHARD ... Id: 2] 2025-06-30T09:20:31.662985Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 128 -> 134 2025-06-30T09:20:31.663232Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:20:31.663335Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:20:31.663545Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:20:31.663552Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:104: TDropExtSubdomain TDeleteExternalShards, operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:31.663570Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 134 -> 135 2025-06-30T09:20:31.663586Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:31.663592Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:20:31.663880Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:31.663887Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:31.663909Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:20:31.663927Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
2025-06-30T09:20:31.663931Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-30T09:20:31.663934Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-30T09:20:31.663973Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:20:31.663977Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:400: [72057594046678944] TDeleteParts opId# 102:0 ProgressState 2025-06-30T09:20:31.663981Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 135 -> 240 2025-06-30T09:20:31.664077Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:20:31.664087Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:20:31.664091Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:20:31.664099Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-30T09:20:31.664104Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:20:31.664184Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:20:31.664191Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:20:31.664194Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:20:31.664196Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-30T09:20:31.664199Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:20:31.664205Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-30T09:20:31.664796Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: 
TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:20:31.664805Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:20:31.664814Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:20:31.664818Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:20:31.664824Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:20:31.664828Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:20:31.664833Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:20:31.664839Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:20:31.664845Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:20:31.664849Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:20:31.664858Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:20:31.664897Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:20:31.664901Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:20:31.664913Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:20:31.664944Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:20:31.664948Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:20:31.664954Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:31.664986Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:20:31.665192Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:20:31.665568Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:20:31.665579Z node 7 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-30T09:20:31.665616Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:20:31.665622Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:20:31.665680Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:20:31.665691Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:20:31.665694Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [7:343:2332] TestWaitNotification: OK eventTxId 102 2025-06-30T09:20:31.665749Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:20:31.665775Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 36us result status StatusPathDoesNotExist 2025-06-30T09:20:31.665803Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TGRpcCmsTest::DescribeOptionsTest [GOOD] |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TBlobStorageStoragePoolMonTest::SizeClassCalcTest [GOOD] |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::DescribeOptionsTest [GOOD] Test command err: 2025-06-30T09:20:31.895476Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669847311643149:2154];send_to=[0:7307199536658146131:7762515]; 
2025-06-30T09:20:31.895498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004899/r3tmp/tmp12qlQy/pdisk_1.dat 2025-06-30T09:20:31.977215Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:31.993073Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 21490, node 1 2025-06-30T09:20:31.995255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:31.995290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:31.996826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:31.998958Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:31.998972Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:31.998975Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:31.999036Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25059 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:32.040523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
TClient is connected to server localhost:25059 2025-06-30T09:20:32.063860Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:131: TTxProcessor(tenants) is now locking 2025-06-30T09:20:32.063874Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:143: TTxProcessor(tenants) is now locked by parent 2025-06-30T09:20:32.065799Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:102: TTxProcessor(tenants) is now active 2025-06-30T09:20:32.074304Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285140, Sender [1:7521669851606611045:2275], Recipient [1:7521669847311643464:2197]: NKikimr::NConsole::TEvConsole::TEvDescribeTenantOptionsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:43546" } 2025-06-30T09:20:32.074324Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:964: StateWork, processing event TEvConsole::TEvDescribeTenantOptionsRequest 2025-06-30T09:20:32.074724Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3335: Send TEvConsole::TEvDescribeTenantOptionsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.DescribeDatabaseOptionsResult] { storage_units { kind: "hdd2" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "hdd" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "hdd1" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "ssd" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "test" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } availability_zones { name: "dc-1" labels { key: "collocation" value: "disabled" } labels { key: "fixed_data_center" value: "DC-1" } } availability_zones { name: "any" labels { key: "any_data_center" value: "true" } labels { key: "collocation" value: "disabled" } } computational_units { kind: "slot" labels { key: "slot_type" value: "default" } labels { key: "type" value: "dynamic_slot" } allowed_availability_zones: "any" allowed_availability_zones: "dc-1" } } } } } >> TExportToS3Tests::CancelUponTransferringMultiShardTableShouldSucceed [GOOD] |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TestYmqHttpProxy::TestSendMessageBatch [GOOD] >> TExportToS3Tests::CancelUponTransferringSingleTableShouldSucceed [GOOD] >> TExportToS3Tests::CancelUponTransferringManyTablesShouldSucceed >> THiveTest::TestBridgeDisconnect [GOOD] >> THiveTest::TestBridgeDisconnectWithReboots >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true [GOOD] |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch [GOOD] >> TestKinesisHttpProxy::TestWrongRequest [GOOD] >> TestYmqHttpProxy::TestListDeadLetterSourceQueues [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: 
[1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:28.171513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:28.171536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:28.171540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:28.171544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:28.171556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:28.171559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:28.171566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:28.171578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:28.171670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:28.171732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:28.184698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:28.184726Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:28.188396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:28.188466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:28.188517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:28.190513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:28.190587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:28.190703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.190809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:28.191712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:28.191763Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:28.192100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:28.192115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:28.192142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:28.192152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:28.192158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:28.192184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.193836Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:28.209195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:28.209271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.209324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:28.209330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:28.209364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:28.209376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:28.210113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.210147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:28.210192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.210200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:28.210204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:28.210208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:28.210520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.210528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:28.210532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:28.210804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.210812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:28.210817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.210823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:28.211325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:28.211744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:28.211786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:28.211939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:28.211960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:28.211977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.212026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:28.212031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:28.212058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:28.212070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:28.212435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:28.212442Z node 1 :FLAT_TX_SCHEMESHARD ... HARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 116, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-30T09:20:33.153013Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 116, path id: [OwnerId: 72075186233409546, LocalPathId: 9] 2025-06-30T09:20:33.153034Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-30T09:20:33.153041Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:730:2630], at schemeshard: 72075186233409546, txId: 116, path id: 1 2025-06-30T09:20:33.153048Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:730:2630], at schemeshard: 72075186233409546, txId: 116, path id: 9 2025-06-30T09:20:33.153243Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-30T09:20:33.153279Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 116:0 ProgressState, operation type: TxCreateTable, at tablet# 72075186233409546 2025-06-30T09:20:33.153342Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 116:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72075186233409546 OwnerIdx: 11 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 9 BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-30T09:20:33.153514Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 1 Version: 16 PathOwnerId: 72075186233409546, cookie: 116 2025-06-30T09:20:33.153532Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: 
Owner: 72075186233409546 Generation: 3 LocalPathId: 1 Version: 16 PathOwnerId: 72075186233409546, cookie: 116 2025-06-30T09:20:33.153538Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 116 2025-06-30T09:20:33.153545Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 116, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 16 2025-06-30T09:20:33.153552Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 13 2025-06-30T09:20:33.153871Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 9 Version: 1 PathOwnerId: 72075186233409546, cookie: 116 2025-06-30T09:20:33.153889Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 9 Version: 1 PathOwnerId: 72075186233409546, cookie: 116 2025-06-30T09:20:33.153894Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 116 2025-06-30T09:20:33.153900Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 116, pathId: [OwnerId: 72075186233409546, LocalPathId: 9], version: 1 2025-06-30T09:20:33.153906Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 9] was 4 2025-06-30T09:20:33.153924Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 116, ready parts: 0/1, is published: true 2025-06-30T09:20:33.154669Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 116:0 from tablet: 72075186233409546 to tablet: 72057594037968897 cookie: 72075186233409546:11 msg type: 268697601 2025-06-30T09:20:33.154711Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 116, partId: 0, tablet: 72057594037968897 2025-06-30T09:20:33.154719Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1790: TOperation RegisterRelationByShardIdx, TxId: 116, shardIdx: 72075186233409546:11, partId: 0 2025-06-30T09:20:33.154841Z node 7 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72075186233409546 OwnerIdx: 11 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 9 BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-30T09:20:33.154917Z node 7 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72075186233409546, OwnerIdx 11, type DataShard, boot OK, tablet id 72075186233409556 2025-06-30T09:20:33.154946Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5958: Handle TEvCreateTabletReply at schemeshard: 72075186233409546 message: Status: OK Owner: 
72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-30T09:20:33.154953Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1804: TOperation FindRelatedPartByShardIdx, TxId: 116, shardIdx: 72075186233409546:11, partId: 0 2025-06-30T09:20:33.154973Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 116:0, at schemeshard: 72075186233409546, message: Status: OK Owner: 72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-30T09:20:33.154983Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 116:0 HandleReply TEvCreateTabletReply, at tabletId: 72075186233409546 2025-06-30T09:20:33.154990Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:180: TCreateParts opId# 116:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-30T09:20:33.155017Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 116:0 2 -> 3 2025-06-30T09:20:33.155552Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 116 2025-06-30T09:20:33.155809Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 116 2025-06-30T09:20:33.160101Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-30T09:20:33.160201Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-30T09:20:33.160216Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_table.cpp:197: TCreateTable TConfigureParts operationId# 116:0 ProgressState at tabletId# 72075186233409546 2025-06-30T09:20:33.160238Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:217: TCreateTable TConfigureParts operationId# 116:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409556 seqNo: 3:8 2025-06-30T09:20:33.160364Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:233: TCreateTable TConfigureParts operationId# 116:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409556 message: TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 677 RawX2: 30064773660 } TxBody: "\n\236\004\n\007Table11\020\t\032\r\n\003key\030\002 \001(\000@\000\032\020\n\005Value\030\200$ \002(\000@\000(\001:\262\003\022\253\003\010\200\200\200\002\020\254\002\030\364\003 \200\200\200\010(\0000\200\200\200 8\200\200\200\010@\2008H\000RX\010\000\020\000\030\010 \010(\200\200\200@0\377\377\377\377\0178\001B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen1P\nX\200\200\001`nh\000p\000Rb\010\001\020\200\200\200\024\030\005 \020(\200\200\200\200\0020\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen2P\nX\200\200\001`nh\200\200\200\004p\200\200\200\004Rc\010\002\020\200\200\200\310\001\030\005 
\020(\200\200\200\200@0\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen3P\nX\200\200\001`nh\200\200\200(p\200\200\200(X\001`\005j$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionr\017compaction_gen0z\017compaction_gen0\202\001\004scan\210\001\200\200\200\010\220\001\364\003\230\0012\270\001\2008\300\001\006R\002\020\001J\026/MyRoot/USER_0/Table11\242\001\006\001\000\000\000\000\200\252\001\000\260\001\001\270\001\000\210\002\001\222\002\013\t\n\000\220\000\000\020\000\001\020\t:\004\010\003\020\010" TxId: 116 ExecLevel: 0 Flags: 0 SchemeShardId: 72075186233409546 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } SubDomainPathId: 1 2025-06-30T09:20:33.161965Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 116:0 from tablet: 72075186233409546 to tablet: 72075186233409556 cookie: 72075186233409546:11 msg type: 269549568 2025-06-30T09:20:33.162046Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 116, partId: 0, tablet: 72075186233409556 TestModificationResult got TxId: 116, wait until txId: 116 TestModificationResults wait txId: 117 2025-06-30T09:20:33.168360Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table12" Columns { Name: "key" Type: "Uint32" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "key" } } TxId: 117 TabletId: 72075186233409546 , at schemeshard: 72075186233409546 2025-06-30T09:20:33.169071Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 117, response: Status: StatusQuotaExceeded Reason: "Request exceeded a limit on the number of schema operations, try again later." 
TxId: 117 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:20:33.169131Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 117, database: /MyRoot/USER_0, subject: , status: StatusQuotaExceeded, reason: Request exceeded a limit on the number of schema operations, try again later., operation: CREATE TABLE, path: /MyRoot/USER_0/Table12 TestModificationResult got TxId: 117, wait until txId: 117 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> CommitOffset::Commit_WithSession_ToPastParentPartition [GOOD] Test command err: 2025-06-30T09:18:52.839080Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669421871078329:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:52.841051Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:52.917464Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004035/r3tmp/tmpUhDK89/pdisk_1.dat 2025-06-30T09:18:52.955373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:52.955408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:52.960454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:52.971329Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12844, node 1 2025-06-30T09:18:52.981397Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004035/r3tmp/yandexe2axHo.tmp 2025-06-30T09:18:52.981408Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004035/r3tmp/yandexe2axHo.tmp 2025-06-30T09:18:52.981476Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004035/r3tmp/yandexe2axHo.tmp 2025-06-30T09:18:52.981517Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:52.989458Z INFO: TTestServer started on Port 29529 GrpcPort 12844 TClient is connected to server localhost:29529 PQClient connected to localhost:12844 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:53.065544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:18:53.074320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:18:53.090434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:18:53.212435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:18:53.521298Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669426166046332:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.521337Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.521749Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669426166046359:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.522887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:53.527163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-30T09:18:53.527373Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669426166046361:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:18:53.582361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.596795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.616588Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669426166046573:2530] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:53.634627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.640086Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669426166046583:2323], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:53.640285Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NmM2ZTM4NjUtZTI5YWZmZC03NDNkZWZiLTk5YTk3ZTM5, ActorId: [1:7521669426166046329:2296], ActorState: ExecuteState, TraceId: 01jz023pje2x5ehk86krdn3j51, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:53.640833Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669426166046718:2611] 2025-06-30T09:18:53.841189Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:18:57.839306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669421871078329:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:57.839338Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-30T09:18:58.900776Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-30T09:18:58.915529Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:18:58.916075Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521669447640883390:2684], Recipient [1:7521669421871078645:2177]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:58.916083Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:58.916087Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:18:58.916103Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521669447640883386:2681], Recipient [1:7521669421871078645:2177]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-30T09:18:58.916106Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:18:58.927338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { Par ... [72075186224037893][test-topic] pipe [7:7521669843061071407:3032] client test-consumer disconnected session test-consumer_7_1_5241401567894132557_v1 2025-06-30T09:20:31.985399Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669825881201047:2654], Partition 1, Sender [0:0:0], Recipient [7:7521669825881201122:2661], Cookie: 0 2025-06-30T09:20:31.985424Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669825881201122:2661]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:31.985430Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:31.985449Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:31.985476Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:31.985479Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:31.985487Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-30T09:20:31.985503Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669825881201044:2653], Partition 2, Sender [0:0:0], Recipient [7:7521669825881201120:2659], Cookie: 0 2025-06-30T09:20:31.985519Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669825881201120:2659]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:31.985521Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:31.985526Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:31.985532Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:31.985534Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:31.985537Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:31.994108Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669817291265561:2416], Partition 0, Sender [0:0:0], Recipient [7:7521669817291265618:2419], Cookie: 0 2025-06-30T09:20:31.994137Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669817291265618:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:31.994143Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:31.994160Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:31.994192Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:31.994195Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:31.994203Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:32.047347Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669838766103454:2800], Partition 3, Sender [0:0:0], Recipient [7:7521669838766103525:2808], Cookie: 0 2025-06-30T09:20:32.047378Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669838766103525:2808]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:32.047384Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:32.047398Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:32.047425Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:32.047427Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:32.047433Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:32.047791Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669838766103438:2798], Partition 4, Sender [0:0:0], Recipient [7:7521669838766103522:2805], Cookie: 0 2025-06-30T09:20:32.047799Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669838766103522:2805]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:32.047801Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:32.047804Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:32.047810Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:32.047812Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:32.047814Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:32.085739Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669825881201047:2654], Partition 1, Sender [0:0:0], Recipient [7:7521669825881201122:2661], Cookie: 0 2025-06-30T09:20:32.085754Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669825881201044:2653], Partition 2, Sender [0:0:0], Recipient [7:7521669825881201120:2659], Cookie: 0 2025-06-30T09:20:32.085765Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669825881201122:2661]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:32.085768Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669825881201120:2659]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:32.085770Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:32.085774Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:32.085784Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:32.085788Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:32.085817Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:32.085819Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:32.085819Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:32.085822Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:32.085825Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:32.085828Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-30T09:20:32.094437Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7521669817291265561:2416], Partition 0, Sender [0:0:0], Recipient [7:7521669817291265618:2419], Cookie: 0 2025-06-30T09:20:32.094462Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7521669817291265618:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:32.094467Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:32.094480Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:32.094504Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:32.094506Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:32.094516Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-30T09:20:32.133901Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [7:7521669838766103438:2798], Partition 4, Sender [7:7521669838766103537:2811], Recipient [7:7521669838766103522:2805], Cookie: 0 2025-06-30T09:20:32.133914Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [7:7521669838766103454:2800], Partition 3, Sender [7:7521669838766103544:2814], Recipient [7:7521669838766103525:2808], Cookie: 0 2025-06-30T09:20:32.133925Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188544, Sender [7:7521669838766103537:2811], Recipient [7:7521669838766103522:2805]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-30T09:20:32.133927Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188544, Sender [7:7521669838766103544:2814], Recipient [7:7521669838766103525:2808]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-30T09:20:32.133931Z node 7 :PERSQUEUE TRACE: partition.h:630: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-30T09:20:32.133933Z node 7 :PERSQUEUE TRACE: partition.h:630: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestSendMessageBatch [GOOD] Test command err: 2025-06-30T09:20:08.664486Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669748157776793:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:08.665221Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004ac6/r3tmp/tmpLAE3CV/pdisk_1.dat 2025-06-30T09:20:08.856280Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669748157776663:2080] 1751275208659123 != 1751275208659126 2025-06-30T09:20:08.877503Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:08.878267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:08.878287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 1888, node 1 2025-06-30T09:20:08.907062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:08.927102Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:08.927114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:08.927117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:08.927167Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28814 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:08.987825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:08.999266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 TClient is connected to server localhost:28814 waiting... 2025-06-30T09:20:09.055311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-30T09:20:09.059380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:20:09.060108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-30T09:20:09.073798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.107122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:20:09.129540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:09.155840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.213976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.225743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.242204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.266942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.290936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.307735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:09.643760Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669752452745314:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:09.643786Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669752452745325:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:09.643796Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:09.644987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:09.648194Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669752452745328:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715673 completed, doublechecking } 2025-06-30T09:20:09.663040Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:09.703243Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669752452745388:2860] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:09.768715Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz0260xaf946kbq8r0qnyzj2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDMxYWFmZjQtNWRmNjYyZjgtYjQ3ZTBhOWEtYWRhNmU2ZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:20:09.789101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 720575940466444 ... 8062135ae945eca30bf" MessageId: "71e13a1f-6207671b-94fddea0-17d3b9f2" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "c7110b40-c1e03a7d-a3265384-2816e4dc" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true } 2025-06-30T09:20:33.160871Z node 7 :SQS TRACE: service.cpp:1483: Dec local leader ref for actor [7:7521669852272222952:3494]. Found: 1 2025-06-30T09:20:33.160894Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse SendMessageBatch { RequestId: "c7110b40-c1e03a7d-a3265384-2816e4dc" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "b592ceb1-cc080408-66d009da-4d7c85e9" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "71e13a1f-6207671b-94fddea0-17d3b9f2" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "c7110b40-c1e03a7d-a3265384-2816e4dc" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true 2025-06-30T09:20:33.160934Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7521669852272222948:2457]: SendMessageBatch { RequestId: "c7110b40-c1e03a7d-a3265384-2816e4dc" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "b592ceb1-cc080408-66d009da-4d7c85e9" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "71e13a1f-6207671b-94fddea0-17d3b9f2" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." 
ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "c7110b40-c1e03a7d-a3265384-2816e4dc" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true 2025-06-30T09:20:33.161049Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [c7110b40-c1e03a7d-a3265384-2816e4dc] HandleResponse: { SendMessageBatch { RequestId: "c7110b40-c1e03a7d-a3265384-2816e4dc" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "b592ceb1-cc080408-66d009da-4d7c85e9" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "71e13a1f-6207671b-94fddea0-17d3b9f2" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "c7110b40-c1e03a7d-a3265384-2816e4dc" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true }, status: OK 2025-06-30T09:20:33.161078Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [c7110b40-c1e03a7d-a3265384-2816e4dc] Sending reply from proxy actor: { SendMessageBatch { RequestId: "c7110b40-c1e03a7d-a3265384-2816e4dc" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "b592ceb1-cc080408-66d009da-4d7c85e9" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "71e13a1f-6207671b-94fddea0-17d3b9f2" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "c7110b40-c1e03a7d-a3265384-2816e4dc" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true } 2025-06-30T09:20:33.161172Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [SendMessageBatch] requestId [c7110b40-c1e03a7d-a3265384-2816e4dc] Got succesfult GRPC response. 2025-06-30T09:20:33.161232Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [SendMessageBatch] requestId [c7110b40-c1e03a7d-a3265384-2816e4dc] reply ok 2025-06-30T09:20:33.161283Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [SendMessageBatch] requestId [c7110b40-c1e03a7d-a3265384-2816e4dc] Send metering event. 
HttpStatusCode: 200 IsFifo: 1 FolderId: folder4 RequestSizeInBytes: 1063 ResponseSizeInBytes: 644 SourceAddress: 18fa:5b7a:a330:0:fa:5b7a:a330:0 ResourceId: 000000000000000101v0 Action: SendMessageBatch 2025-06-30T09:20:33.161332Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:47632) <- (200 , 465 bytes) 2025-06-30T09:20:33.161453Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:47632) connection closed Http output full {"Successful":[{"SequenceNumber":"1","Id":"Id-0","MD5OfMessageBody":"94a29778a1f1f41bf68142847b2e6106","MD5OfMessageAttributes":"3d778967e1fa431d626ffb890c486385","MessageId":"b592ceb1-cc080408-66d009da-4d7c85e9"},{"SequenceNumber":"2","Id":"Id-1","MD5OfMessageBody":"3bf7e6d806a0b8062135ae945eca30bf","MessageId":"71e13a1f-6207671b-94fddea0-17d3b9f2"}],"Failed":[{"Message":"No MessageGroupId parameter.","Id":"Id-2","Code":"MissingParameter","SenderFault":true}]} 2025-06-30T09:20:33.161585Z node 7 :SQS TRACE: executor.cpp:256: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Compile program response: { Status: 48 MiniKQLCompileResults { CompiledProgram: "\037\016\nFlags\010Name\010Args\016Payload\022Parameter\014Offset\032SentTimestamp\006\002\206\202\t\211\004\202\203\005@\206\205\004\207\203\010\207\203\010\026\032$SetResult\000\003?\002\020messages\t\211\004?\016\205\004?\016\203\014\020List$Truncated\203\004\030Member\000\t\211\026?\026\203\005\004\200\205\004\203\004\203\004\026\032\213\010\203\010\203\010\203\010\203\010\213\010?$?&\203\010\203\010\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?\034 \000\001\205\000\000\000\000\001\032\000\000\000\000\000\000\000?\014\005?\"\003?\036\010\003? \006\003\013?,\t\351\000?$\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?R\003?T(QUEUE_ID_NUMBER_HASH\003\022\000\t\351\000?&\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?h\003?j\036QUEUE_ID_NUMBER\003\022\000\t\351\000?(\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?~\003?\200\022TIME_FROM\003\022\000\003?*\000\010\013?2?`?v\003?.\177\377\377\377\377\377\377\377\377\003?0\177\377\377\377\377\377\377\377\377\014\003?4\000\003?6\002\003?8\000\003?:\000\006\010?>\003\203\014\000\003\203\014\000\003\203\014\000\003\203\014\000\017\003?@\000\377\007\003?\030\000\002\001\000/" } } 2025-06-30T09:20:33.161599Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] compilation duration: 1ms 2025-06-30T09:20:33.161614Z node 7 :SQS DEBUG: queue_leader.cpp:464: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) has been prepared 2025-06-30T09:20:33.161617Z node 7 :SQS DEBUG: queue_leader.cpp:514: Request [] Executing compiled query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) 2025-06-30T09:20:33.161632Z node 7 :SQS DEBUG: executor.cpp:83: Request [] Starting executor actor for query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID). 
Mode: COMPILE_AND_EXEC 2025-06-30T09:20:33.161651Z node 7 :SQS TRACE: executor.cpp:154: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Serializing params: {"QUEUE_ID_NUMBER": 2, "QUEUE_ID_NUMBER_HASH": 17472595041006102391, "SHARD": 0, "QUEUE_ID_NUMBER_AND_SHARD_HASH": 12311263855443095412, "TIME_FROM": 0} 2025-06-30T09:20:33.161731Z node 7 :SQS TRACE: executor.cpp:203: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Execute program: { Transaction { MiniKQLTransaction { Mode: COMPILE_AND_EXEC Program { Bin: "\037\016\nFlags\010Name\010Args\016Payload\022Parameter\014Offset\032SentTimestamp\006\002\206\202\t\211\004\202\203\005@\206\205\004\207\203\010\207\203\010\026\032$SetResult\000\003?\002\020messages\t\211\004?\016\205\004?\016\203\014\020List$Truncated\203\004\030Member\000\t\211\026?\026\203\005\004\200\205\004\203\004\203\004\026\032\213\010\203\010\203\010\203\010\203\010\213\010?$?&\203\010\203\010\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?\034 \000\001\205\000\000\000\000\001\032\000\000\000\000\000\000\000?\014\005?\"\003?\036\010\003? \006\003\013?,\t\351\000?$\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?R\003?T(QUEUE_ID_NUMBER_HASH\003\022\000\t\351\000?&\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?h\003?j\036QUEUE_ID_NUMBER\003\022\000\t\351\000?(\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?~\003?\200\022TIME_FROM\003\022\000\003?*\000\010\013?2?`?v\003?.\177\377\377\377\377\377\377\377\377\003?0\177\377\377\377\377\377\377\377\377\014\003?4\000\003?6\002\003?8\000\003?:\000\006\010?>\003\203\014\000\003\203\014\000\003\203\014\000\003\203\014\000\017\003?@\000\377\007\003?\030\000\002\001\000/" } Params { Bin: "\037\000\005\205\n\203\010\203\010\203\010\203\004\203\010> TestYmqHttpProxy::TestListQueueTags >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Query [GOOD] >> TPartSlice::TrivialMerge [GOOD] >> TPartSlice::SupersetByRowId [GOOD] >> TPartSlice::Subtract [GOOD] >> TPartSlice::UnsplitBorrow [GOOD] >> TPartSliceLoader::RestoreMissingSlice ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::TestWrongRequest [GOOD] Test command err: 2025-06-30T09:20:08.846886Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669745281962436:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:08.857127Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004aa0/r3tmp/tmpyZVcKA/pdisk_1.dat 2025-06-30T09:20:09.019644Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:09.020699Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669745281962245:2080] 1751275208824761 != 1751275208824764 TServer::EnableGrpc on GrpcPort 27271, node 1 2025-06-30T09:20:09.075045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:09.075077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-30T09:20:09.078319Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:09.078333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:09.078336Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:09.078378Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:09.078903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63208 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:09.138597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:09.151547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 TClient is connected to server localhost:63208 2025-06-30T09:20:09.203519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:20:09.210438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:20:09.215531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 
2025-06-30T09:20:09.218757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-30T09:20:09.220112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:09.312087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.341092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715663, at schemeshard: 72057594046644480 2025-06-30T09:20:09.342467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:09.367074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.387515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.411687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.434901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:09.458814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.487432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.501335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:09.640017Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669749576930892:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:09.640023Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669749576930904:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:09.640048Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:09.640978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:09.643414Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669749576930906:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715673 completed, doublechecking } 2025-06-30T09:20:09.734025Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669749576930958:2857] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:09.802704Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz0260x76pcpwqtmjnfqta3z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2NjYThkNmItODZmYzJmYWEtNTI3MjViYWEtYTgyYzczYzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:20:09.815661Z node 1 :FLAT_TX ... l { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:32.367238Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-06-30T09:20:32.367250Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 29ms 2025-06-30T09:20:32.367344Z node 8 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value 
{ Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:32.367346Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 29ms 2025-06-30T09:20:32.367407Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:32.367410Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-06-30T09:20:32.367418Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 29ms 2025-06-30T09:20:32.367459Z node 8 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:32.367536Z node 8 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 
ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-30T09:20:32.369471Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7521669851079405006:2400]: Pool not found 2025-06-30T09:20:32.369690Z node 8 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-06-30T09:20:32.412910Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7521669851079405004:2399]: Pool not found 2025-06-30T09:20:32.413055Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-06-30T09:20:32.414335Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521669851079405115:2416], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:32.414354Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [8:7521669851079405116:2417], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-06-30T09:20:32.414364Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:32.453312Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7521669851079405113:2415]: Pool not found 2025-06-30T09:20:32.453676Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:138: [cleanup removed queues] there are no queues to delete 2025-06-30T09:20:33.333247Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:50682) incoming connection opened 2025-06-30T09:20:33.333310Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:50682) -> (POST /, 87 bytes) 2025-06-30T09:20:33.333399Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [d84f:b1ff:4251:0:c04f:b1ff:4251:0] request [CreateStream] url [/] database [] requestId: 9bf04922-2160ffa4-68b22ce5-a2d81d7d 2025-06-30T09:20:33.333636Z node 8 :HTTP_PROXY WARN: http_req.cpp:948: http request [CreateStream] requestId [9bf04922-2160ffa4-68b22ce5-a2d81d7d] got new request with incorrect json from [d84f:b1ff:4251:0:c04f:b1ff:4251:0] database '' 2025-06-30T09:20:33.333693Z node 8 :HTTP_PROXY INFO: http_req.cpp:1211: http request [CreateStream] requestId [9bf04922-2160ffa4-68b22ce5-a2d81d7d] reply with status: BAD_REQUEST message: ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName 2025-06-30T09:20:33.333752Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:50682) <- (400 InvalidArgumentException, 135 bytes) 2025-06-30T09:20:33.333777Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:289: (#37,[::1]:50682) Request: POST / HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.CreateStream X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked { "ShardCount":5, "StreamName":"testtopic", "WrongStreamName":"WrongStreamName" } 2025-06-30T09:20:33.333791Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:50682) Response: HTTP/1.1 400 InvalidArgumentException Connection: close x-amzn-requestid: 9bf04922-2160ffa4-68b22ce5-a2d81d7d x-amz-crc32: 3053902336 Content-Type: application/x-amz-json-1.1 Content-Length: 135 2025-06-30T09:20:33.333918Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:50682) connection closed Http output full {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} 400 {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch [GOOD] Test command err: 2025-06-30T09:20:05.278369Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669733861536333:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:05.278483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004ae2/r3tmp/tmptQXYPa/pdisk_1.dat 2025-06-30T09:20:05.388528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:05.388580Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:05.389658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:05.395361Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:05.398848Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669733861536222:2080] 1751275205276950 != 1751275205276953 TServer::EnableGrpc on GrpcPort 23138, node 1 2025-06-30T09:20:05.411364Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:05.411380Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:05.411382Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:05.411436Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65249 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:05.447466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:65249 2025-06-30T09:20:05.472736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-30T09:20:05.474825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-30T09:20:05.475429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-30T09:20:05.486260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.552305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:05.577130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:05.601010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:05.614560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.626396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:05.640288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.699894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.718728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.730684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.768644Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669733861537578:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.768675Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.768911Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669733861537590:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.770015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:05.774183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710673, at schemeshard: 72057594046644480 2025-06-30T09:20:05.774282Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669733861537592:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-30T09:20:05.846856Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669733861537643:2857] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:05.904952Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jz025x472gndane427zf4nc6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWM0ZjZiYjktYzAzMDNkODYtYjVmNjA5ZmQtMjQzMDhhZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root waiting... 2025-06-30T09:20:05.928970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cp ... ITY_ID). Mode: COMPILE_AND_EXEC 2025-06-30T09:20:33.329988Z node 7 :SQS TRACE: executor.cpp:154: Request [3c9ae688-778acc05-ee935555-60058c69] Query(idx=CHANGE_VISIBILITY_ID) Queue [cloud4/000000000000000101v0] Serializing params: {"QUEUE_ID_NUMBER": 2, "QUEUE_ID_NUMBER_HASH": 17472595041006102391, "SHARD": 3, "QUEUE_ID_NUMBER_AND_SHARD_HASH": 1902612799742245372, "NOW": 1751275233329, "GROUPS_READ_ATTEMPT_IDS_PERIOD": 300000, "KEYS": [{"LockTimestamp": 1751275233287, "Offset": 1, "NewVisibilityDeadline": 1751275234329}, {"LockTimestamp": 1751275233305, "Offset": 2, "NewVisibilityDeadline": 1751275235329}]} 2025-06-30T09:20:33.330120Z node 7 :SQS TRACE: executor.cpp:203: Request [3c9ae688-778acc05-ee935555-60058c69] Query(idx=CHANGE_VISIBILITY_ID) Queue [cloud4/000000000000000101v0] Execute program: { Transaction { MiniKQLTransaction { Mode: COMPILE_AND_EXEC Program { Bin: "O\034\014Exists*NewVisibilityDeadline\014Offset\006Arg\014Member\nFlags\010Name\010Args\016Payload\022Parameter\006And\032LockTimestamp$VisibilityDeadline\014Invoke\t\211\004\206\202?\000\206\202\030Extend\000\006\002?\000\t\211\004\202\203\005@\206\205\n\203\014\207\203\010\203\014\203\010?\020(ChangeConddCurrentVisibilityDeadline\002\006\n$SetResult\000\003?\006\014result\t\211\006?\024\206\205\006?\020?\020?\020.\006\n?\032?\0220MapParameter\000\t\351\000?\034\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?&\003?(\010KEYS\003&\000\t\251\000?\032\016\000\005?\022\t\211\004?\010\207\203\014?\010 Coalesce\000\t\211\004?<\207\203\014\207\203\014*\000\t\211\006?B\203\005@\203\010?\0146\000\003?J\026LessOrEqual\t\351\000?L\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?X\003?Z\006NOW\003&\000\t\211\004?\014\207\205\004\207\203\010?\014.2\203\004\022\000\t\211\n?n\203\005\004\200\205\004\203\004\203\004.2\213\010\203\010\203\010\203\004?\020\203\004$SelectRow\000\003?t \000\001\205\000\000\000\000\001\030\000\000\000\000\000\000\000?l\005?z\003?v\020\003?x\026\003\013?\202\t\351\000?|\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?\226\003?\230> TPartSliceLoader::RestoreMissingSlice [GOOD] >> TPartSliceLoader::RestoreOneSlice [GOOD] >> 
TPartSliceLoader::RestoreMissingSliceFullScreen [GOOD] >> TPartSliceLoader::RestoreFromScreenIndexKeys [GOOD] >> TPartSliceLoader::RestoreFromScreenDataKeys [GOOD] >> TRowVersionRangesTest::SimpleInserts [GOOD] >> TRowVersionRangesTest::MergeFailLeft [GOOD] >> TRowVersionRangesTest::MergeFailRight [GOOD] >> TRowVersionRangesTest::MergeFailOuter [GOOD] >> TRowVersionRangesTest::MergeFailInner [GOOD] >> TRowVersionRangesTest::MergeExtendLeft [GOOD] >> TRowVersionRangesTest::MergeExtendLeftInner [GOOD] >> TRowVersionRangesTest::MergeExtendLeftComplete [GOOD] >> TRowVersionRangesTest::MergeExtendRight [GOOD] >> TRowVersionRangesTest::MergeExtendRightInner [GOOD] >> TRowVersionRangesTest::MergeExtendRightComplete [GOOD] >> TRowVersionRangesTest::MergeExtendBoth [GOOD] >> TRowVersionRangesTest::MergeHoleExact [GOOD] >> TRowVersionRangesTest::MergeHoleInner [GOOD] >> TRowVersionRangesTest::MergeHoleOuter [GOOD] >> TRowVersionRangesTest::MergeAllOuter [GOOD] >> TRowVersionRangesTest::MergeAllInner [GOOD] >> TRowVersionRangesTest::MergeAllEdges [GOOD] >> TRowVersionRangesTest::ContainsEmpty [GOOD] >> TRowVersionRangesTest::ContainsNonEmpty [GOOD] >> TRowVersionRangesTest::ContainsInvalid [GOOD] >> TRowVersionRangesTest::AdjustDown [GOOD] >> TRowVersionRangesTest::AdjustDownSnapshot [GOOD] >> TRowVersionRangesTest::SteppedCookieAllocatorOrder [GOOD] >> TRowVersionRangesTest::SteppedCookieAllocatorLowerBound [GOOD] >> TS3FIFOCache::Touch [GOOD] >> TS3FIFOCache::Touch_MainQueue [GOOD] >> TS3FIFOCache::EvictNext [GOOD] >> TS3FIFOCache::UpdateLimit [GOOD] >> TS3FIFOCache::Erase [GOOD] >> TS3FIFOCache::Random [GOOD] >> TS3FIFOGhostQueue::Basics [GOOD] >> TScheme::Shapshot [GOOD] >> TScheme::Delta [GOOD] >> TScheme::Policy [GOOD] >> TScreen::Cuts [GOOD] >> TScreen::Join [GOOD] >> TScreen::Sequential >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionEnqueue >> TBlobStorageStoragePoolMonTest::ReducedSizeClassCalcTest [GOOD] >> TScreen::Sequential [GOOD] >> TScreen::Random >> Bloom::Conf [GOOD] >> Bloom::Hashes [GOOD] >> Bloom::Rater [GOOD] >> Bloom::Dipping >> GroupReconfiguration::ReassignsDoNotCauseErrorMessagesMirror3dc [GOOD] >> GroupReconfiguration::ReassignsDoNotCauseErrorMessagesMirror3of4 >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionEnqueue [GOOD] >> TFlatTableExecutor_ExecutorTxLimit::TestExecutorTxLimit [GOOD] >> TFlatTableExecutor_Follower::BasicFollowerRead [GOOD] >> TFlatTableExecutor_Follower::FollowerEarlyRebootHoles [GOOD] >> TFlatTableExecutor_Follower::FollowerAttachOnTxQueueScanSnapshot [GOOD] >> TFlatTableExecutor_Follower::FollowerAttachAfterLoan >> TestKinesisHttpProxy::ListShardsToken [GOOD] >> TScreen::Random [GOOD] >> TScreen::Shrink [GOOD] >> TScreen::Cook [GOOD] >> TSharedPageCache::Limits >> Bloom::Dipping [GOOD] >> Bloom::Basics [GOOD] >> Bloom::Stairs >> TBtreeIndexBuilder::NoNodes [GOOD] >> TBtreeIndexBuilder::OneNode [GOOD] >> TBtreeIndexBuilder::FewNodes [GOOD] >> TBtreeIndexBuilder::SplitBySize [GOOD] >> TBtreeIndexNode::TIsNullBitmap [GOOD] >> TBtreeIndexNode::CompareTo [GOOD] >> TBtreeIndexNode::Basics [GOOD] >> TBtreeIndexNode::Group [GOOD] >> TBtreeIndexNode::History [GOOD] >> TBtreeIndexNode::OneKey [GOOD] >> TBtreeIndexNode::Reusable [GOOD] >> TBtreeIndexNode::CutKeys [GOOD] >> TBtreeIndexTPart::Conf [GOOD] >> TBtreeIndexTPart::NoNodes [GOOD] >> TBtreeIndexTPart::OneNode [GOOD] >> TBtreeIndexTPart::FewNodes [GOOD] >> TBtreeIndexTPart::Erases [GOOD] >> TBtreeIndexTPart::Groups [GOOD] >> TBtreeIndexTPart::History >> 
TFlatTableExecutor_Follower::FollowerAttachAfterLoan [GOOD] >> TFlatTableExecutor_Gc::TestFailedGcAfterReboot [GOOD] >> TFlatTableExecutor_IndexLoading::CalculateReadSize_FlatIndex >> LocalPartition::WithoutPartitionWithRestart [GOOD] >> LocalPartition::WithoutPartitionUnknownEndpoint >> TPart::State [GOOD] >> TPart::Trivials [GOOD] >> TPart::Basics [GOOD] >> TPart::CellDefaults [GOOD] >> TPart::Matter [GOOD] >> TPart::External [GOOD] >> TPart::Outer [GOOD] >> TPart::MassCheck >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Table >> Bloom::Stairs [GOOD] >> BuildStatsBTreeIndex::Single [GOOD] >> BuildStatsBTreeIndex::Single_Slices >> TBtreeIndexTPart::History [GOOD] >> TBtreeIndexTPart::External [GOOD] >> TChargeBTreeIndex::NoNodes >> TFlatTableExecutor_ResourceProfile::TestExecutorStaticMemoryLimits [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxDataLimitExceeded [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxDataGC [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxPartialDataHold [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxHoldAndUse [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxHoldOnRelease [GOOD] >> TFlatTableExecutor_ResourceProfile::TestUpdateConfig [GOOD] >> TFlatTableExecutor_SliceOverlapScan::TestSliceOverlapScan |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest |80.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation >> TFlatTableExecutor_IndexLoading::CalculateReadSize_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::CalculateReadSize_BTreeIndex >> TPart::MassCheck [GOOD] >> TPart::WreckPart >> TSharedPageCache::Limits [GOOD] >> BuildStatsBTreeIndex::Single_Slices [GOOD] >> TSharedPageCache::Limits_Config >> BuildStatsBTreeIndex::Single_History [GOOD] >> BuildStatsBTreeIndex::Single_History_Slices >> TFlatTableExecutor_IndexLoading::CalculateReadSize_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_FlatIndex |80.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation |80.6%| [LD] {RESULT} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TBlobStorageStoragePoolMonTest::ReducedSizeClassCalcTest [GOOD] >> TPart::WreckPart [GOOD] >> TPart::PageFailEnv >> DataCleanup::CleanupDataNoTables [GOOD] >> DataCleanup::CleanupDataNoTablesWithRestart >> TSharedPageCache::ThreeLeveledLRU >> BuildStatsBTreeIndex::Single_History_Slices [GOOD] >> BuildStatsBTreeIndex::Single_Groups [GOOD] >> BuildStatsBTreeIndex::Single_Groups_Slices >> TClockProCache::Touch [GOOD] >> TClockProCache::UpdateLimit [GOOD] >> TCompaction::OneMemtable [GOOD] >> TCompaction::ManyParts >> DataCleanup::CleanupDataNoTablesWithRestart [GOOD] >> DataCleanup::CleanupDataLog [GOOD] >> DataCleanup::CleanupData [GOOD] >> DataCleanup::CleanupDataMultipleFamilies [GOOD] >> DataCleanup::CleanupDataMultipleTables [GOOD] >> DataCleanup::CleanupDataWithFollowers [GOOD] >> DataCleanup::CleanupDataMultipleTimes [GOOD] >> DataCleanup::CleanupDataEmptyTable [GOOD] >> DataCleanup::CleanupDataWithRestarts [GOOD] >> DataCleanup::CleanupDataRetryWithNotGreaterGenerations [GOOD] >> 
DataCleanup::CleanupDataWithTabletGCErrors |80.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |80.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |80.6%| [LD] {RESULT} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut >> TSharedPageCache::Limits_Config [GOOD] >> TSharedPageCache::ClockPro >> BuildStatsBTreeIndex::Single_Groups_Slices [GOOD] >> BuildStatsBTreeIndex::Single_Groups_History [GOOD] >> BuildStatsBTreeIndex::Single_Groups_History_Slices >> TPart::PageFailEnv [GOOD] >> TPart::ForwardEnv >> DataCleanup::CleanupDataWithTabletGCErrors [GOOD] >> DataCleanup::CleanupDataWithSysTabletGCErrors >> TCompaction::ManyParts [GOOD] >> TCompaction::BootAbort >> THiveTest::TestBridgeDisconnectWithReboots [GOOD] >> THiveTest::TestBridgeDemotion >> TPart::ForwardEnv [GOOD] >> TPart::WreckPartColumnGroups >> TSharedPageCache::ThreeLeveledLRU [GOOD] >> TSharedPageCache::S3FIFO >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheMiddle [GOOD] >> TCompaction::BootAbort [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloaded >> TCompaction::Defaults [GOOD] >> TCompaction::Merges [GOOD] >> TCompactionMulti::ManyParts >> BuildStatsBTreeIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsBTreeIndex::Mixed [GOOD] >> BuildStatsBTreeIndex::Mixed_Groups |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest |80.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TSharedPageCache::ClockPro [GOOD] >> TSharedPageCache::BigCache_BTreeIndex >> DataCleanup::CleanupDataWithSysTabletGCErrors [GOOD] >> DBase::WideKey >> TCompactionMulti::ManyParts [GOOD] >> TCompactionMulti::MainPageCollectionEdge ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::ListShardsToken [GOOD] Test command err: 2025-06-30T09:20:10.172184Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669754542914609:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:10.172250Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a97/r3tmp/tmpFza4NC/pdisk_1.dat 2025-06-30T09:20:10.229932Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 64863, node 1 2025-06-30T09:20:10.238345Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:10.238584Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669754542914587:2080] 1751275210172000 != 1751275210172003 2025-06-30T09:20:10.238653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:10.238662Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:10.238664Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-30T09:20:10.238717Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13645 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:10.274043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:10.274070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:10.275078Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:10.311977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:13645 2025-06-30T09:20:10.336612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:20:10.338028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:20:10.338558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... waiting... 
2025-06-30T09:20:10.344622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:10.417059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:10.435271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:10.508532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:10.531625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:10.548079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:10.567576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:10.582017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:10.596887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:10.609663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:10.655091Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669754542915958:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:10.655120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669754542915947:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:10.655226Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:10.656099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:10.659878Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669754542915961:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715673 completed, doublechecking } 2025-06-30T09:20:10.756279Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669754542916013:2858] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:10.822068Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz0261wyfgtqxrvhh4hq0pt8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDhlNTk1MjMtMmZhM2EwMy1mMmMzM2FlMy0yNzAwODgyZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:20:10.832921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:10.843443Z node 1 :FLAT_TX ... 660: [PQ: 72075186224037908] add an TxId 281474976715690 to the list for deletion 2025-06-30T09:20:34.351891Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037908] TxId 281474976715690, NewState DELETING 2025-06-30T09:20:34.351894Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037908] delete key for TxId 281474976715690 2025-06-30T09:20:34.351898Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037908] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:20:34.352320Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037909] Registered with mediator time cast 2025-06-30T09:20:34.352344Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037907] Registered with mediator time cast 2025-06-30T09:20:34.352359Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037910] Registered with mediator time cast 2025-06-30T09:20:34.352365Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037911] Registered with mediator time cast 2025-06-30T09:20:34.352368Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037908] Registered with mediator time cast 2025-06-30T09:20:34.352371Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037908] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:20:34.352374Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037908] Try execute txs with state DELETING 2025-06-30T09:20:34.352376Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037908] TxId 281474976715690, State DELETING 2025-06-30T09:20:34.352379Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037908] delete TxId 281474976715690 2025-06-30T09:20:34.352456Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [CreateStream] requestId [a4bb78e1-c858deea-514e0c1-1be569dc] reply ok 2025-06-30T09:20:34.352517Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:36044) <- (200 , 2 bytes) 2025-06-30T09:20:34.352636Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:36044) connection closed Http output full {} 200 {} 2025-06-30T09:20:34.353181Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#40,[::1]:36046) incoming 
connection opened 2025-06-30T09:20:34.353220Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#40,[::1]:36046) -> (POST /Root, 157 bytes) 2025-06-30T09:20:34.353279Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [d8e9:f8ba:bc13:0:c0e9:f8ba:bc13:0] request [ListShards] url [/Root] database [/Root] requestId: 73c9531f-e89dbac3-f33f39fd-112c16c 2025-06-30T09:20:34.353408Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId [73c9531f-e89dbac3-f33f39fd-112c16c] got new request from [d8e9:f8ba:bc13:0:c0e9:f8ba:bc13:0] database '/Root' stream 'teststream' 2025-06-30T09:20:34.353572Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [73c9531f-e89dbac3-f33f39fd-112c16c] [auth] Authorized successfully 2025-06-30T09:20:34.353601Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [ListShards] requestId [73c9531f-e89dbac3-f33f39fd-112c16c] sending grpc request to '' database: '/Root' iam token size: 0 E0000 00:00:1751275234.353634 194759 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-30T09:20:34.354110Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037911] server connected, pipe [8:7521669858327680796:2480], now have 1 active actors on pipe 2025-06-30T09:20:34.354113Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037907] server connected, pipe [8:7521669858327680795:2479], now have 1 active actors on pipe 2025-06-30T09:20:34.354237Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037907] server disconnected, pipe [8:7521669858327680795:2479] destroyed 2025-06-30T09:20:34.354241Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037911] server disconnected, pipe [8:7521669858327680796:2480] destroyed 2025-06-30T09:20:34.354255Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [73c9531f-e89dbac3-f33f39fd-112c16c] reply ok 2025-06-30T09:20:34.354291Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#40,[::1]:36046) <- (200 , 449 bytes) 2025-06-30T09:20:34.354351Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#40,[::1]:36046) connection closed Http output full {"NextToken":"CLLIjYH8MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 200 {"NextToken":"CLLIjYH8MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-06-30T09:20:34.354816Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:36062) incoming connection opened 2025-06-30T09:20:34.354850Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:36062) -> (POST /Root, 157 bytes) 2025-06-30T09:20:34.354894Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from 
[589b:15bc:bc13:0:409b:15bc:bc13:0] request [ListShards] url [/Root] database [/Root] requestId: 5e8651c8-738099fb-b50f2d2c-86e68add 2025-06-30T09:20:34.355001Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId [5e8651c8-738099fb-b50f2d2c-86e68add] got new request from [589b:15bc:bc13:0:409b:15bc:bc13:0] database '/Root' stream 'teststream' 2025-06-30T09:20:34.355118Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [5e8651c8-738099fb-b50f2d2c-86e68add] [auth] Authorized successfully 2025-06-30T09:20:34.355131Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [ListShards] requestId [5e8651c8-738099fb-b50f2d2c-86e68add] sending grpc request to '' database: '/Root' iam token size: 0 E0000 00:00:1751275234.355145 194760 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-30T09:20:34.355394Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037907] server connected, pipe [8:7521669858327680807:2484], now have 1 active actors on pipe 2025-06-30T09:20:34.355403Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037911] server connected, pipe [8:7521669858327680808:2485], now have 1 active actors on pipe Http output full {"NextToken":"CLPIjYH8MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 200 {"NextToken":"CLPIjYH8MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-06-30T09:20:34.355602Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037907] server disconnected, pipe [8:7521669858327680807:2484] destroyed 2025-06-30T09:20:34.355607Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037911] server disconnected, pipe [8:7521669858327680808:2485] destroyed 2025-06-30T09:20:34.355613Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [5e8651c8-738099fb-b50f2d2c-86e68add] reply ok 2025-06-30T09:20:34.355651Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:36062) <- (200 , 449 bytes) 2025-06-30T09:20:34.355724Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:36062) connection closed 2025-06-30T09:20:34.356047Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:36072) incoming connection opened 2025-06-30T09:20:34.356076Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:36072) -> (POST /Root, 157 bytes) 2025-06-30T09:20:34.356107Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [9829:f9ba:bc13:0:8029:f9ba:bc13:0] request [ListShards] url [/Root] database [/Root] requestId: 848a8a9f-20a64f76-76f87288-11e5a5db 2025-06-30T09:20:34.356189Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId 
[848a8a9f-20a64f76-76f87288-11e5a5db] got new request from [9829:f9ba:bc13:0:8029:f9ba:bc13:0] database '/Root' stream 'teststream' 2025-06-30T09:20:34.356272Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [848a8a9f-20a64f76-76f87288-11e5a5db] [auth] Authorized successfully 2025-06-30T09:20:34.356289Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [ListShards] requestId [848a8a9f-20a64f76-76f87288-11e5a5db] sending grpc request to '' database: '/Root' iam token size: 0 E0000 00:00:1751275234.356301 194760 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-30T09:20:34.356485Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037907] server connected, pipe [8:7521669858327680819:2489], now have 1 active actors on pipe 2025-06-30T09:20:34.356487Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037911] server connected, pipe [8:7521669858327680820:2490], now have 1 active actors on pipe 2025-06-30T09:20:34.356571Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037907] server disconnected, pipe [8:7521669858327680819:2489] destroyed 2025-06-30T09:20:34.356577Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037911] server disconnected, pipe [8:7521669858327680820:2490] destroyed 2025-06-30T09:20:34.356586Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [848a8a9f-20a64f76-76f87288-11e5a5db] reply ok 2025-06-30T09:20:34.356618Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:36072) <- (200 , 449 bytes) Http output full {"NextToken":"CLTIjYH8MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-06-30T09:20:34.356671Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:36072) connection closed 200 {"NextToken":"CLTIjYH8MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} >> TSharedPageCache::S3FIFO [GOOD] >> TSharedPageCache::ReplacementPolicySwitch >> TChargeBTreeIndex::NoNodes [GOOD] >> TChargeBTreeIndex::NoNodes_Groups >> TSharedPageCache::BigCache_BTreeIndex [GOOD] >> TSharedPageCache::BigCache_FlatIndex >> BuildStatsHistogram::Three_Serial_Small_2_Levels >> DBase::WideKey [GOOD] >> DBase::VersionPureMem >> TPart::WreckPartColumnGroups [GOOD] >> TPart::PageFailEnvColumnGroups >> CommitOffset::DistributedTxCommit_Flat_CheckOffsetCommitForDifferentCases [GOOD] >> TPersQueueMirrorer::TestBasicRemote [GOOD] >> TSharedPageCache::ReplacementPolicySwitch [GOOD] >> TSharedPageCache::MiddleCache_FlatIndex >> TSharedPageCache::BigCache_FlatIndex [GOOD] >> TSharedPageCache::MiddleCache_BTreeIndex [GOOD] >> BuildStatsHistogram::Three_Serial_Small_2_Levels [GOOD] >> 
BuildStatsHistogram::Three_Serial_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Three_Serial_Small_1_Level [GOOD] >> BuildStatsHistogram::Three_Serial_Small_0_Levels [GOOD] >> BuildStatsMixedIndex::Single [GOOD] >> BuildStatsMixedIndex::Single_Slices >> TFlatTableExecutor_SliceOverlapScan::TestSliceOverlapScan [GOOD] >> TFlatTableExecutor_SnapshotWithCommits::SnapshotWithCommits [GOOD] >> TFlatTableExecutor_StickyPages::TestNonSticky_FlatIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestNonSticky_BTreeIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestSticky >> DBase::VersionPureMem [GOOD] >> DBase::VersionPureParts |80.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest |80.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TSharedPageCache::MiddleCache_FlatIndex [GOOD] >> TSharedPageCache::ZeroCache_BTreeIndex >> TFlatTableExecutor_StickyPages::TestSticky [GOOD] >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_FlatIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_BTreeIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyMain [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyAlt_FlatIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyAlt_BTreeIndex >> BuildStatsMixedIndex::Single_Slices [GOOD] >> BuildStatsMixedIndex::Single_History [GOOD] >> BuildStatsMixedIndex::Single_History_Slices [GOOD] >> BuildStatsMixedIndex::Single_Groups [GOOD] >> BuildStatsMixedIndex::Single_Groups_Slices >> DBase::VersionPureParts [GOOD] >> DBase::VersionCompactedMem >> TFlatTableExecutor_StickyPages::TestStickyAlt_BTreeIndex [GOOD] >> BuildStatsMixedIndex::Single_Groups_Slices [GOOD] >> BuildStatsMixedIndex::Single_Groups_History [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_Slices >> TFlatTableExecutor_StickyPages::TestStickyAll [GOOD] >> TFlatTableExecutor_StickyPages::TestAlterAddFamilySticky [GOOD] >> TFlatTableExecutor_StickyPages::TestAlterAddFamilyPartiallySticky [GOOD] >> TFlatTableExecutor_VersionedLargeBlobs::TestMultiVersionCompactionLargeBlobs >> TPart::PageFailEnvColumnGroups [GOOD] >> TPart::ForwardEnvColumnGroups >> TSharedPageCache::ZeroCache_BTreeIndex [GOOD] >> TSharedPageCache::ZeroCache_FlatIndex >> DBase::VersionCompactedMem [GOOD] >> DBase::VersionCompactedParts >> BuildStatsBTreeIndex::Mixed_Groups [GOOD] >> BuildStatsBTreeIndex::Mixed_Groups_History >> TFlatTableExecutor_VersionedLargeBlobs::TestMultiVersionCompactionLargeBlobs [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_Slices [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRows [GOOD] >> BuildStatsMixedIndex::Mixed [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsSmallBlobs >> BuildStatsMixedIndex::Mixed_Groups [GOOD] >> BuildStatsMixedIndex::Mixed_Groups_History >> TPart::ForwardEnvColumnGroups [GOOD] >> TPart::Versions [GOOD] >> TPart::ManyVersions [GOOD] >> TPart::ManyDeltas [GOOD] >> TPart::CutKeys_Lz4 [GOOD] >> TPart::CutKeys_Seek [GOOD] >> TPart::CutKeys_SeekPages [GOOD] >> TPart::CutKeys_SeekSlices [GOOD] >> TPart::CutKeys_CutString [GOOD] >> TPart::CutKeys_CutUtf8String [GOOD] >> TPartBtreeIndexIteration::NoNodes >> TSharedPageCache::ZeroCache_FlatIndex [GOOD] >> TSharedPageCache_Actor::Request_Basics [GOOD] >> TSharedPageCache_Actor::Request_Failed [GOOD] >> TSharedPageCache_Actor::Request_Queue [GOOD] >> TSharedPageCache_Actor::Request_Queue_Failed >> DBase::VersionCompactedParts [GOOD] >> Memtable::Basics [GOOD] >> Memtable::BasicsReverse [GOOD] 
>> Memtable::Markers [GOOD] >> Memtable::Overlap [GOOD] >> Memtable::Wreck [GOOD] >> Memtable::Erased >> TCompactionMulti::MainPageCollectionEdge [GOOD] >> TCompactionMulti::MainPageCollectionEdgeMany >> BuildStatsBTreeIndex::Mixed_Groups_History [GOOD] >> BuildStatsFlatIndex::Single [GOOD] >> BuildStatsFlatIndex::Single_Slices [GOOD] >> BuildStatsFlatIndex::Single_History >> BuildStatsMixedIndex::Mixed_Groups_History [GOOD] >> BuildStatsMixedIndex::Serial [GOOD] >> BuildStatsMixedIndex::Serial_Groups >> TxUsage::WriteToTopic_Demo_12_Query [GOOD] >> TKeyValueTest::TestBasicWriteRead [GOOD] >> TKeyValueTest::TestBasicWriteReadOverrun >> TSharedPageCache_Actor::Request_Queue_Failed [GOOD] >> TSharedPageCache_Actor::Request_Queue_Fast [GOOD] >> TSharedPageCache_Actor::Request_Sequential [GOOD] >> TSharedPageCache_Actor::Request_Cached [GOOD] >> TSharedPageCache_Actor::Request_Different_Collections [GOOD] >> TSharedPageCache_Actor::Request_Different_Pages [GOOD] >> TSharedPageCache_Actor::Request_Different_Pages_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Subset [GOOD] >> TSharedPageCache_Actor::Request_Subset_Shuffled [GOOD] >> TSharedPageCache_Actor::Request_Superset [GOOD] >> TSharedPageCache_Actor::Request_Superset_Reversed >> Memtable::Erased [GOOD] >> NFwd_TBlobs::MemTableTest [GOOD] >> NFwd_TBlobs::Lower [GOOD] >> NFwd_TBlobs::Sieve [GOOD] >> NFwd_TBlobs::SieveFiltered [GOOD] >> NFwd_TBlobs::Basics [GOOD] >> NFwd_TBlobs::Simple [GOOD] >> NFwd_TBlobs::Shuffle [GOOD] >> NFwd_TBlobs::Grow [GOOD] >> NFwd_TBlobs::Trace [GOOD] >> NFwd_TBlobs::Filtered [GOOD] >> NFwd_TBTreeIndexCache::Basics [GOOD] >> NFwd_TBTreeIndexCache::IndexPagesLocator [GOOD] >> NFwd_TBTreeIndexCache::GetTwice [GOOD] >> NFwd_TBTreeIndexCache::ForwardTwice [GOOD] >> NFwd_TBTreeIndexCache::Forward_OnlyUsed [GOOD] >> NFwd_TBTreeIndexCache::Skip_Done [GOOD] >> NFwd_TBTreeIndexCache::Skip_Done_None [GOOD] >> NFwd_TBTreeIndexCache::Skip_Keep [GOOD] >> NFwd_TBTreeIndexCache::Skip_Wait [GOOD] >> NFwd_TBTreeIndexCache::Trace_BTree [GOOD] >> NFwd_TBTreeIndexCache::Trace_Data [GOOD] >> NFwd_TBTreeIndexCache::End [GOOD] >> NFwd_TBTreeIndexCache::Slices [GOOD] >> NFwd_TBTreeIndexCache::ManyApplies [GOOD] >> NFwd_TFlatIndexCache::Basics [GOOD] >> NFwd_TFlatIndexCache::IndexPagesLocator [GOOD] >> NFwd_TFlatIndexCache::GetTwice [GOOD] >> NFwd_TFlatIndexCache::ForwardTwice [GOOD] >> NFwd_TFlatIndexCache::Skip_Done [GOOD] >> NFwd_TFlatIndexCache::Skip_Done_None [GOOD] >> NFwd_TFlatIndexCache::Skip_Keep [GOOD] >> NFwd_TFlatIndexCache::End [GOOD] >> BuildStatsFlatIndex::Single_History [GOOD] >> BuildStatsFlatIndex::Single_History_Slices [GOOD] >> BuildStatsFlatIndex::Single_Groups >> BuildStatsMixedIndex::Serial_Groups [GOOD] >> BuildStatsMixedIndex::Serial_Groups_History [GOOD] >> BuildStatsMixedIndex::Single_LowResolution |80.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TSharedPageCache_Actor::Request_Superset_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Crossing [GOOD] >> TSharedPageCache_Actor::Request_Crossing_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Crossing_Shuffled [GOOD] >> TSharedPageCache_Actor::Attach_Basics [GOOD] >> TSharedPageCache_Actor::Attach_Request [GOOD] >> TSharedPageCache_Actor::Detach_Basics [GOOD] >> TSharedPageCache_Actor::Detach_Cached [GOOD] >> TSharedPageCache_Actor::Detach_Expired >> TCompactionMulti::MainPageCollectionEdgeMany [GOOD] >> TCompactionMulti::MainPageCollectionOverflow [GOOD] >> 
TCompactionMulti::MainPageCollectionOverflowSmallRefs [GOOD] >> TCompactionMulti::MainPageCollectionOverflowLargeRefs [GOOD] >> TExecutorDb::RandomOps >> BuildStatsFlatIndex::Single_Groups [GOOD] >> BuildStatsFlatIndex::Single_Groups_Slices [GOOD] >> BuildStatsFlatIndex::Single_Groups_History >> BuildStatsMixedIndex::Single_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Slices_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_Slices_LowResolution >> TGRpcCmsTest::AuthTokenTest >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table >> TSharedPageCache_Actor::Detach_Expired [GOOD] >> TSharedPageCache_Actor::Detach_InFly [GOOD] >> TSharedPageCache_Actor::Detach_Queued [GOOD] >> TSharedPageCache_Actor::Unregister_Basics [GOOD] >> TSharedPageCache_Actor::Unregister_Cached [GOOD] >> TSharedPageCache_Actor::Unregister_Expired [GOOD] >> TSharedPageCache_Actor::Unregister_InFly [GOOD] >> TSharedPageCache_Actor::Unregister_Queued >> BuildStatsFlatIndex::Single_Groups_History [GOOD] >> BuildStatsFlatIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsFlatIndex::Mixed [GOOD] >> BuildStatsFlatIndex::Mixed_Groups >> BuildStatsMixedIndex::Single_Groups_Slices_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_Slices_LowResolution >> TSharedPageCache_Actor::Unregister_Queued [GOOD] >> TSharedPageCache_Actor::Unregister_Queued_Pending [GOOD] >> TSwitchableCache::Touch [GOOD] >> TSwitchableCache::Erase [GOOD] >> TSwitchableCache::EvictNext [GOOD] >> TSwitchableCache::UpdateLimit [GOOD] >> TSwitchableCache::Switch_Touch_RotatePages_All [GOOD] >> TSwitchableCache::Switch_Touch_RotatePages_Parts [GOOD] >> TSwitchableCache::Switch_RotatePages_Force [GOOD] >> TSwitchableCache::Switch_RotatePages_Evicts [GOOD] >> TSwitchableCache::Switch_Touch [GOOD] >> TSwitchableCache::Switch_Erase [GOOD] >> TSwitchableCache::Switch_EvictNext [GOOD] >> TSwitchableCache::Switch_UpdateLimit [GOOD] >> TVersions::WreckHead ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TSharedPageCache::MiddleCache_BTreeIndex [GOOD] Test command err: SmallQueue: MainQueue: {11 0f 1b}, {14 1f 1b}, {15 2f 1b}, {18 0f 1b}, {19 0f 1b}, {23 0f 1b}, {27 0f 1b} GhostQueue: 9, 12, 13, 16, 17, 20, 21, 24, 25, 28 0.29126 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.447342Z 00000.005 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.005 II| FAKE_ENV: Starting storage for BS group 0 00000.006 II| FAKE_ENV: Starting storage for BS group 1 00000.006 II| FAKE_ENV: Starting storage for BS group 2 00000.006 II| FAKE_ENV: Starting storage for BS group 3 00000.006 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.007 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} hope 1 -> done Change{2, redo 
0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} release 4194304b of static, Memory{0 dyn 0} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.007 NN| TABLET_SAUSAGECACHE: Update config MemoryLimit: 8388608 ReplacementPolicy: ThreeLeveledLRU 00000.007 NN| TABLET_SAUSAGECACHE: Switch replacement policy from S3FIFO to ThreeLeveledLRU 00000.007 NN| TABLET_SAUSAGECACHE: Switch replacement policy done from S3FIFO to ThreeLeveledLRU 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{2, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{3, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 1 for step 4 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{4, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 1 for step 5 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{5, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 1 for step 6 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{6, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 1 for step 7 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{7, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:9} commited cookie 1 for step 8 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{8, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:10} commited cookie 1 for step 9 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{9, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:11} commited cookie 1 for step 10 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: 
Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{10, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:12} commited cookie 1 for step 11 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{11, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:13} commited cookie 1 for step 12 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{12, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:14} commited cookie 1 for step 13 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{13, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:15} commited cookie 1 for step 14 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{14, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:15} 
Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxW ... TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.084 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [4 4] 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.084 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 4 ] 00000.084 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 96 ] owner [6:580:2605] 00000.084 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 4 ] 00000.084 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 4 ] cookie 1 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.084 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 4 117 111 ] 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.084 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [3 4] 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.084 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 3 ] 00000.084 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 95 ] owner [6:580:2605] 00000.084 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 3 ] 00000.084 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 3 ] cookie 1 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.084 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 3 117 111 ] 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.084 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [2 4] 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.084 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 2 ] 00000.084 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 93 ] owner [6:580:2605] 00000.084 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 2 ] 00000.084 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 2 ] cookie 1 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.084 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 2 117 111 ] 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.084 DD| TABLET_EXECUTOR: 
Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.084 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [1 4] 00000.084 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.085 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 1 ] 00000.085 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 92 ] owner [6:580:2605] 00000.085 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 1 ] 00000.085 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 1 ] cookie 1 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.085 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 1 117 111 ] 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.085 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [0 4] 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.085 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 0 ] 00000.085 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 91 ] owner [6:580:2605] 00000.085 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 0 ] 00000.085 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 0 ] cookie 1 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.085 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.085 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 0 117 111 ] Counters: Active:8313958/8388608, Passive:0, MemLimit:-1 00000.085 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.085 II| TABLET_EXECUTOR: Leader{1:3:2} suiciding, Waste{2:0, 10255801b +(0, 0b), 1 trc, -48685b acc} 00000.086 DD| TABLET_SAUSAGECACHE: Unregister owner [6:580:2605] 00000.086 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] 00000.086 DD| TABLET_SAUSAGECACHE: Remove owner [6:580:2605] 00000.086 NN| TABLET_SAUSAGECACHE: Poison cache serviced 138 reqs hit {0 0b} miss {139 12197190b} 00000.086 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.086 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {10191b, 107} 00000.086 II| FAKE_ENV: DS.1 gone, left {10257096b, 5}, put {10305919b, 107} 00000.086 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.086 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.086 II| FAKE_ENV: All BS storage groups are stopped 00000.086 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.086 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 2741}, stopped >> BuildStatsFlatIndex::Mixed_Groups [GOOD] >> BuildStatsFlatIndex::Mixed_Groups_History [GOOD] >> BuildStatsFlatIndex::Serial >> BuildStatsMixedIndex::Single_Groups_History_Slices_LowResolution [GOOD] >> Charge::Lookups [GOOD] >> Charge::ByKeysBasics [GOOD] >> Charge::ByKeysGroups [GOOD] >> Charge::ByKeysGroupsLimits [GOOD] >> Charge::ByKeysLimits [GOOD] >> Charge::ByKeysReverse [GOOD] >> Charge::ByKeysHistory [GOOD] >> Charge::ByKeysIndex [GOOD] >> Charge::ByRows [GOOD] >> Charge::ByRowsReverse [GOOD] >> Charge::ByRowsLimits [GOOD] >> Charge::ByRowsLimitsReverse [GOOD] >> DBase::Basics [GOOD] >> DBase::Select [GOOD] >> DBase::Defaults [GOOD] >> DBase::Subsets [GOOD] >> DBase::Garbage [GOOD] >> DBase::Affects [GOOD] >> DBase::Annex [GOOD] >> DBase::AnnexRollbackChanges [GOOD] >> DBase::Outer [GOOD] >> DBase::VersionBasics [GOOD] >> DBase::KIKIMR_15506_MissingSnapshotKeys [GOOD] >> DBase::EraseCacheWithUncommittedChanges [GOOD] >> DBase::EraseCacheWithUncommittedChangesCompacted [GOOD] >> DBase::AlterAndUpsertChangesVisibility [GOOD] >> DBase::UncommittedChangesVisibility [GOOD] >> DBase::UncommittedChangesCommitWithUpdates [GOOD] >> DBase::ReplayNewTable [GOOD] >> DBase::SnapshotNewTable [GOOD] >> DBase::DropModifiedTable [GOOD] >> DBase::KIKIMR_15598_Many_MemTables >> TGRpcCmsTest::RemoveWithAnotherTokenTest >> BuildStatsFlatIndex::Serial [GOOD] >> BuildStatsFlatIndex::Serial_Groups [GOOD] >> BuildStatsFlatIndex::Serial_Groups_History >> TPartBtreeIndexIteration::NoNodes [GOOD] >> TPartBtreeIndexIteration::NoNodes_Groups >> TGRpcCmsTest::SimpleTenantsTestSyncOperation >> TGRpcCmsTest::AlterRemoveTest >> BuildStatsFlatIndex::Serial_Groups_History [GOOD] >> BuildStatsHistogram::Single >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> NFwd_TFlatIndexCache::End [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.763221Z 00000.005 NN| TABLET_SAUSAGECACHE: Bootstrap with config 
MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.005 II| FAKE_ENV: Starting storage for BS group 0 00000.005 II| FAKE_ENV: Starting storage for BS group 1 00000.005 II| FAKE_ENV: Starting storage for BS group 2 00000.006 II| FAKE_ENV: Starting storage for BS group 3 00000.007 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.007 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.007 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.007 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {146b, 4} 00000.007 II| FAKE_ENV: DS.1 gone, left {105b, 3}, put {105b, 3} 00000.007 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.007 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.007 II| FAKE_ENV: All BS storage groups are stopped 00000.007 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.007 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.772324Z 00000.005 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.005 II| FAKE_ENV: Starting storage for BS group 0 00000.005 II| FAKE_ENV: Starting storage for BS group 1 00000.005 II| FAKE_ENV: Starting storage for BS group 2 00000.005 II| FAKE_ENV: Starting storage for BS group 3 00000.006 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.006 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.006 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.006 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {292b, 8} 00000.006 II| FAKE_ENV: DS.1 gone, left {210b, 6}, put {210b, 6} 00000.006 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: All BS storage groups are stopped 00000.006 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.006 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.780327Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.005 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.005 NN| TABLET_SAUSAGECACHE: Poison cache serviced 1 reqs hit {1 76b} miss {0 0b} 00000.005 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.005 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1179b, 13} 00000.005 II| FAKE_ENV: DS.1 gone, left {909b, 3}, put {1913b, 12} 00000.005 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {132b, 2} 00000.005 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {116b, 2} 00000.005 II| FAKE_ENV: All BS storage groups are stopped 00000.005 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.005 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.787344Z 00000.003 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.004 II| FAKE_ENV: Starting storage for BS group 0 00000.004 II| FAKE_ENV: Starting storage for BS group 1 00000.004 II| FAKE_ENV: Starting storage 
for BS group 2 00000.004 II| FAKE_ENV: Starting storage for BS group 3 00000.006 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.006 NN| TABLET_SAUSAGECACHE: Poison cache serviced 1 reqs hit {1 102443b} miss {0 0b} 00000.006 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.006 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {751b, 11} 00000.006 II| FAKE_ENV: DS.1 gone, left {541b, 3}, put {103970b, 10} 00000.006 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: All BS storage groups are stopped 00000.006 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.006 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.794937Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 ... blocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A cookie 0 00000.017 II| TABLET_SAUSAGECACHE: Wakeup 1 ... unblocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A 00000.017 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.018 NN| TABLET_SAUSAGECACHE: Poison cache serviced 11 reqs hit {18 513007b} miss {0 0b} 00000.018 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.018 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {2095b, 23} 00000.018 II| FAKE_ENV: DS.1 gone, left {774b, 4}, put {210604b, 21} 00000.018 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {205178b, 4} 00000.018 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {102690b, 4} 00000.018 II| FAKE_ENV: All BS storage groups are stopped 00000.018 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 15.00s 00000.018 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 16}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.814106Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.005 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.005 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {3 307329b} miss {0 0b} 00000.005 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.005 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1828b, 23} 00000.005 II| FAKE_ENV: DS.1 gone, left {1247b, 3}, put {311467b, 22} 00000.005 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.005 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.005 II| FAKE_ENV: All BS storage groups are stopped 00000.005 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.005 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.820880Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.001 II| FAKE_ENV: Starting storage for BS group 1 
00000.001 II| FAKE_ENV: Starting storage for BS group 2 00000.001 II| FAKE_ENV: Starting storage for BS group 3 00000.006 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 5 actors 00000.006 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4 reqs hit {8 307836b} miss {0 0b} 00000.006 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.006 II| FAKE_ENV: DS.0 gone, left {57b, 2}, put {1436b, 31} 00000.006 II| FAKE_ENV: DS.1 gone, left {629b, 3}, put {310476b, 16} 00000.006 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: All BS storage groups are stopped 00000.006 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 0.000s 00000.006 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.828605Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.004 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.004 NN| TABLET_SAUSAGECACHE: Poison cache serviced 2 reqs hit {2 194646b} miss {0 0b} 00000.004 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.004 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1766b, 27} 00000.004 II| FAKE_ENV: DS.1 gone, left {732b, 6}, put {197813b, 24} 00000.004 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: All BS storage groups are stopped 00000.004 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.004 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.833882Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.001 II| FAKE_ENV: Starting storage for BS group 1 00000.001 II| FAKE_ENV: Starting storage for BS group 2 00000.001 II| FAKE_ENV: Starting storage for BS group 3 00000.002 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.002 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.002 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.002 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {326b, 7} 00000.002 II| FAKE_ENV: DS.1 gone, left {418b, 4}, put {453b, 5} 00000.002 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.002 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.002 II| FAKE_ENV: All BS storage groups are stopped 00000.002 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.002 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.837219Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.001 II| FAKE_ENV: Starting storage for BS group 1 00000.001 II| FAKE_ENV: Starting storage for BS group 2 00000.001 II| FAKE_ENV: Starting storage for BS group 3 ... 
blocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A cookie 0 00000.016 II| TABLET_SAUSAGECACHE: Wakeup 1 ... unblocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A 00000.016 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.016 NN| TABLET_SAUSAGECACHE: Poison cache serviced 6 reqs hit {8 410030b} miss {0 0b} 00000.017 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.017 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1492b, 23} 00000.017 II| FAKE_ENV: DS.1 gone, left {504b, 4}, put {310786b, 20} 00000.017 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.017 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.017 II| FAKE_ENV: All BS storage groups are stopped 00000.017 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 15.00s 00000.017 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 16}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.855156Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.001 II| FAKE_ENV: Starting storage for BS group 1 00000.001 II| FAKE_ENV: Starting storage for BS group 2 00000.001 II| FAKE_ENV: Starting storage for ... 3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 
50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 
500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: 
{16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} |80.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |80.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |80.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |80.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest |80.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsSmallBlobs [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsLargeBlobs >> TestYmqHttpProxy::TestListQueueTags [GOOD] |80.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> THiveTest::TestBridgeDemotion [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table >> TGRpcCmsTest::DisabledTxTest >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query |80.7%| [TA] $(B)/ydb/core/blobstorage/storagepoolmon/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TPersQueueMirrorer::TestBasicRemote [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004038/r3tmp/tmps4dsfN/pdisk_1.dat 2025-06-30T09:18:52.960393Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:52.969406Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:18:52.983522Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669421848898100:2080] 1751275132852173 != 1751275132852176 2025-06-30T09:18:52.986468Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63672, node 1 2025-06-30T09:18:53.003874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004038/r3tmp/yandexzcqwUI.tmp 2025-06-30T09:18:53.003889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004038/r3tmp/yandexzcqwUI.tmp 2025-06-30T09:18:53.006477Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004038/r3tmp/yandexzcqwUI.tmp 2025-06-30T09:18:53.006569Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:53.010212Z INFO: TTestServer started on Port 3954 GrpcPort 63672 TClient is connected to server localhost:3954 2025-06-30T09:18:53.053684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:53.053712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting PQClient connected to localhost:63672 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:18:53.067936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:53.115917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:18:53.138979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:18:53.151003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:18:53.152275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:18:53.231293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:18:53.553743Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669426143866168:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.553786Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.553908Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669426143866181:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:53.555283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:53.559498Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669426143866183:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:18:53.609951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.616314Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669426143866287:2454] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:53.626089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:53.633388Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669426143866317:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:53.634087Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZDdkMmI3MTQtODBmN2NiNTUtNGU0OTIzZmUtYjIyMjM0N2I=, ActorId: [1:7521669426143866143:2296], ActorState: ExecuteState, TraceId: 01jz023pkd3thsx3xkncn07jd0, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:53.634701Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:53.654666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669426143866539:2613] 2025-06-30T09:18:53.850766Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; === CheckClustersList. 
Ok 2025-06-30T09:18:58.914004Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-30T09:18:58.919576Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:18:58.920206Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521669447618703206:2683], Recipient [1:7521669421848898473:2179]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:58.920219Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:18:58.920223Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:18:58.920232Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521669447618703202:2680], Recipient [1:7521669421848898473:2179]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-30T09:18:58.920234Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:18:58.929964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitCh ... d4d2] Closing read session. Close timeout: 0.000000s 2025-06-30T09:20:35.037422Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:2:3 2025-06-30T09:20:35.037442Z :INFO: [/Root] [/Root] [a9314c2c-2b970da-cbbfaa47-d7fbd4d2] Counters: { Errors: 0 CurrentSessionLifetimeMs: 5015 BytesRead: 27 MessagesRead: 3 BytesReadCompressed: 87 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:35.037474Z :NOTICE: [/Root] [/Root] [a9314c2c-2b970da-cbbfaa47-d7fbd4d2] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:20:35.037486Z :DEBUG: [/Root] [/Root] [a9314c2c-2b970da-cbbfaa47-d7fbd4d2] [] Abort session to cluster 2025-06-30T09:20:35.037831Z :NOTICE: [/Root] [/Root] [a9314c2c-2b970da-cbbfaa47-d7fbd4d2] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:20:35.038182Z node 6 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_6_1_8726915099830318539_v1 grpc read done: success# 0, data# { } 2025-06-30T09:20:35.038202Z node 6 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_6_1_8726915099830318539_v1 grpc read failed 2025-06-30T09:20:35.038212Z node 6 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_6_1_8726915099830318539_v1 grpc closed 2025-06-30T09:20:35.038234Z node 6 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_6_1_8726915099830318539_v1 is DEAD 2025-06-30T09:20:35.038774Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [6:7521669841067572464:2632] disconnected; active server actors: 1 2025-06-30T09:20:35.038788Z node 6 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][test-topic] pipe [6:7521669841067572464:2632] client test-consumer disconnected session test-consumer_6_1_8726915099830318539_v1 2025-06-30T09:20:35.038833Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 269877764, Sender [6:7521669841067572468:3050], Recipient [6:7521669836772604368:2416]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:35.038842Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5326: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:35.038847Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:2906: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:35.038854Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session test-consumer_6_1_8726915099830318539_v1 2025-06-30T09:20:35.038880Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [6:7521669841067572467:2635] destroyed 2025-06-30T09:20:35.038905Z node 6 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_6_1_8726915099830318539_v1 2025-06-30T09:20:35.070598Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7521669836772604368:2416], Partition 0, Sender [0:0:0], Recipient [6:7521669836772604427:2419], Cookie: 0 2025-06-30T09:20:35.070631Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7521669836772604427:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:35.070639Z node 6 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:35.070659Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:35.070695Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:35.070704Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:35.070712Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-30T09:20:35.137107Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186224037893][test-topic] TPersQueueReadBalancer::HandleWakeup 2025-06-30T09:20:35.137135Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 6 2025-06-30T09:20:35.137169Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [6:7521669836772604368:2416], Partition 0, Sender [6:7521669836772604430:2421], Recipient [6:7521669836772604427:2419], Cookie: 0 2025-06-30T09:20:35.137184Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188544, Sender [6:7521669836772604430:2421], Recipient [6:7521669836772604427:2419]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-30T09:20:35.137191Z node 6 :PERSQUEUE TRACE: partition.h:630: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-30T09:20:35.137232Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188536, Sender [6:7521669836772604366:2415], Recipient [6:7521669836772604368:2416]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-30T09:20:35.137236Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5340: HandleHook, processing event TEvPQ::TEvSubDomainStatus 2025-06-30T09:20:35.137251Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271187975, Sender [6:7521669836772604366:2415], Recipient [6:7521669836772604368:2416]: NKikimrPQ.TStatus GetStatForAllConsumers: true 2025-06-30T09:20:35.137254Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5315: HandleHook, processing event TEvPersQueue::TEvStatus 2025-06-30T09:20:35.137260Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:1813: [PQ: 72075186224037892] Handle TEvPersQueue::TEvStatus 2025-06-30T09:20:35.137277Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [6:7521669836772604368:2416], Partition 0, Sender [6:7521669836772604368:2416], Recipient [6:7521669836772604427:2419], Cookie: 0 2025-06-30T09:20:35.137283Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188536, Sender [6:7521669836772604368:2416], Recipient [6:7521669836772604427:2419]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-30T09:20:35.137285Z node 6 :PERSQUEUE TRACE: partition.h:626: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-06-30T09:20:35.137293Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [6:7521669836772604368:2416], Partition 0, Sender [6:7521669836772604368:2416], Recipient [6:7521669836772604427:2419], Cookie: 0 2025-06-30T09:20:35.137310Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188491, Sender [6:7521669836772604368:2416], Recipient [6:7521669836772604427:2419]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-30T09:20:35.137313Z node 6 :PERSQUEUE TRACE: partition.h:602: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-30T09:20:35.137385Z node 6 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { 
PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-30T09:20:35.137424Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [6:7521669836772604427:2419], Recipient [6:7521669836772604368:2416]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-30T09:20:35.137438Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-30T09:20:35.137541Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 6 DataSize: 0 UsedReserveSize: 0 2025-06-30T09:20:35.137567Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][test-topic] ProcessPendingStats. PendingUpdates size 1 2025-06-30T09:20:35.137626Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271188001, Sender [6:7521669836772604366:2415], Recipient [6:7521669811002799560:2152]: NKikimrPQ.TEvPeriodicTopicStats PathId: 13 Generation: 1 Round: 6 DataSize: 0 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-06-30T09:20:35.137640Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4998: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-30T09:20:35.137647Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 13] DataSize 0 UsedReserveSize 0 2025-06-30T09:20:35.137654Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099998s, queue# 1 2025-06-30T09:20:35.138152Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [6:7521669836772604366:2415], Recipient [6:7521669811002799560:2152]: NKikimrSchemeOp.TDescribePath PathId: 13 SchemeshardId: 72057594046644480 2025-06-30T09:20:35.138159Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:20:35.170938Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7521669836772604368:2416], Partition 0, Sender [0:0:0], Recipient [6:7521669836772604427:2419], Cookie: 0 2025-06-30T09:20:35.170969Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7521669836772604427:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:35.170976Z node 6 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-30T09:20:35.170994Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-30T09:20:35.171024Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-30T09:20:35.171029Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-30T09:20:35.171037Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 >> DBase::KIKIMR_15598_Many_MemTables [GOOD] >> TGRpcCmsTest::AuthTokenTest [GOOD] |80.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |80.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |80.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/storagepoolmon/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TPQTest::TestSetClientOffset [GOOD] >> TPQTest::TestStatusWithMultipleConsumers >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-ExternalHive |80.7%| [LD] {RESULT} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Table [GOOD] >> TPQTest::TestStatusWithMultipleConsumers [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndWait >> TGRpcCmsTest::SimpleTenantsTestSyncOperation [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestListQueueTags [GOOD] Test command err: 2025-06-30T09:20:04.693628Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669727547616542:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:04.693677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004afb/r3tmp/tmpIniUX7/pdisk_1.dat 2025-06-30T09:20:04.769570Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12160, node 1 2025-06-30T09:20:04.803103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:04.803121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:04.803124Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:04.803149Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:04.803175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:04.803186Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:04.811293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19630 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:04.847668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:04.851838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 TClient is connected to server localhost:19630 2025-06-30T09:20:04.876416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:20:04.877316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:20:04.877790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-30T09:20:04.883900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:04.949011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:20:04.967932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:04.991600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.003781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.019974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.032934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:20:05.045010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.059600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:05.073403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:20:05.159396Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669731842585181:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.159417Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669731842585170:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.159443Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:05.160229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:05.162669Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669731842585184:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715673 completed, doublechecking } 2025-06-30T09:20:05.236382Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669731842585235:2857] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:05.294034Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz025wh6czbf7bdthmgarkyd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjYxZDc5NmQtOGYzNGZmZWUtM2NlOTYwMmItMmFhMzMzYjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:20:05.313057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:20:05.325421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable ... 5-06-30T09:20:36.761909Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [] Sending executed reply 2025-06-30T09:20:36.761920Z node 7 :SQS INFO: purge.cpp:35: Request [89b8d5ff-467bd85e-efa9c2e5-c2ddbf7d] Create purge actor for queue /Root/SQS/cloud4/000000000000000301v0 2025-06-30T09:20:36.761924Z node 7 :SQS DEBUG: action.h:627: Request [349a3ff2-fd6d5415-c36c9a0-9cff7f42] Get configuration duration: 6ms 2025-06-30T09:20:36.761928Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [] Sending executed reply 2025-06-30T09:20:36.761929Z node 7 :SQS TRACE: action.h:647: Request [349a3ff2-fd6d5415-c36c9a0-9cff7f42] Got configuration. 
Root url: http://ghrun-x4qzxsyz2q.auto.internal:8771, Shards: 1, Fail: 0 2025-06-30T09:20:36.761935Z node 7 :SQS TRACE: action.h:427: Request [349a3ff2-fd6d5415-c36c9a0-9cff7f42] DoRoutine 2025-06-30T09:20:36.761959Z node 7 :SQS TRACE: action.h:264: Request [349a3ff2-fd6d5415-c36c9a0-9cff7f42] SendReplyAndDie from action actor { ListQueueTags { RequestId: "349a3ff2-fd6d5415-c36c9a0-9cff7f42" } } 2025-06-30T09:20:36.761981Z node 7 :SQS TRACE: queue_leader.cpp:2117: Infly load reply for shard [cloud4/000000000000000101v0/3]: { Status: 48 TxId: 281474976715711 Step: 1751275236809 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "createdTimestamp" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } Member { Name: "infly" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "DelayDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "RandomId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "VisibilityDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "inflyCount" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 3 } } } } } } } Member { Name: "inflyVersion" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "messageCount" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 3 } } } } } } } Member { Name: "readOffset" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } Value { Struct { Optional { Optional { Uint64: 1751275236654 } } } Struct { Optional { } } Struct { Optional { Optional { Int64: 0 } } } Struct { Optional { Uint64: 0 } } Struct { Optional { Optional { Int64: 0 } } } Struct { Optional { Optional { Uint64: 0 } } } } } } 2025-06-30T09:20:36.761989Z node 7 :SQS TRACE: proxy_service.h:35: Request [349a3ff2-fd6d5415-c36c9a0-9cff7f42] Sending sqs response: { ListQueueTags { RequestId: "349a3ff2-fd6d5415-c36c9a0-9cff7f42" } RequestId: "349a3ff2-fd6d5415-c36c9a0-9cff7f42" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true } 2025-06-30T09:20:36.761993Z node 7 :SQS DEBUG: queue_leader.cpp:2146: Infly version for shard [cloud4/000000000000000101v0/3]: 0 2025-06-30T09:20:36.762027Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse ListQueueTags { RequestId: "349a3ff2-fd6d5415-c36c9a0-9cff7f42" } RequestId: "349a3ff2-fd6d5415-c36c9a0-9cff7f42" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true 2025-06-30T09:20:36.762044Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7521669866903279663:2510]: ListQueueTags { RequestId: "349a3ff2-fd6d5415-c36c9a0-9cff7f42" } RequestId: "349a3ff2-fd6d5415-c36c9a0-9cff7f42" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true 2025-06-30T09:20:36.762066Z node 7 :SQS TRACE: queue_leader.cpp:2117: Infly load reply for shard [cloud4/000000000000000101v0/2]: { Status: 48 TxId: 281474976715713 Step: 
1751275236809 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "createdTimestamp" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } Member { Name: "infly" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "DelayDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "RandomId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "VisibilityDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "inflyCount" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 3 } } } } } } } Member { Name: "inflyVersion" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "messageCount" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 3 } } } } } } } Member { Name: "readOffset" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } Value { Struct { Optional { Optional { Uint64: 1751275236654 } } } Struct { Optional { } } Struct { Optional { Optional { Int64: 0 } } } Struct { Optional { Uint64: 0 } } Struct { Optional { Optional { Int64: 0 } } } Struct { Optional { Optional { Uint64: 0 } } } } } } 2025-06-30T09:20:36.762069Z node 7 :SQS DEBUG: queue_leader.cpp:2146: Infly version for shard [cloud4/000000000000000101v0/2]: 0 2025-06-30T09:20:36.762071Z node 7 :SQS TRACE: service.cpp:1483: Dec local leader ref for actor [7:7521669866903279664:3639]. 
Found: 1 2025-06-30T09:20:36.762100Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [349a3ff2-fd6d5415-c36c9a0-9cff7f42] HandleResponse: { ListQueueTags { RequestId: "349a3ff2-fd6d5415-c36c9a0-9cff7f42" } RequestId: "349a3ff2-fd6d5415-c36c9a0-9cff7f42" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true }, status: OK 2025-06-30T09:20:36.762117Z node 7 :SQS TRACE: queue_leader.cpp:2117: Infly load reply for shard [cloud4/000000000000000101v0/1]: { Status: 48 TxId: 281474976715712 Step: 1751275236809 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "createdTimestamp" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } Member { Name: "infly" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "DelayDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "RandomId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "VisibilityDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "inflyCount" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 3 } } } } } } } Member { Name: "inflyVersion" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "messageCount" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 3 } } } } } } } Member { Name: "readOffset" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } Value { Struct { Optional { Optional { Uint64: 1751275236654 } } } Struct { Optional { } } Struct { Optional { Optional { Int64: 0 } } } Struct { Optional { Uint64: 0 } } Struct { Optional { Optional { Int64: 0 } } } Struct { Optional { Optional { Uint64: 0 } } } } } } 2025-06-30T09:20:36.762117Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [349a3ff2-fd6d5415-c36c9a0-9cff7f42] Sending reply from proxy actor: { ListQueueTags { RequestId: "349a3ff2-fd6d5415-c36c9a0-9cff7f42" } RequestId: "349a3ff2-fd6d5415-c36c9a0-9cff7f42" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true } 2025-06-30T09:20:36.762123Z node 7 :SQS DEBUG: queue_leader.cpp:2146: Infly version for shard [cloud4/000000000000000101v0/1]: 0 2025-06-30T09:20:36.762167Z node 7 :SQS TRACE: queue_leader.cpp:2117: Infly load reply for shard [cloud4/000000000000000101v0/0]: { Status: 48 TxId: 281474976715714 Step: 1751275236809 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "createdTimestamp" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } Member { Name: "infly" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "DelayDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { 
Name: "RandomId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "VisibilityDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "inflyCount" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 3 } } } } } } } Member { Name: "inflyVersion" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "messageCount" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 3 } } } } } } } Member { Name: "readOffset" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } Value { Struct { Optional { Optional { Uint64: 1751275236654 } } } Struct { Optional { } } Struct { Optional { Optional { Int64: 0 } } } Struct { Optional { Uint64: 0 } } Struct { Optional { Optional { Int64: 0 } } } Struct { Optional { Optional { Uint64: 0 } } } } } } 2025-06-30T09:20:36.762174Z node 7 :SQS DEBUG: queue_leader.cpp:2146: Infly version for shard [cloud4/000000000000000101v0/0]: 0 2025-06-30T09:20:36.762183Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [ListQueueTags] requestId [349a3ff2-fd6d5415-c36c9a0-9cff7f42] Got succesfult GRPC response. 2025-06-30T09:20:36.762200Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListQueueTags] requestId [349a3ff2-fd6d5415-c36c9a0-9cff7f42] reply ok 2025-06-30T09:20:36.762260Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [ListQueueTags] requestId [349a3ff2-fd6d5415-c36c9a0-9cff7f42] Send metering event. HttpStatusCode: 200 IsFifo: 1 FolderId: folder4 RequestSizeInBytes: 530 ResponseSizeInBytes: 178 SourceAddress: 18d9:aef9:4411:0:d9:aef9:4411:0 ResourceId: 000000000000000301v0 Action: ListQueueTags 2025-06-30T09:20:36.762296Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:49060) <- (200 , 2 bytes) 2025-06-30T09:20:36.762337Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:49060) connection closed Http output full {} >> BuildStatsHistogram::Single [GOOD] >> TGRpcCmsTest::AlterRemoveTest [GOOD] >> BuildStatsHistogram::Single_Slices >> TGRpcCmsTest::RemoveWithAnotherTokenTest [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::AuthTokenTest [GOOD] Test command err: 2025-06-30T09:20:36.125176Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669868371900162:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:36.125201Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004886/r3tmp/tmpUFcgzG/pdisk_1.dat 2025-06-30T09:20:36.197572Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:36.211306Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 
TServer::EnableGrpc on GrpcPort 25468, node 1 2025-06-30T09:20:36.219043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:36.219060Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:36.219063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:36.219122Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:36.225759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:36.225801Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:36.227657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2378 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:36.272598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:20:36.296959Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7521669868371900860:2274], Recipient [1:7521669868371900560:2209]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:35476" } 2025-06-30T09:20:36.297000Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-30T09:20:36.297013Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.297018Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.297068Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:35476" 2025-06-30T09:20:36.297151Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1751275236296717) 2025-06-30T09:20:36.297308Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1751275236296717 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-30T09:20:36.297380Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-30T09:20:36.299143Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-30T09:20:36.299357Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236296717&action=1" } } } 2025-06-30T09:20:36.299404Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.299435Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-30T09:20:36.299484Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-30T09:20:36.299677Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-30T09:20:36.299710Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" 
ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-30T09:20:36.300767Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7521669868371900869:2275], Recipient [1:7521669868371900560:2209]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236296717&action=1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" } 2025-06-30T09:20:36.300781Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-30T09:20:36.300848Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236296717&action=1" } } 2025-06-30T09:20:36.301449Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-30T09:20:36.301469Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.301482Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7521669868371900865:2209], Recipient [1:7521669868371900560:2209]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.301486Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.301492Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.301494Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.301505Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-30T09:20:36.301511Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-30T09:20:36.301530Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-30T09:20:36.302861Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-30T09:20:36.302876Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.302879Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.302881Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.302897Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-30T09:20:36.302905Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1751275236296717 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:36.304240Z node 1 :CMS_TENANTS DEBUG: 
console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-30T09:20:36.304293Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.304342Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-30T09:20:36.304348Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-30T09:20:36.305394Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "Root" 2025-06-30T09:20:36.305910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard ... TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-30T09:20:36.548338Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7521669868371902088:2500], Recipient [1:7521669868371900560:2209]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:35476" } 2025-06-30T09:20:36.548348Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-30T09:20:36.548357Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.548384Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7521669868371900457:2199], Recipient [1:7521669868371900560:2209]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.548393Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-30T09:20:36.548480Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 
2025-06-30T09:20:36.549099Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7521669868371902092:2501], Recipient [1:7521669868371900560:2209]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:35476" } 2025-06-30T09:20:36.549109Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-30T09:20:36.549119Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.549178Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7521669868371900457:2199], Recipient [1:7521669868371900560:2209]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.549182Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-30T09:20:36.549287Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-30T09:20:36.549798Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7521669868371902099:2502], Recipient [1:7521669868371900560:2209]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:35476" } 2025-06-30T09:20:36.549810Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-30T09:20:36.549819Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.549833Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715659 2025-06-30T09:20:36.549852Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-30T09:20:36.549859Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-30T09:20:36.549905Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7521669868371900457:2199], Recipient [1:7521669868371900560:2209]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.549912Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-30T09:20:36.550003Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result 
{ [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-30T09:20:36.550027Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435076, Sender [1:7521669868371900967:2209], Recipient [1:7521669868371900560:2209]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-30T09:20:36.550035Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:979: StateWork, processing event TEvPrivate::TEvSubdomainReady 2025-06-30T09:20:36.550041Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.550043Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.550055Z node 1 :CMS_TENANTS DEBUG: console__update_confirmed_subdomain.cpp:22: TTxUpdateConfirmedSubdomain for tenant /Root/users/user-1 to 2 2025-06-30T09:20:36.550063Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=RUNNING txid=1751275236296717 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:36.550087Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2913: Update database for /Root/users/user-1 confirmedsubdomain=2 2025-06-30T09:20:36.550580Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7521669868371902103:2503], Recipient [1:7521669868371900560:2209]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:35476" } 2025-06-30T09:20:36.550589Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-30T09:20:36.550597Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.550638Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7521669868371900457:2199], Recipient [1:7521669868371900560:2209]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.550645Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-30T09:20:36.550769Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-30T09:20:36.553105Z node 1 :CMS_TENANTS DEBUG: console__update_confirmed_subdomain.cpp:42: TTxUpdateConfirmedSubdomain complete for /Root/users/user-1 2025-06-30T09:20:36.553128Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.559317Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7521669868371902131:2516], Recipient 
[1:7521669868371900560:2209]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:35476" } 2025-06-30T09:20:36.559336Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-30T09:20:36.559350Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.559405Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7521669868371900457:2199], Recipient [1:7521669868371900560:2209]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.559408Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-30T09:20:36.559557Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: RUNNING required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } TClient is connected to server localhost:2378 TClient::Ls request: /Root/users/user-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root/users/user-1" PathId: 1 SchemeshardId: 72075186224037897 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72075186224037897 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 3 ProcessingParams { Version: 3 PlanReso... 
(TRUNCATED) 2025-06-30T09:20:36.590680Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-30T09:20:36.590846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected >> AggregateStatistics::ShouldBePings >> TExportToS3Tests::CancelUponTransferringManyTablesShouldSucceed [GOOD] >> TExportToS3Tests::CancelledExportEndTime >> TSchemeShardExtSubDomainTest::CreateAndWait [GOOD] >> TSchemeShardExtSubDomainTest::CreateItemsInsideExtSubdomainAtGSSwithoutTSS >> AggregateStatistics::ShouldBePings [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> DBase::KIKIMR_15598_Many_MemTables [GOOD] Test command err: 3 parts: [0:0:1:0:0:0:0] 167 rows, 7 pages, 1 levels: (91, 38) (166, 63) (325, 116) (394, 139) (481, 168) [0:0:2:0:0:0:0] 166 rows, 8 pages, 2 levels: (631, 218) (709, 244) (853, 292) (934, 319) (1087, 370) [0:0:3:0:0:0:0] 167 rows, 8 pages, 2 levels: (1156, 393) (1246, 423) (1396, 473) (1471, 498) (1633, 552) Checking BTree: adding part [0:0:1:0:0:0:0] data size (14.1KiB in total) adding group {0,0} PageId: 8 RowCount: 167 DataSize: 13685 ErasedRowCount: 0 LevelCount: 1 IndexSize: 371 added slice [0, 167) data size (13.4KiB - 0B) => 13.4KiB adding part [0:0:2:0:0:0:0] data size (14.6KiB in total) adding group {0,0} PageId: 11 RowCount: 166 DataSize: 14080 ErasedRowCount: 0 LevelCount: 2 IndexSize: 530 added slice [0, 166) data size (13.8KiB - 0B) => 27.1KiB adding part [0:0:3:0:0:0:0] data size (14.8KiB in total) adding group {0,0} PageId: 11 RowCount: 167 DataSize: 14255 ErasedRowCount: 0 LevelCount: 2 IndexSize: 530 added slice [0, 167) data size (13.9KiB - 0B) => 41KiB building histogram with row resolution 0, data size resolution 42B slicing part [0:0:1:0:0:0:0]: { {rows: [0, 166] keys: [{7, 10}, {553, 192}]} } slicing node Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 => take adding node future events -1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 slicing part [0:0:2:0:0:0:0]: { {rows: [0, 165] keys: [{556, 193}, {1087, 370}]} } slicing node Part: [0:0:2:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 14080 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 => take adding node future events -1 Part: [0:0:2:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 14080 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 slicing part [0:0:3:0:0:0:0]: { {rows: [0, 166] keys: [{1090, 371}, {1645, 556}]} } slicing node Part: [0:0:3:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14255 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0 => take adding node future events -1 Part: [0:0:3:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14255 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 42 closedRowCount: 0 closedDataSize: 0 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 6 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 
13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 1 closedRowCount: 0 openedRowCount: 167 nextHistogramRowCount: 0 adding part [0:0:1:0:0:0:0] data size (14.1KiB in total) adding group {0,0} PageId: 8 RowCount: 167 DataSize: 13685 ErasedRowCount: 0 LevelCount: 1 IndexSize: 371 added slice [0, 167) data size (13.4KiB - 0B) => 13.4KiB adding part [0:0:2:0:0:0:0] data size (14.6KiB in total) adding group {0,0} PageId: 11 RowCount: 166 DataSize: 14080 ErasedRowCount: 0 LevelCount: 2 IndexSize: 530 added slice [0, 166) data size (13.8KiB - 0B) => 27.1KiB adding part [0:0:3:0:0:0:0] data size (14.8KiB in total) adding group {0,0} PageId: 11 RowCount: 167 DataSize: 14255 ErasedRowCount: 0 LevelCount: 2 IndexSize: 530 added slice [0, 167) data size (13.9KiB - 0B) => 41KiB building histogram with row resolution 0, data size resolution 42B slicing part [0:0:1:0:0:0:0]: { {rows: [0, 166] keys: [{7, 10}, {553, 192}]} } slicing node Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 => take adding node future events -1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 slicing part [0:0:2:0:0:0:0]: { {rows: [0, 165] keys: [{556, 193}, {1087, 370}]} } slicing node Part: [0:0:2:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 14080 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 => take adding node future events -1 Part: [0:0:2:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 14080 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 slicing part [0:0:3:0:0:0:0]: { {rows: [0, 166] keys: [{1090, 371}, {1645, 556}]} } slicing node Part: [0:0:3:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14255 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0 => take adding node future events -1 Part: [0:0:3:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14255 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 42 closedRowCount: 0 closedDataSize: 0 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 6 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 1 closedRowCount: 0 openedRowCount: 167 nextHistogramRowCount: 0 adding event 0 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} 
State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 1 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 1 Level: 0 BeginRowId: 25 EndRowId: 50 BeginDataSize: 1974 EndDataSize: 3992 BeginKey: {91, 38} EndKey: {166, 63} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 1 Level: 0 BeginRowId: 25 EndRowId: 50 BeginDataSize: 1974 EndDataSize: 3992 BeginKey: {91, 38} EndKey: {166, 63} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 2 Level: 0 BeginRowId: 50 EndRowId: 74 BeginDataSize: 3992 EndDataSize: 5889 BeginKey: {166, 63} EndKey: {253, 92} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 2 Level: 0 BeginRowId: 50 EndRowId: 74 BeginDataSize: 3992 EndDataSize: 5889 BeginKey: {166, 63} EndKey: {253, 92} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 3 Level: 0 BeginRowId: 74 EndRowId: 96 BeginDataSize: 5889 EndDataSize: 7868 BeginKey: {253, 92} EndKey: {325, 116} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 3 Level: 0 BeginRowId: 74 EndRowId: 96 BeginDataSize: 5889 EndDataSize: 7868 BeginKey: {253, 92} EndKey: {325, 116} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 4 Level: 0 BeginRowId: 96 EndRowId: 119 BeginDataSize: 7868 EndDataSize: 9910 BeginKey: {325, 116} EndKey: {394, 139} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 4 Level: 0 BeginRowId: 96 EndRowId: 119 BeginDataSize: 7868 EndDataSize: 9910 BeginKey: {325, 116} EndKey: {394, 139} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 5 Level: 0 BeginRowId: 119 EndRowId: 144 BeginDataSize: 9910 EndDataSize: 11938 BeginKey: {394, 139} EndKey: {481, 168} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 5 Level: 0 BeginRowId: 119 EndRowId: 144 BeginDataSize: 9910 EndDataSize: 11938 BeginKey: {394, 139} EndKey: {481, 168} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 6 Level: 0 BeginRowId: 144 EndRowId: 167 BeginDataSize: 11938 EndDataSize: 13685 BeginKey: {481, 168} EndKey: {553, 192} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 6 Level: 0 BeginRowId: 144 EndRowId: 167 BeginDataSize: 11938 EndDataSize: 13685 BeginKey: {481, 168} EndKey: {553, 192} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 1 closedRowCount: 0 openedRowCount: 25 nextHistogramRowCount: 0 loading node by data size triggerPart: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 3 closedDataSize: 0 openedDataSize: 1974 nextHistogramDataSize: 42 loading node by data size triggerPart: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 1 closedDataSize: 0 openedDataSize: 1974 nextHistogramDataSize: 42 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 42 closedRowCount: 0 closedDataSize: 0 openedRowCount: 25 openedDataSize: 1974 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 18 
currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 3 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 42 closedRowCount: 0 closedDataSize: 0 openedRowCount: 25 openedDataSize: 1974 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 18 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 1 processing event IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 1 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 42 closedRowCount: 25 closedDataSize: 1974 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 17 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 2 iterating stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 26 nextHistogramDataSize: 1975 closedRowCount: 25 closedDataSize: 1974 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 17 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 1 Level: 0 BeginRowId: 25 EndRowId: 50 BeginDataSize: 1974 EndDataSize: 3992 BeginKey: {91, 38} EndKey: {166, 63} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 1 Level: 0 BeginRowId: 25 EndRowId: 50 BeginDataSize: 1974 EndDataSize: 3992 BeginKey: {91, 38} EndKey: {166, 63} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 1 Level: 0 BeginRowId: 25 EndRowId: 50 BeginDataSize: 1974 EndDataSize: 3992 BeginKey: {91, 38} EndKey: {166, 63} State: 1 closedRowCount: 25 openedRowCount: 25 nextHistogramRowCount: 26 loading node by data size t ... 
: 41 closedRowCount: 0 closedDataSize: 0 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 6 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 1 closedRowCount: 0 openedRowCount: 167 nextHistogramRowCount: 0 loading node by data size triggerPart: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 1 closedDataSize: 0 openedDataSize: 13565 nextHistogramDataSize: 41 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 41 closedRowCount: 0 closedDataSize: 0 openedRowCount: 167 openedDataSize: 13565 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 5 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 1 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 41 closedRowCount: 0 closedDataSize: 0 openedRowCount: 167 openedDataSize: 13565 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 5 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 1 processing event IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 1 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 41 closedRowCount: 167 closedDataSize: 13565 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 4 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 2 iterating stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 168 nextHistogramDataSize: 13566 closedRowCount: 167 closedDataSize: 13565 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 4 currentKeyPointer: IsBegin: 1 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 processing event IsBegin: 1 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 loading node by row count triggerPart: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 1 closedRowCount: 167 openedRowCount: 166 nextHistogramRowCount: 168 loading node by data size triggerPart: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} 
EndKey: {1087, 370} State: 1 closedDataSize: 13565 openedDataSize: 13940 nextHistogramDataSize: 13566 checking stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 168 nextHistogramDataSize: 13566 closedRowCount: 167 closedDataSize: 13565 openedRowCount: 166 openedDataSize: 13940 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 3 currentKeyPointer: IsBegin: 1 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 1 iterating stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 168 nextHistogramDataSize: 13566 closedRowCount: 167 closedDataSize: 13565 openedRowCount: 166 openedDataSize: 13940 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 3 currentKeyPointer: IsBegin: 0 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 1 processing event IsBegin: 0 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 1 checking stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 168 nextHistogramDataSize: 13566 closedRowCount: 333 closedDataSize: 27505 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 2 currentKeyPointer: IsBegin: 0 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 2 iterating stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 334 nextHistogramDataSize: 27506 closedRowCount: 333 closedDataSize: 27505 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 2 currentKeyPointer: IsBegin: 1 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0 processing event IsBegin: 1 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0 loading node by row count triggerPart: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 1 closedRowCount: 333 openedRowCount: 167 nextHistogramRowCount: 334 loading node by data size triggerPart: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 1 closedDataSize: 27505 openedDataSize: 14115 nextHistogramDataSize: 27506 checking stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 334 nextHistogramDataSize: 27506 closedRowCount: 333 closedDataSize: 27505 openedRowCount: 167 openedDataSize: 14115 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 1 currentKeyPointer: IsBegin: 1 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 1 iterating stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 334 nextHistogramDataSize: 27506 closedRowCount: 333 closedDataSize: 27505 openedRowCount: 167 openedDataSize: 14115 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 1 currentKeyPointer: 
IsBegin: 0 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 1 processing event IsBegin: 0 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 1 checking stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 334 nextHistogramDataSize: 27506 closedRowCount: 500 closedDataSize: 41620 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 0 currentKeyPointer: IsBegin: 0 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 2 finished stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 334 nextHistogramDataSize: 27506 closedRowCount: 500 closedDataSize: 41620 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 0 Touched 0% bytes, 0 pages RowCountHistogram: 33% (actual 33%) key = (553, 192) value = 167 (actual 166 - 0% error) 33% (actual 33%) key = (1087, 370) value = 333 (actual 332 - 0% error) 33% (actual 33%) DataSizeHistogram: 32% (actual 32%) key = (553, 192) value = 13565 (actual 13565 - 0% error) 33% (actual 33%) key = (1087, 370) value = 27505 (actual 27505 - 0% error) 33% (actual 33%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 33% (actual 33%) key = (556, 193) value = 167 (actual 167 - 0% error) 33% (actual 33%) key = (1090, 371) value = 333 (actual 333 - 0% error) 33% (actual 33%) DataSizeHistogram: 32% (actual 32%) key = (556, 193) value = 13565 (actual 13565 - 0% error) 33% (actual 33%) key = (1090, 371) value = 27505 (actual 27505 - 0% error) 33% (actual 33%) Checking Mixed: Touched 0% bytes, 0 pages RowCountHistogram: 100% (actual 100%) DataSizeHistogram: 100% (actual 100%) Got : 24000 2106439 49449 38 44 Expected: 24000 2106439 49449 38 44 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 49449 20 23 Expected: 12816 1121048 49449 20 23 Got : 24000 3547100 81694 64 44 Expected: 24000 3547100 81694 64 44 { [1012, 1475), [1682, 1985), [2727, 3553), [3599, 3992), [5397, 7244), [9181, 9807), [9993, 10178), [12209, 14029), [15089, 15342), [16198, 16984), [17238, 18436), [21087, 21876), [23701, 23794) } Got : 9582 1425198 81694 26 17 Expected: 9582 1425198 81694 26 17 Got : 24000 2460139 23760 42 41 Expected: 24000 2460139 23760 42 41 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060798 23760 18 18 Expected: 10440 1060798 23760 18 18 Got : 24000 4054050 46562 68 43 Expected: 24000 4054050 46562 68 43 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2277890 46562 38 24 Expected: 13570 2277890 46562 38 24 Got : 24000 2106459 49449 38 44 Expected: 24000 2106459 49449 38 44 Got : 24000 2460219 23555 41 41 Expected: 24000 2460219 23555 41 41 Got : 24000 4054270 46543 66 43 Expected: 24000 4054270 46543 66 43 Got : 24000 2106479 49555 38 
44 Expected: 24000 2106479 49555 38 44 Got : 24000 2460259 23628 41 41 Expected: 24000 2460259 23628 41 41 Got : 24000 4054290 46640 65 43 Expected: 24000 4054290 46640 65 43 Got : 24000 2106439 66674 3 4 Expected: 24000 2106439 66674 3 4 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 66674 2 2 Expected: 12816 1121048 66674 2 2 Got : 24000 2460139 33541 4 4 Expected: 24000 2460139 33541 4 4 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060798 33541 1 1 Expected: 10440 1060798 33541 1 1 Got : 24000 4054050 64742 7 4 Expected: 24000 4054050 64742 7 4 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2234982 64742 4 2 Expected: 13570 2234982 64742 4 2 ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::SimpleTenantsTestSyncOperation [GOOD] Test command err: 2025-06-30T09:20:36.522163Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669868573670080:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:36.522199Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004881/r3tmp/tmpxNX1QB/pdisk_1.dat 2025-06-30T09:20:36.614990Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:36.625436Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:36.625461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:36.626918Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 3050, node 1 2025-06-30T09:20:36.627319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:36.633015Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:36.633028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:36.633030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:36.633081Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12461 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:36.668270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:36.687073Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7521669868573670729:2273], Recipient [1:7521669868573670534:2191]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { operation_params { operation_mode: SYNC } path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:55658" } 2025-06-30T09:20:36.687096Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-30T09:20:36.687104Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.687107Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.687140Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { operation_params { operation_mode: SYNC } path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:55658" 2025-06-30T09:20:36.687183Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1751275236687017) 2025-06-30T09:20:36.687306Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1751275236687017 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-30T09:20:36.687368Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-30T09:20:36.688686Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 
2025-06-30T09:20:36.688819Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236687017&action=1" } } } 2025-06-30T09:20:36.688872Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.688900Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-30T09:20:36.688936Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-30T09:20:36.688976Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285139, Sender [1:7521669868573670729:2273], Recipient [1:7521669868573670534:2191]: NKikimr::NConsole::TEvConsole::TEvNotifyOperationCompletionRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236687017&action=1" } UserToken: "" PeerName: "ipv6:[::1]:55658" } 2025-06-30T09:20:36.688984Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:968: StateWork, processing event TEvConsole::TEvNotifyOperationCompletionRequest 2025-06-30T09:20:36.689075Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:3443: Add subscription to /Root/users/user-1 for [1:7521669868573670729:2273] 2025-06-30T09:20:36.689092Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3451: Send TEvConsole::TEvNotifyOperationCompletionResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236687017&action=1" } } 2025-06-30T09:20:36.690160Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 4 2025-06-30T09:20:36.690208Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-30T09:20:36.692180Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-30T09:20:36.692195Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.692207Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7521669868573670735:2191], Recipient [1:7521669868573670534:2191]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.692211Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.692216Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.692219Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.692229Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 
2025-06-30T09:20:36.692242Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-30T09:20:36.692260Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-30T09:20:36.693761Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-30T09:20:36.693773Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.693775Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.693777Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.693795Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-30T09:20:36.693804Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1751275236687017 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:36.695192Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-30T09:20:36.695249Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.695263Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-30T09:20:36.695269Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-30T09:20:36.696292Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-30T09:20:36.696825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:20:36.697659Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/us ... 
onsole_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976710660 2025-06-30T09:20:36.950269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5503: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 3] by tx: 281474976710660 2025-06-30T09:20:36.950903Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976710660 2025-06-30T09:20:36.961206Z node 3 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72075186224037888 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found - using supplied 72075186224037888 2025-06-30T09:20:36.961993Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976710660 2025-06-30T09:20:36.962010Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-30T09:20:36.962025Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:36.962063Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7521669868573672116:2191], Recipient [1:7521669868573670534:2191]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:36.962073Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:36.962081Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.962083Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.962092Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-30T09:20:36.962100Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1751275236947121 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:36.962122Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1751275236947121 issue= 2025-06-30T09:20:36.964948Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-30T09:20:36.964981Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.964991Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.965089Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7521669868573670393:2208], Recipient [1:7521669868573670534:2191]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.965101Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-30T09:20:36.965111Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.965117Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.965125Z node 1 :CMS_TENANTS DEBUG: 
console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-30T09:20:36.965132Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1751275236947121 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:36.965479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:39: Got DeleteTabletReply with Forward response from Hive 72075186224037888 to Hive 72057594037968897 shardIdx 72057594046644480:1 2025-06-30T09:20:36.966723Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-30T09:20:36.966752Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.966761Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-30T09:20:36.966805Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-30T09:20:36.966993Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-30T09:20:36.967020Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-30T09:20:36.968098Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-30T09:20:36.967773Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-30T09:20:36.967788Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-30T09:20:36.967791Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037895 not found 2025-06-30T09:20:36.967794Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-30T09:20:36.967797Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037896 not found 2025-06-30T09:20:36.967799Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037897 not found 2025-06-30T09:20:36.967802Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-30T09:20:36.967805Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-30T09:20:36.967808Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle 
TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037894 not found 2025-06-30T09:20:36.968764Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-30T09:20:36.968796Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7521669868573672202:2191], Recipient [1:7521669868573670534:2191]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-30T09:20:36.968817Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-30T09:20:36.968822Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.968824Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.968834Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-30T09:20:36.968841Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-30T09:20:36.970558Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-30T09:20:36.970564Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.970566Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.970567Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.970582Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1751275236947121 2025-06-30T09:20:36.970584Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1751275236947121 issue= 2025-06-30T09:20:36.970586Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1751275236947121 issue= 2025-06-30T09:20:36.970587Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-30T09:20:36.970606Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1751275236947121 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:36.971657Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-30T09:20:36.971718Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2431: Send /Root/users/user-1 notification to [1:7521669868573672076:2588]: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236947121&action=2" ready: true status: SUCCESS } } 2025-06-30T09:20:36.971749Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.972850Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7521669868573672258:2594], Recipient [1:7521669868573670534:2191]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "" PeerName: "ipv6:[::1]:55658" } 2025-06-30T09:20:36.972864Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-30T09:20:36.972916Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3377: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: NOT_FOUND issues { message: "Unknown tenant /Root/users/user-1" severity: 1 } } } 2025-06-30T09:20:36.973489Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285123, Sender [1:7521669868573672261:2595], Recipient [1:7521669868573670534:2191]: NKikimr::NConsole::TEvConsole::TEvListTenantsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:55658" } 2025-06-30T09:20:36.973499Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:967: StateWork, processing event TEvConsole::TEvListTenantsRequest 2025-06-30T09:20:36.973556Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3421: Send TEvConsole::TEvListTenantsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.ListDatabasesResult] { } } } } 2025-06-30T09:20:36.974509Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-30T09:20:36.974564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected >> TGRpcCmsTest::DisabledTxTest [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-ExternalHive >> TFlatTableExecutor_VersionedRows::TestVersionedRowsLargeBlobs [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2NoRestart >> TPartBtreeIndexIteration::NoNodes_Groups [GOOD] |80.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::AlterRemoveTest [GOOD] Test command err: 2025-06-30T09:20:36.549640Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669867402610217:2189];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:36.550236Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004871/r3tmp/tmpfgNryD/pdisk_1.dat 2025-06-30T09:20:36.608949Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7649, node 1 2025-06-30T09:20:36.625926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:36.625939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:36.625942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:36.625995Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:36.647361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:36.647396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-30T09:20:36.649474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29103 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:36.684985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:36.708986Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7521669867402610787:2274], Recipient [1:7521669867402610518:2194]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:43448" } 2025-06-30T09:20:36.709007Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-30T09:20:36.709014Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.709018Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.709054Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:43448" 2025-06-30T09:20:36.709108Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1751275236708703) 2025-06-30T09:20:36.709218Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1751275236708703 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-30T09:20:36.709276Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 
state=NOT_ALLOCATED 2025-06-30T09:20:36.711000Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-30T09:20:36.711277Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236708703&action=1" } } } 2025-06-30T09:20:36.711323Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.711353Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-30T09:20:36.711399Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-30T09:20:36.711556Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-30T09:20:36.711591Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-30T09:20:36.712408Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7521669867402610795:2275], Recipient [1:7521669867402610518:2194]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236708703&action=1" } UserToken: "" } 2025-06-30T09:20:36.712418Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-30T09:20:36.712446Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236708703&action=1" } } 2025-06-30T09:20:36.719036Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-30T09:20:36.719056Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.719074Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7521669867402610792:2194], Recipient [1:7521669867402610518:2194]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.719078Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.719084Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.719087Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.719101Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-30T09:20:36.719109Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update 
pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-30T09:20:36.719130Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-30T09:20:36.721786Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-30T09:20:36.721806Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.721809Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.721811Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.721866Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-30T09:20:36.721884Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1751275236708703 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:36.723093Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-30T09:20:36.723137Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.723153Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-30T09:20:36.723168Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-30T09:20:36.723902Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-30T09:20:36.724471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:20:36.725869Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 2025-06-30T09:20:36.725889Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976715658 2025-06-30T09:20:36.726984Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Roo ... 
MS_TENANTS TRACE: console_tenants_manager.cpp:651: TSubdomainManip(/Root/users/user-1) send subdomain drop cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root/users" OperationType: ESchemeOpForceDropExtSubDomain Drop { Name: "user-1" } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-30T09:20:36.769024Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7521669867402610972:2282], Recipient [1:7521669867402610518:2194]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236766525&action=2" } UserToken: "" } 2025-06-30T09:20:36.769038Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-30T09:20:36.769087Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236766525&action=2" } } 2025-06-30T09:20:36.769253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5503: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 3] by tx: 281474976715660 2025-06-30T09:20:36.769344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpForceDropExtSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp:309) 2025-06-30T09:20:36.770369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5503: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 3] by tx: 281474976715660 2025-06-30T09:20:36.770516Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715659 2025-06-30T09:20:36.770518Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-30T09:20:36.770525Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-30T09:20:36.770545Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-30T09:20:36.770548Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.770561Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 2025-06-30T09:20:36.770567Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976715660 2025-06-30T09:20:36.770577Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435076, Sender [1:7521669867402610894:2194], Recipient [1:7521669867402610518:2194]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-30T09:20:36.770579Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:979: StateWork, processing event 
TEvPrivate::TEvSubdomainReady 2025-06-30T09:20:36.770582Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:3661: Ignoring ready subdomain for tenant /Root/users/user-1 in REMOVING_SUBDOMAIN state 2025-06-30T09:20:36.771243Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976715660 2025-06-30T09:20:36.774656Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715660 2025-06-30T09:20:36.774660Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-30T09:20:36.774670Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:36.774815Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7521669867402610967:2194], Recipient [1:7521669867402610518:2194]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:36.774820Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:36.774825Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.774826Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.774838Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-30T09:20:36.774844Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1751275236766525 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:36.774863Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1751275236766525 issue= 2025-06-30T09:20:36.776189Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-30T09:20:36.776205Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.776213Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.776271Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7521669867402610375:2196], Recipient [1:7521669867402610518:2194]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.776276Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-30T09:20:36.776282Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.776284Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.776296Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-30T09:20:36.776310Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1751275236766525 errorcode=STATUS_CODE_UNSPECIFIED 
issue= 2025-06-30T09:20:36.777856Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-30T09:20:36.777877Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.777889Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-30T09:20:36.777932Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-30T09:20:36.778084Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-30T09:20:36.778106Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-30T09:20:36.780349Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-30T09:20:36.780393Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7521669867402611062:2194], Recipient [1:7521669867402610518:2194]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-30T09:20:36.780411Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-30T09:20:36.780416Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.780418Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.780433Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-30T09:20:36.780438Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-30T09:20:36.782096Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-30T09:20:36.782112Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.782115Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.782117Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.782139Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1751275236766525 2025-06-30T09:20:36.782142Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1751275236766525 issue= 2025-06-30T09:20:36.782146Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1751275236766525 issue= 2025-06-30T09:20:36.782148Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-30T09:20:36.782178Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1751275236766525 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:36.783628Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-30T09:20:36.783668Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.821482Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7521669867402611082:2285], Recipient [1:7521669867402610518:2194]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236766525&action=2" } UserToken: "" } 2025-06-30T09:20:36.821496Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-30T09:20:36.821534Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236766525&action=2" ready: true status: SUCCESS } } >> TPartBtreeIndexIteration::NoNodes_History >> KqpRm::NotEnoughExecutionUnits >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2NoRestart [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2 [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1 [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1ToSchema2 [GOOD] >> TGenCompaction::OverloadFactorDuringForceCompaction ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::RemoveWithAnotherTokenTest [GOOD] Test command err: 2025-06-30T09:20:36.342900Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669866331007738:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:36.342929Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004884/r3tmp/tmpdvqmys/pdisk_1.dat 2025-06-30T09:20:36.444283Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:36.445936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:36.445965Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:36.447712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:36.454918Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 31220, node 1 2025-06-30T09:20:36.460908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:36.460925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:36.460927Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:36.460975Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18640 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:36.510265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:18640 2025-06-30T09:20:36.539068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:20:36.555568Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7521669866331008441:2274], Recipient [1:7521669866331008171:2206]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)" PeerName: "ipv6:[::1]:34830" } 2025-06-30T09:20:36.555587Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-30T09:20:36.555597Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.555600Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.555631Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)" PeerName: "ipv6:[::1]:34830" 2025-06-30T09:20:36.555678Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1751275236555005) 2025-06-30T09:20:36.555791Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1751275236555005 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-30T09:20:36.555848Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-30T09:20:36.557278Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-30T09:20:36.557473Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236555005&action=1" } } } 2025-06-30T09:20:36.557513Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.557538Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-30T09:20:36.557582Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-30T09:20:36.557816Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-30T09:20:36.557884Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-30T09:20:36.559310Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-30T09:20:36.559321Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.559339Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7521669866331008446:2206], Recipient [1:7521669866331008171:2206]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.559344Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-30T09:20:36.559349Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.559352Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.559368Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-30T09:20:36.559374Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for 
/Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-30T09:20:36.559399Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-30T09:20:36.560769Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7521669866331008461:2276], Recipient [1:7521669866331008171:2206]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236555005&action=1" } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)" } 2025-06-30T09:20:36.560779Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-30T09:20:36.560821Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236555005&action=1" } } 2025-06-30T09:20:36.560923Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-30T09:20:36.560928Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.560930Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.560931Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.560972Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-30T09:20:36.560978Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1751275236555005 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-30T09:20:36.561874Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-30T09:20:36.561924Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.561941Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-30T09:20:36.561943Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-30T09:20:36.562602Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@w ... 
rationRequest 2025-06-30T09:20:36.904250Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236900667&action=2" } } 2025-06-30T09:20:36.904857Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715663 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 2025-06-30T09:20:36.904875Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976715663 2025-06-30T09:20:36.905005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5503: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 3] by tx: 281474976715663 2025-06-30T09:20:36.905539Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976715663 2025-06-30T09:20:36.910074Z node 3 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72075186224037888 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found - using supplied 72075186224037888 2025-06-30T09:20:36.910219Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715663 2025-06-30T09:20:36.910231Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-30T09:20:36.910241Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:36.910277Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7521669866331009638:2206], Recipient [1:7521669866331008171:2206]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:36.910286Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-30T09:20:36.910293Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.910295Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.910307Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-30T09:20:36.910316Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1751275236900667 errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 2025-06-30T09:20:36.910333Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1751275236900667 issue=AccessDenied: Access denied for request 2025-06-30T09:20:36.911510Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-30T09:20:36.911540Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.911557Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.911619Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7521669866331008026:2195], Recipient [1:7521669866331008171:2206]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-30T09:20:36.911631Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-30T09:20:36.911636Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.911637Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.911643Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-30T09:20:36.911654Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1751275236900667 errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 2025-06-30T09:20:36.912999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:39: Got DeleteTabletReply with Forward response from Hive 72075186224037888 to Hive 72057594037968897 shardIdx 72057594046644480:1 2025-06-30T09:20:36.913667Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-30T09:20:36.913687Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.913704Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-30T09:20:36.913754Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-30T09:20:36.914191Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 2 } } Success: true ConfigTxSeqNo: 10 2025-06-30T09:20:36.914212Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 2 } } } 2025-06-30T09:20:36.914881Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-30T09:20:36.914899Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-30T09:20:36.914903Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037894 not found 2025-06-30T09:20:36.914906Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037897 not found 2025-06-30T09:20:36.914909Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-30T09:20:36.914912Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, 
TabletId: 72075186224037889 not found 2025-06-30T09:20:36.914915Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037895 not found 2025-06-30T09:20:36.914918Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037896 not found 2025-06-30T09:20:36.914923Z node 3 :HIVE WARN: hive_impl.cpp:495: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-30T09:20:36.915908Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-30T09:20:36.916502Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 11 2025-06-30T09:20:36.916536Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7521669866331009713:2206], Recipient [1:7521669866331008171:2206]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-30T09:20:36.916553Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-30T09:20:36.916566Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.916569Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.916584Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-30T09:20:36.916593Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-30T09:20:36.916761Z node 1 :HIVE WARN: tx__block_storage_result.cpp:56: HIVE#72057594037968897 THive::TTxBlockStorageResult retrying for 72075186224037888 because of ERROR 2025-06-30T09:20:36.917960Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-30T09:20:36.917973Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-30T09:20:36.917976Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.917978Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-30T09:20:36.917995Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1751275236900667 2025-06-30T09:20:36.917997Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1751275236900667 issue=AccessDenied: Access denied for request 2025-06-30T09:20:36.918000Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1751275236900667 issue=AccessDenied: Access denied for request 2025-06-30T09:20:36.918002Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-30T09:20:36.918025Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1751275236900667 code=SUCCESS errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 
2025-06-30T09:20:36.923093Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-30T09:20:36.923131Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-30T09:20:36.955690Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7521669866331009748:2497], Recipient [1:7521669866331008171:2206]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236900667&action=2" } UserToken: "" } 2025-06-30T09:20:36.955707Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-30T09:20:36.955794Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1751275236900667&action=2" ready: true status: SUCCESS } } 2025-06-30T09:20:36.960864Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-30T09:20:36.960928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected >> TSchemeShardExtSubDomainTest::CreateItemsInsideExtSubdomainAtGSSwithoutTSS [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst >> TVersions::WreckHead [GOOD] >> TVersions::WreckHeadReverse >> KqpRm::SnapshotSharingByExchanger >> TGenCompaction::OverloadFactorDuringForceCompaction [GOOD] >> TGenCompaction::ForcedCompactionNoGenerations [GOOD] >> TGenCompaction::ForcedCompactionWithGenerations [GOOD] >> TGenCompaction::ForcedCompactionWithFinalParts [GOOD] >> TGenCompaction::ForcedCompactionByDeletedRows [GOOD] >> TGenCompaction::ForcedCompactionByUnreachableMvccData [GOOD] >> TGenCompaction::ForcedCompactionByUnreachableMvccDataRestart [GOOD] >> TGenCompaction::ForcedCompactionByUnreachableMvccDataBorrowed [GOOD] >> TIterator::Basics [GOOD] >> TIterator::External [GOOD] >> TIterator::Single >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBePings [GOOD] Test command err: 2025-06-30T09:20:37.737544Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-30T09:20:37.738464Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-30T09:20:37.840810Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 2 2025-06-30T09:20:37.840861Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-30T09:20:37.840873Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-30T09:20:37.841145Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:19:2055], server id = [0:0:0], tablet id = 2, status = ERROR 2025-06-30T09:20:37.841156Z node 2 :STATISTICS DEBUG: service_impl.cpp:1110: Skip 
EvClientConnected 2025-06-30T09:20:37.841176Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:16:2056], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-30T09:20:37.841181Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-30T09:20:37.841189Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 2 2025-06-30T09:20:37.841197Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::DisabledTxTest [GOOD] Test command err: 2025-06-30T09:20:37.069531Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669870375059588:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:37.069628Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004856/r3tmp/tmpQStFDh/pdisk_1.dat 2025-06-30T09:20:37.172570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:37.172607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:37.173041Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:37.192699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:37.210107Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 13819, node 1 2025-06-30T09:20:37.216983Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:37.216998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:37.216999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:37.217038Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27808 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:37.264803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:37.327951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) 2025-06-30T09:20:37.340707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) >> TIterator::Single [GOOD] >> TIterator::SingleReverse >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst-ExternalHive >> BasicUsage::TWriteSession_WriteEncoded [GOOD] >> CompressExecutor::TestReorderedExecutor >> KqpRm::NotEnoughExecutionUnits [GOOD] >> TIterator::SingleReverse [GOOD] >> TIterator::Mixed >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst >> KqpRm::ManyTasks >> BuildStatsHistogram::Single_Slices [GOOD] >> BuildStatsHistogram::Single_History >> TExportToS3Tests::CancelledExportEndTime [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent >> KqpRm::NodesMembershipByExchanger >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst >> 
TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-false >> KqpRm::SingleTask ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/unittest >> TPQTest::TestStatusWithMultipleConsumers [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-30T09:16:52.232293Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:52.232324Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: [1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-30T09:16:52.237155Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:52.240478Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-30T09:16:52.240807Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-30T09:16:52.241744Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-30T09:16:52.242362Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-30T09:16:52.242911Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:190:2201] 2025-06-30T09:16:52.251978Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|24df9f3b-8e2e740e-32ac2083-dfff7abc_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-06-30T09:16:52.252153Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|c0de13ad-e9cecc62-1f36a4d6-94a86f2c_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-30T09:16:52.262089Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 
owner1|fb770550-bcf95fb4-66c85d39-d5f1892c_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:112:2057] recipient: [2:105:2137] 2025-06-30T09:16:52.670209Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:52.670252Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:158:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:181:2057] recipient: [2:14:2061] 2025-06-30T09:16:52.675676Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:52.676002Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 2 actor [2:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-30T09:16:52.676150Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:187:2198] 2025-06-30T09:16:52.677020Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:187:2198] 2025-06-30T09:16:52.677548Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:188:2199] 2025-06-30T09:16:52.678193Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:188:2199] 2025-06-30T09:16:52.680337Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|3781899-7f7b9f33-f5d90fa0-d17bf2a0_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-06-30T09:16:52.680459Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|196c6e32-8302fd12-365f9e98-40d37b52_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-30T09:16:52.683496Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|be4168eb-311f97da-c26a628f-62e7dca_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: 
[3:107:2057] recipient: [3:105:2137] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:112:2057] recipient: [3:105:2137] 2025-06-30T09:16:52.932050Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:52.932083Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:158:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:183:2057] recipient: [3:14:2061] 2025-06-30T09:16:52.943592Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:52.943877Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 3 actor [3:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-06-30T09:16:52.944107Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:189:2200] 2025-06-30T09:16:52.944790Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [3:189:2200] 2025-06-30T09:16:52.945301Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [3:190:2201] 2025-06-30T09:16:52.945769Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [3:190:2201] 2025-06-30T09:16:52.952105Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|49996713-5b1f09e5-a290f1be-ba9adb8d_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-06-30T09:16:52.952224Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|f54a0cb-1af51264-9495b3df-e5c243a5_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-30T09:16:52.954984Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|83a112c9-46a88187-e6d2e035-f7a14349_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:107:2057] recipient: [4:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:107:2057] recipient: [4:105:2137] Leader for TabletID 72057594037927937 is [4:111:2141] sender: [4:112:2057] recipient: [4:105:2137] 2025-06-30T09:16:53.207680Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not 
enabled in BillingMeteringConfig 2025-06-30T09:16:53.207710Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:153:2057] recipient: [4:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:153:2057] recipient: [4:151:2172] Leader for TabletID 72057594037927938 is [4:157:2176] sender: [4:158:2057] recipient: [4:151:2172] Leader for TabletID 72057594037927937 is [4:111:2141] sender: [4:183:2057] recipient: [4:14:2061] 2025-06-30T09:16:53.212424Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:53.212659Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 4 actor [4:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 4 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 4 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 4 Important: false } 2025-06-30T09:16:53.212839Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: State ... 2:463:2455] connected; active server actors: 1 2025-06-30T09:20:37.330372Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:468:2460] connected; active server actors: 1 2025-06-30T09:20:37.330698Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:473:2465] connected; active server actors: 1 2025-06-30T09:20:37.331109Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:478:2470] connected; active server actors: 1 2025-06-30T09:20:37.331474Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:483:2475] connected; active server actors: 1 2025-06-30T09:20:37.331814Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:488:2480] connected; active server actors: 1 2025-06-30T09:20:37.332233Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:493:2485] connected; active server actors: 1 2025-06-30T09:20:37.332608Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:498:2490] connected; active server actors: 1 2025-06-30T09:20:37.332984Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:503:2495] connected; active server actors: 1 2025-06-30T09:20:37.333341Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:508:2500] connected; active server actors: 1 2025-06-30T09:20:37.333689Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: 
[72057594037927938][rt3.dc1--topic] pipe [242:513:2505] connected; active server actors: 1 2025-06-30T09:20:37.334185Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:518:2510] connected; active server actors: 1 2025-06-30T09:20:37.334595Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:523:2515] connected; active server actors: 1 2025-06-30T09:20:37.335090Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:528:2520] connected; active server actors: 1 2025-06-30T09:20:37.335541Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:533:2525] connected; active server actors: 1 2025-06-30T09:20:37.335975Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:538:2530] connected; active server actors: 1 2025-06-30T09:20:37.336410Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:543:2535] connected; active server actors: 1 2025-06-30T09:20:37.336840Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:548:2540] connected; active server actors: 1 2025-06-30T09:20:37.337232Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:553:2545] connected; active server actors: 1 2025-06-30T09:20:37.337674Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:558:2550] connected; active server actors: 1 2025-06-30T09:20:37.338102Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:563:2555] connected; active server actors: 1 2025-06-30T09:20:37.338500Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:568:2560] connected; active server actors: 1 2025-06-30T09:20:37.338928Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:573:2565] connected; active server actors: 1 2025-06-30T09:20:37.339340Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:578:2570] connected; active server actors: 1 2025-06-30T09:20:37.339758Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:583:2575] connected; active server actors: 1 2025-06-30T09:20:37.340173Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:588:2580] connected; active server actors: 1 2025-06-30T09:20:37.340625Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:593:2585] connected; active server actors: 1 2025-06-30T09:20:37.341061Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:598:2590] connected; active server actors: 1 2025-06-30T09:20:37.341511Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:603:2595] 
connected; active server actors: 1 2025-06-30T09:20:37.341956Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:608:2600] connected; active server actors: 1 2025-06-30T09:20:37.342367Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:613:2605] connected; active server actors: 1 2025-06-30T09:20:37.342828Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:618:2610] connected; active server actors: 1 2025-06-30T09:20:37.343285Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:623:2615] connected; active server actors: 1 2025-06-30T09:20:37.343697Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:628:2620] connected; active server actors: 1 2025-06-30T09:20:37.344147Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:633:2625] connected; active server actors: 1 2025-06-30T09:20:37.344629Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:638:2630] connected; active server actors: 1 2025-06-30T09:20:37.345036Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:643:2635] connected; active server actors: 1 2025-06-30T09:20:37.345488Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:648:2640] connected; active server actors: 1 2025-06-30T09:20:37.345919Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:653:2645] connected; active server actors: 1 2025-06-30T09:20:37.346345Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:658:2650] connected; active server actors: 1 2025-06-30T09:20:37.346786Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:663:2655] connected; active server actors: 1 2025-06-30T09:20:37.347201Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:668:2660] connected; active server actors: 1 2025-06-30T09:20:37.347630Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:673:2665] connected; active server actors: 1 2025-06-30T09:20:37.348053Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:678:2670] connected; active server actors: 1 2025-06-30T09:20:37.349039Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:683:2675] connected; active server actors: 1 2025-06-30T09:20:37.349861Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:688:2680] connected; active server actors: 1 2025-06-30T09:20:37.350407Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:693:2685] connected; active server actors: 1 2025-06-30T09:20:37.350778Z 
node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:698:2690] connected; active server actors: 1 2025-06-30T09:20:37.352298Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:703:2695] connected; active server actors: 1 2025-06-30T09:20:37.352972Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:708:2700] connected; active server actors: 1 2025-06-30T09:20:37.353533Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:713:2705] connected; active server actors: 1 2025-06-30T09:20:37.354023Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:718:2710] connected; active server actors: 1 2025-06-30T09:20:37.354442Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:723:2715] connected; active server actors: 1 2025-06-30T09:20:37.354910Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:728:2720] connected; active server actors: 1 2025-06-30T09:20:37.355320Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:733:2725] connected; active server actors: 1 2025-06-30T09:20:37.355737Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:738:2730] connected; active server actors: 1 2025-06-30T09:20:37.356181Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:743:2735] connected; active server actors: 1 2025-06-30T09:20:37.356631Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:748:2740] connected; active server actors: 1 2025-06-30T09:20:37.357049Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:753:2745] connected; active server actors: 1 2025-06-30T09:20:37.357472Z node 242 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [242:758:2750], now have 1 active actors on pipe 2025-06-30T09:20:37.357663Z node 242 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [242:761:2753], now have 1 active actors on pipe 2025-06-30T09:20:37.357750Z node 242 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [242:764:2756], now have 1 active actors on pipe 2025-06-30T09:20:37.357852Z node 242 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [242:767:2759] connected; active server actors: 1 ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NotEnoughExecutionUnits [GOOD] Test command err: 2025-06-30T09:20:38.131857Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:184:2154] Bootstrap 2025-06-30T09:20:38.162032Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:184:2154] Become StateWork (SchemeCache [1:192:2159]) 2025-06-30T09:20:38.162167Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:185:2104] Bootstrap 2025-06-30T09:20:38.163508Z node 2 :TX_PROXY DEBUG: 
proxy_impl.cpp:453: actor# [2:185:2104] Become StateWork (SchemeCache [2:194:2107]) 2025-06-30T09:20:38.169089Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:38.172528Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:38.172584Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:20:38.173036Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:38.173170Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:20:38.173226Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:38.173232Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:38.173273Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:20:38.177179Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:20:38.177229Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:20:38.177262Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:20:38.177284Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:38.177314Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:38.177357Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:38.214789Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:38.214838Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:38.225730Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:38.225789Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:38.225806Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:38.225819Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:38.225856Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:38.225867Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} 
Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:38.225874Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:38.225884Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:38.236700Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:38.236745Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:38.247554Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:38.247616Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:38.247803Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:38.247821Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:20:38.250103Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:38.250123Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:20:38.250504Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:38.250878Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/004289/r3tmp/tmpKFcXG4/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-30T09:20:38.250964Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/cl3w/004289/r3tmp/tmpKFcXG4/pdisk_1.dat 2025-06-30T09:20:38.250974Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/cl3w/004289/r3tmp/tmpKFcXG4/pdisk_1.dat 2025-06-30T09:20:38.251192Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-30T09:20:38.251292Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:38.251320Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:38.251337Z 
node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:38.251383Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:38.251404Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:38.252035Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:38.252104Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:38.263287Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:20:38.263616Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-30T09:20:38.266260Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-30T09:20:38.266468Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/cl3w/004289/r3tmp/tmpKFcXG4/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-30T09:20:38.266618Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/cl3w/004289/r3tmp/tmpKFcXG4/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/cl3w/004289/r3tmp/tmpKFcXG4/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 208938125458320107 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-30T09:20:38.266812Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:38.266880Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:38.266885Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:38.266916Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:38.266925Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:38.266949Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:38.266964Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:38.266967Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:38.266972Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:38.266980Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:38.267051Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:38.267058Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:38.267062Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:38.267075Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:421:2309] 2025-06-30T09:20:38.267130Z node 2 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:38.267135Z node 2 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:38.267138Z node 2 :LOCAL 
DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:38.267148Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:423:2117] 2025-06-30T09:20:38.267739Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:38.267746Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:410:2305] 2025-06-30T09:20:38.267764Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:38.267767Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:411:2113] 2025-06-30T09:20:38.267968Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:421:2309]} 2025-06-30T09:20:38.267983Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:38.267991Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:38.267995Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:38.268114Z node 2 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:423:2117]} 2025-06-30T09:20:38.268152Z node 2 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:38.268157Z node 2 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:38.268159Z node 2 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:38.287436Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:38.287466Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-30T09:20:38.287472Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:38.287479Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
>> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_BTreeIndex >> TPartBtreeIndexIteration::NoNodes_History [GOOD] >> TPartBtreeIndexIteration::OneNode >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst >> TChargeBTreeIndex::NoNodes_Groups [GOOD] >> TChargeBTreeIndex::NoNodes_History >> KqpRm::NotEnoughMemory >> KqpRm::SingleSnapshotByExchanger >> KqpRm::ManyTasks [GOOD] >> KqpRm::Reduce >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst [GOOD] >> KqpRm::DisonnectNodes >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TIterator::Mixed [GOOD] >> TIterator::MixedReverse >> TExecutorDb::RandomOps [GOOD] >> TExecutorDb::FullScan >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-false |80.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |80.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |80.7%| [LD] {RESULT} $(B)/ydb/core/mind/ut/ydb-core-mind-ut >> KqpRm::SingleTask [GOOD] >> KqpJoinOrder::TPCHEveryQueryWorks-ColumnStore [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::CancelledExportEndTime [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:24.465936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:24.465963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.465969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:24.465975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:24.465987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:24.465991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:24.466001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.466014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:24.466118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:24.466187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:24.480098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:24.480118Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:24.482950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:24.483014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:24.483063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:24.484724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:24.484804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:24.484943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.484998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:24.487114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.487164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:24.487412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.487419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.487439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:24.487446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:24.487450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:24.487470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.488659Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:24.515355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:24.515453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.515546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:24.515556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:24.515613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:24.515632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:24.520197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.520258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:24.520347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.520361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:24.520369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:24.520376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:24.520959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.520973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:24.520981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:24.521417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-30T09:20:24.521429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.521447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.521457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:24.522341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:24.522796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:24.522852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:24.523053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.523083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:24.523091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.523159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:24.523169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.523212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:24.523228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:24.523655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.523665Z node 1 :FLAT_TX_SCHEMESHARD ... 
4046678944 TestWaitNotification wait txId: 102 2025-06-30T09:20:38.570075Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:20:38.570096Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:20:38.571190Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/export-102" OperationType: ESchemeOpBackup Backup { TableName: "0" NumberOfRetries: 0 S3Settings { Endpoint: "localhost:18715" Scheme: HTTP Bucket: "" ObjectKeyPattern: "" AccessKey: "" SecretKey: "" StorageClass: STORAGE_CLASS_UNSPECIFIED UseVirtualAddressing: true } Table { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" 
LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } NeedToBill: true SnapshotStep: 0 SnapshotTxId: 0 EnableChecksums: false EnablePermissions: false } Internal: true } TxId: 281474976710759 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:38.571307Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_backup_restore_common.h:586: TBackup Propose, path: /MyRoot/export-102/0, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.571348Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:20:38.571357Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976710759:0 type: TxBackup target path: [OwnerId: 72057594046678944, LocalPathId: 4] source path: 2025-06-30T09:20:38.571468Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976710759:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:38.571481Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpBackup, opId: 281474976710759:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-30T09:20:38.571799Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:62: NotifyTxCompletion export in-flight, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:20:38.571810Z node 4 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:20:38.572369Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976710759, response: Status: StatusAccepted TxId: 281474976710759 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:38.572421Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710759, database: /MyRoot, subject: , status: StatusAccepted, operation: BACKUP TABLE, path: /MyRoot/export-102/0 2025-06-30T09:20:38.572510Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6788: Handle: TEvModifySchemeTransactionResult: txId# 281474976710759, status# StatusAccepted 2025-06-30T09:20:38.572521Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6790: Message: Status: StatusAccepted TxId: 281474976710759 SchemeshardId: 72057594046678944 2025-06-30T09:20:38.572594Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.572605Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710759:0 ProgressState, operation type: TxBackup, at tablet# 72057594046678944 2025-06-30T09:20:38.572612Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976710759:0 ProgressState no shards to create, do next state 2025-06-30T09:20:38.572618Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710759:0 2 -> 3 2025-06-30T09:20:38.573476Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:71: TTxOperationProposeCancelTx Execute, at schemeshard: 72057594046678944, message: TargetTxId: 281474976710759 TxId: 102 2025-06-30T09:20:38.573491Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_cancel_tx.cpp:37: Execute cancel tx: opId# 102:0, target opId# 281474976710759:0 2025-06-30T09:20:38.573619Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.573627Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:58: TBackup TConfigurePart ProgressState, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.573654Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_backup.cpp:41: Propose backup to datashard 72075186233409547 txid 281474976710759:0 at schemeshard 72057594046678944 2025-06-30T09:20:38.574236Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:88: TTxOperationProposeCancelTx Complete, at schemeshard: 72057594046678944 2025-06-30T09:20:38.574304Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.574312Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:58: TBackup TConfigurePart ProgressState, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.574331Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_backup.cpp:41: Propose backup to datashard 72075186233409547 txid 281474976710759:0 at schemeshard 72057594046678944 2025-06-30T09:20:38.574620Z node 4 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6875: Handle: TEvCancelTxResult: Cookie: 102, at schemeshard: 72057594046678944 2025-06-30T09:20:38.574643Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6877: Message: Status: StatusAccepted Result: "Cancelled at SchemeShard" TargetTxId: 281474976710759 TxId: 102 2025-06-30T09:20:38.574940Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710759:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-06-30T09:20:38.574980Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976710759, partId: 0, tablet: 72075186233409547 2025-06-30T09:20:38.575473Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710759:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-06-30T09:20:38.576050Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:20:38.576069Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:565:2521] TestWaitNotification: OK eventTxId 102 ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::ManyTasks [GOOD] Test command err: 2025-06-30T09:20:38.612615Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:184:2154] Bootstrap 2025-06-30T09:20:38.638678Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:184:2154] Become StateWork (SchemeCache [1:193:2160]) 2025-06-30T09:20:38.638845Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:185:2104] Bootstrap 2025-06-30T09:20:38.640345Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:185:2104] Become StateWork (SchemeCache [2:196:2107]) 2025-06-30T09:20:38.653125Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:38.656886Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:38.656989Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:20:38.657161Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:38.658007Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:20:38.658092Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:38.658100Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:38.658142Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:20:38.661777Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:20:38.661830Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:20:38.661870Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:20:38.661922Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:38.661938Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:38.661981Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:38.696813Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:38.696884Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:38.709285Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:38.709350Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:38.709370Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:38.709384Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:38.709412Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:38.709423Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:38.709431Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:38.709442Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:38.720657Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:38.720714Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:38.731778Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:38.731858Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:38.732072Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:38.732079Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:20:38.733693Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:38.733708Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:20:38.734152Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:38.734475Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/004258/r3tmp/tmpiAneDZ/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-30T09:20:38.734554Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/cl3w/004258/r3tmp/tmpiAneDZ/pdisk_1.dat 2025-06-30T09:20:38.734561Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/cl3w/004258/r3tmp/tmpiAneDZ/pdisk_1.dat 2025-06-30T09:20:38.734729Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-30T09:20:38.734835Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:38.734856Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:38.734868Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:38.734903Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:38.735001Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:38.735368Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:38.735433Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:38.746600Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:20:38.749557Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-30T09:20:38.749789Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error 
Details# Can't open file "/home/runner/.ya/build/build_root/cl3w/004258/r3tmp/tmpiAneDZ/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-30T09:20:38.750013Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/cl3w/004258/r3tmp/tmpiAneDZ/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/cl3w/004258/r3tmp/tmpiAneDZ/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 4508230207810409651 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-30T09:20:38.750099Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-30T09:20:38.750298Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:38.750387Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:38.750395Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:38.750445Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:38.750459Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:38.750492Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:38.750510Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:38.750518Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:38.750524Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:38.750535Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:38.750636Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:38.750645Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 
2025-06-30T09:20:38.750651Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:38.750668Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:419:2307] 2025-06-30T09:20:38.750790Z node 2 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:38.750802Z node 2 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:38.750807Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:38.750825Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:421:2117] 2025-06-30T09:20:38.751731Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:38.751748Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:409:2113] 2025-06-30T09:20:38.751810Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:38.751817Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:408:2303] 2025-06-30T09:20:38.752052Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:419:2307]} 2025-06-30T09:20:38.752069Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:38.752079Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:38.752083Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:38.752227Z node 2 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:421:2117]} 2025-06-30T09:20:38.752282Z node 2 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:38.752289Z node 2 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:38.752293Z node 2 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:38.771513Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:38.771538Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-30T09:20:38.771542Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:38.771548Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:37.400157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:37.400187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:37.400195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:37.400201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:37.400218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:37.400223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:37.400234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:37.400250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:37.400401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:37.400484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:37.418666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:37.418696Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:37.423050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:37.423162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:37.423239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:37.425952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:37.426055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:37.426236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been 
configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:37.426317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:37.430200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:37.430282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:37.430692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:37.430706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:37.430758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:37.430773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:37.430781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:37.430811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.436432Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:37.463453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:37.463565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.463658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:37.463670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:37.463731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:37.463751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:37.464652Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:37.464695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:37.464746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.464756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:37.464760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:37.464765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:37.465202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.465213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:37.465218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:37.465518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.465525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.465530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:37.465536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:37.466106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:37.466603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:37.466670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:37.466908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 
2025-06-30T09:20:37.466935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:37.466954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:37.467019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:37.467025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:37.467057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:37.467069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:37.467800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:37.467814Z node 1 :FLAT_TX_SCHEMESHARD ... wnerId: 72057594046678944, cookie: 103 2025-06-30T09:20:39.061631Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-30T09:20:39.061634Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-30T09:20:39.061638Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:20:39.061813Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:20:39.061821Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:20:39.061825Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-30T09:20:39.061829Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-30T09:20:39.061832Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-30T09:20:39.061855Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation 
IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-30T09:20:39.061999Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:20:39.062006Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:20:39.062009Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:20:39.062127Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:20:39.062133Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-30T09:20:39.062143Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:20:39.062146Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:20:39.062149Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:20:39.062152Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:20:39.062155Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-30T09:20:39.062158Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:20:39.062162Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-30T09:20:39.062165Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:0 2025-06-30T09:20:39.062189Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-30T09:20:39.062309Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:20:39.062348Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-30T09:20:39.062578Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:39.063167Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:39.063213Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:20:39.063438Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 
TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-30T09:20:39.063479Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-30T09:20:39.063777Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-30T09:20:39.063811Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409548 Forgetting tablet 72075186233409547 2025-06-30T09:20:39.064014Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:20:39.064033Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:20:39.064151Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:20:39.064157Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:20:39.064175Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:20:39.064202Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:20:39.064206Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:20:39.064214Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:39.064292Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:20:39.064578Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-30T09:20:39.064587Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-30T09:20:39.064596Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-30T09:20:39.064599Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-30T09:20:39.064890Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-30T09:20:39.064897Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: 
Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-30T09:20:39.064922Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:20:39.064930Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-30T09:20:39.064976Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-30T09:20:39.064984Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-30T09:20:39.065033Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-30T09:20:39.065046Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:20:39.065050Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:541:2488] TestWaitNotification: OK eventTxId 103 2025-06-30T09:20:39.065105Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:20:39.065134Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 37us result status StatusPathDoesNotExist 2025-06-30T09:20:39.065161Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> KqpRm::Reduce [GOOD] >> KqpRm::NotEnoughMemory [GOOD] |80.7%| [TA] $(B)/ydb/services/cms/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |80.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_failure_injection/unittest |80.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_failure_injection/unittest >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-true ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:37.586811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:37.586843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:37.586851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:37.586857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:37.586872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:37.586877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:37.586888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:37.586905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:37.587037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:37.587140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:37.598687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:37.598719Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:37.602941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:37.603012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:37.603063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as 
Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:37.605286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:37.605368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:37.605505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:37.605574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:37.606465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:37.606535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:37.606909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:37.606927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:37.606956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:37.606966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:37.606973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:37.606996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.608781Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:37.634016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:37.634121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.634190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:37.634200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:37.634254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 
2025-06-30T09:20:37.634274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:37.635422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:37.635480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:37.635548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.635560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:37.635565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:37.635572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:37.636180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.636190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:37.636196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:37.636541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.636548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:37.636554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:37.636563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:37.637442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:37.637980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:37.638034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 
72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:37.638245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:37.638275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:37.638294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:37.638371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:37.638380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:37.638417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:37.638430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:37.639867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:37.639897Z node 1 :FLAT_TX_SCHEMESHARD ... 
046678944, LocalPathId: 2], Generation: 2, ActorId:[7:393:2362], EffectiveACLVersion: 0, SubdomainVersion: 4, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 4, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:20:39.195596Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186234409546 2025-06-30T09:20:39.195603Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186234409546, txId: 0, path id: [OwnerId: 72075186234409546, LocalPathId: 1] 2025-06-30T09:20:39.195628Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186234409546 2025-06-30T09:20:39.195634Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:486:2427], at schemeshard: 72075186234409546, txId: 0, path id: 1 2025-06-30T09:20:39.195858Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186234409546, msg: Owner: 72075186234409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186234409546, cookie: 0 2025-06-30T09:20:39.195939Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:20:39.195947Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-30T09:20:39.195959Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:20:39.195964Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:20:39.195970Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-30T09:20:39.195973Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:20:39.195978Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-30T09:20:39.195984Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-30T09:20:39.195989Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-30T09:20:39.195993Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:0 2025-06-30T09:20:39.196006Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-30T09:20:39.196044Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 
2025-06-30T09:20:39.196051Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-30T09:20:39.196354Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-30T09:20:39.196363Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-30T09:20:39.196431Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-30T09:20:39.196446Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:20:39.196451Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:567:2506] TestWaitNotification: OK eventTxId 103 2025-06-30T09:20:39.196511Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:20:39.196533Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 29us result status StatusSuccess 2025-06-30T09:20:39.196604Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at 
schemeshard: 72057594046678944 2025-06-30T09:20:39.196658Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:20:39.196669Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 13us result status StatusSuccess 2025-06-30T09:20:39.196704Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:39.196739Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186234409546 2025-06-30T09:20:39.196750Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186234409546 describe path "/MyRoot/USER_0" took 11us result status StatusSuccess 2025-06-30T09:20:39.196780Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "MyRoot/USER_0" PathId: 1 SchemeshardId: 72075186234409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/USER_0" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186234409546, at schemeshard: 72075186234409546 ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SingleTask [GOOD] Test command err: 2025-06-30T09:20:38.990101Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:184:2154] Bootstrap 2025-06-30T09:20:39.026500Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:184:2154] Become StateWork (SchemeCache [1:193:2160]) 2025-06-30T09:20:39.026650Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:185:2104] Bootstrap 2025-06-30T09:20:39.028843Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:185:2104] Become StateWork (SchemeCache [2:196:2107]) 2025-06-30T09:20:39.041818Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:39.044642Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:39.044716Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:20:39.044854Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:39.045549Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:20:39.045617Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:39.045624Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:39.045659Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:20:39.049247Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:20:39.049300Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:20:39.049316Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:20:39.049360Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:39.049375Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:39.049417Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:39.085460Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:39.085516Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:39.096483Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:39.096537Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:39.096551Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:39.096562Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:39.096582Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:39.096589Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:39.096594Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:39.096601Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:39.107772Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:39.107832Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:39.118786Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:39.118853Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:39.119062Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:39.119069Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:20:39.121329Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:39.121352Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:20:39.121791Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:39.122207Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/004242/r3tmp/tmpdfMJij/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-30T09:20:39.122281Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/cl3w/004242/r3tmp/tmpdfMJij/pdisk_1.dat 2025-06-30T09:20:39.122294Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/cl3w/004242/r3tmp/tmpdfMJij/pdisk_1.dat 2025-06-30T09:20:39.122466Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-30T09:20:39.122556Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:39.122582Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:39.122597Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:39.122636Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:39.122760Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:39.123163Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:39.123237Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:39.134233Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:20:39.136988Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-30T09:20:39.137202Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error 
Details# Can't open file "/home/runner/.ya/build/build_root/cl3w/004242/r3tmp/tmpdfMJij/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-30T09:20:39.137385Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/cl3w/004242/r3tmp/tmpdfMJij/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/cl3w/004242/r3tmp/tmpdfMJij/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 183723596280498134 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-30T09:20:39.137448Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-30T09:20:39.137629Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:39.137708Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:39.137716Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:39.137770Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:39.137782Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:39.137811Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:39.137831Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:39.137859Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:39.137867Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:39.137881Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:39.137998Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:39.138008Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 
2025-06-30T09:20:39.138014Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:39.138031Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:419:2307] 2025-06-30T09:20:39.138133Z node 2 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:39.138141Z node 2 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:39.138145Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:39.138162Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:421:2117] 2025-06-30T09:20:39.138949Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:39.138963Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:409:2113] 2025-06-30T09:20:39.139015Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:39.139021Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:408:2303] 2025-06-30T09:20:39.139209Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:419:2307]} 2025-06-30T09:20:39.139222Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:39.139230Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:39.139234Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:39.139362Z node 2 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:421:2117]} 2025-06-30T09:20:39.139412Z node 2 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:39.139419Z node 2 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:39.139422Z node 2 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:39.158640Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:39.158662Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-30T09:20:39.158669Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:39.158675Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
>> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-false >> TPartBtreeIndexIteration::OneNode [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups >> RetryPolicy::TWriteSession_TestBrokenPolicy [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster >> TIterator::MixedReverse [GOOD] >> TIterator::Serial ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCHEveryQueryWorks-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 6700, MsgBus: 9153 2025-06-30T09:20:10.065577Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669755678828563:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:10.065653Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a31/r3tmp/tmpX5lrx8/pdisk_1.dat 2025-06-30T09:20:10.140440Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6700, node 1 2025-06-30T09:20:10.157023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:10.157035Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:10.157037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:10.157077Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:10.167234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:10.167257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:10.168398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9153 TClient is connected to server localhost:9153 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:20:10.221754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:10.527904Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669755678829148:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:10.527937Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:10.528111Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669755678829160:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:20:10.529142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:20:10.532507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:20:10.532627Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669755678829162:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:20:10.592631Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669755678829213:2328] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:20:10.650226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:10.723528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:10.738814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:10.761025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:10.777181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:10.797634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:10.822627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:10.836943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:20:11.068740Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:15.065918Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669755678828563:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:15.065972Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:20:19.754966Z node 1 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037892 Cancelled read: {[1:7521669794333537372:2804], 0} 2025-06-30T09:20:25.121054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs 2025-06-30T09:20:25.121072Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:26.397464Z node 1 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037889 Cancelled read: {[1:7521669824398310441:3242], 0} 2025-06-30T09:20:26.807096Z node 1 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037892 Cancelled read: {[1:7521669824398310543:3273], 0} ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NotEnoughMemory [GOOD] Test command err: 2025-06-30T09:20:39.121684Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:184:2154] Bootstrap 2025-06-30T09:20:39.157683Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:184:2154] Become StateWork (SchemeCache [1:193:2160]) 2025-06-30T09:20:39.157857Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:185:2104] Bootstrap 2025-06-30T09:20:39.160341Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:185:2104] Become StateWork (SchemeCache [2:196:2107]) 2025-06-30T09:20:39.172974Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:39.175163Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:39.175210Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:20:39.175316Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:39.175934Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:20:39.175996Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:39.176000Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:39.176024Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:20:39.179411Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:20:39.179478Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:20:39.179497Z node 1 
:BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:20:39.179555Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:39.179573Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:39.179626Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:39.212822Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:39.212858Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:39.223637Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:39.223683Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:39.223699Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:39.223711Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:39.223734Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:39.223744Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:39.223751Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:39.223760Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:39.234529Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:39.234572Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:39.245381Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:39.245437Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:39.245672Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:39.245680Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 
2025-06-30T09:20:39.247980Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:39.247999Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:20:39.248417Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:39.248794Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/00422d/r3tmp/tmpeEsk69/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-30T09:20:39.248859Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/cl3w/00422d/r3tmp/tmpeEsk69/pdisk_1.dat 2025-06-30T09:20:39.248870Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/cl3w/00422d/r3tmp/tmpeEsk69/pdisk_1.dat 2025-06-30T09:20:39.249033Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-30T09:20:39.249124Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:39.249147Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:39.249161Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:39.249202Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:39.249306Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:39.249678Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:39.249750Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:39.260779Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:20:39.263735Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got 
EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-30T09:20:39.263918Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/cl3w/00422d/r3tmp/tmpeEsk69/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-30T09:20:39.264090Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/cl3w/00422d/r3tmp/tmpeEsk69/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/cl3w/00422d/r3tmp/tmpeEsk69/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 11855880167827191972 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-30T09:20:39.264163Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-30T09:20:39.264332Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:39.264415Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:39.264423Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:39.264475Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:39.264488Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:39.264511Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:39.264529Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:39.264537Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:39.264545Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:39.264558Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:39.264667Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to 
hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:39.264678Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:39.264683Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:39.264698Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:419:2307] 2025-06-30T09:20:39.264774Z node 2 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:39.264783Z node 2 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:39.264787Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:39.264823Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:421:2117] 2025-06-30T09:20:39.265668Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:39.265678Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:409:2113] 2025-06-30T09:20:39.265728Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:39.265734Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:408:2303] 2025-06-30T09:20:39.265938Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:419:2307]} 2025-06-30T09:20:39.265953Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:39.265961Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:39.265965Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:39.266095Z node 2 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:421:2117]} 2025-06-30T09:20:39.266145Z node 2 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:39.266152Z node 2 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:39.266156Z node 2 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:39.285140Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:39.285163Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-30T09:20:39.285169Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:39.285176Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::Reduce [GOOD] Test command err: 2025-06-30T09:20:39.120650Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:184:2154] Bootstrap 2025-06-30T09:20:39.144303Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:184:2154] Become StateWork (SchemeCache [1:193:2160]) 2025-06-30T09:20:39.144440Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:185:2104] Bootstrap 2025-06-30T09:20:39.145786Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:185:2104] Become StateWork (SchemeCache [2:196:2107]) 2025-06-30T09:20:39.160571Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:39.162710Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:39.162788Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:20:39.162902Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:39.163542Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:20:39.163608Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:39.163615Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:39.163651Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:20:39.166189Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:20:39.166226Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:20:39.166238Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:20:39.166267Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:39.166276Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:39.166298Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:39.200034Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:39.200086Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:39.211061Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:39.211117Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:39.211131Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:39.211141Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:39.211169Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:39.211179Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:39.211186Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:39.211196Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:39.222190Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:39.222252Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:39.233145Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:39.233239Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:39.233419Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:39.233425Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:20:39.235077Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:39.235094Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:20:39.235447Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:39.235725Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/00423d/r3tmp/tmp0dgJcb/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-30T09:20:39.235796Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/cl3w/00423d/r3tmp/tmp0dgJcb/pdisk_1.dat 2025-06-30T09:20:39.235805Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/cl3w/00423d/r3tmp/tmp0dgJcb/pdisk_1.dat 2025-06-30T09:20:39.235972Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle 
TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-30T09:20:39.236062Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:39.236084Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:39.236094Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:39.236123Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:39.236205Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:39.236517Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:39.236563Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:39.247438Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:20:39.249469Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-30T09:20:39.249621Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/cl3w/00423d/r3tmp/tmp0dgJcb/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-30T09:20:39.249740Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/cl3w/00423d/r3tmp/tmp0dgJcb/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/cl3w/00423d/r3tmp/tmp0dgJcb/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 2450814314473549156 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-30T09:20:39.249800Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-30T09:20:39.249989Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:39.250056Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:39.250061Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:39.250099Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:39.250108Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:39.250127Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:39.250141Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:39.250146Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:39.250150Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:39.250158Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:39.250259Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:39.250266Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:39.250270Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:39.250283Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:419:2307] 2025-06-30T09:20:39.250357Z node 2 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 
Network: 1) 2025-06-30T09:20:39.250363Z node 2 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:39.250368Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:39.250393Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:421:2117] 2025-06-30T09:20:39.251251Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:39.251266Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:409:2113] 2025-06-30T09:20:39.251326Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:39.251333Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:408:2303] 2025-06-30T09:20:39.251544Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:419:2307]} 2025-06-30T09:20:39.251569Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:39.251579Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:39.251583Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:39.251734Z node 2 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:421:2117]} 2025-06-30T09:20:39.251786Z node 2 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:39.251793Z node 2 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:39.251796Z node 2 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:39.270002Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:39.270023Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-30T09:20:39.270027Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:39.270032Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
|80.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_failure_injection/unittest >> TChargeBTreeIndex::NoNodes_History [GOOD] >> TChargeBTreeIndex::NoNodes_Groups_History >> TVersions::WreckHeadReverse [GOOD] >> TVersions::Wreck2 >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanManyTables [GOOD] >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp |80.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_failure_injection/unittest >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true >> BuildStatsHistogram::Single_History [GOOD] >> BuildStatsHistogram::Single_History_Slices >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other [GOOD] >> TIterator::Serial [GOOD] >> TIterator::SerialReverse >> TExecutorDb::FullScan [GOOD] >> TExecutorDb::CoordinatorSimulation >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] >> TIterator::SerialReverse [GOOD] >> TIterator::GetKey [GOOD] >> TIterator::GetKeyWithEraseCache [GOOD] >> TIterator::GetKeyWithVersionSkips [GOOD] >> TLegacy::IndexIter >> TConsoleConfigTests::TestModifyConfigItem >> TConsoleConfigSubscriptionTests::TestAddConfigSubscription >> TLegacy::IndexIter [GOOD] >> TLegacy::ScreenedIndexIter [GOOD] >> TLegacy::StatsIter [GOOD] >> TPageHandleTest::Uninitialized [GOOD] >> TPageHandleTest::NormalUse [GOOD] >> TPageHandleTest::HandleRef [GOOD] >> TPageHandleTest::PinnedRef [GOOD] >> TPageHandleTest::PinnedRefPure [GOOD] >> TPart::BasicColumnGroups [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_NONE [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_TENANTS [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_TENANTS_AND_NODE_TYPES [GOOD] >> TNetClassifierUpdaterTest::TestGetUpdatesFromHttpServer >> KqpRm::NodesMembershipByExchanger [GOOD] >> TConsoleTests::TestRestartConsoleAndPoolsExtSubdomain >> KqpRm::SingleSnapshotByExchanger [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 
72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:38.334815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:38.334840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:38.334844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:38.334848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:38.334859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:38.334862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:38.334869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:38.334881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:38.334968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:38.335031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:38.344797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:38.344818Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:38.347854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:38.347924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:38.347967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:38.349632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:38.349700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:38.349820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:38.349908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at 
schemeshard: 72057594046678944 2025-06-30T09:20:38.350577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:38.350614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:38.350924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:38.350937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:38.350965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:38.350972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:38.350978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:38.350998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.352365Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:38.369561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:38.369636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.369690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:38.369696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:38.369732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:38.369742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:38.370388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:38.370419Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:38.370462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.370470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:38.370473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:38.370480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:38.370887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.370899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:38.370905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:38.371292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.371303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:38.371310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:38.371317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:38.372014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:38.372512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:38.372565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:38.372738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:38.372759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 
72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:38.372775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:38.372823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:38.372828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:38.372857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:38.372867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:38.373289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:38.373296Z node 1 :FLAT_TX_SCHEMESHARD ... 2057594046678944 2025-06-30T09:20:40.200410Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:40.200418Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:40.200790Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:40.200799Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:40.200806Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:40.200814Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:40.200851Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:40.201148Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:40.201180Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:40.201342Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, 
transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:40.201363Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 34359740524 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:40.201369Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:40.201409Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:40.201414Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:40.201440Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:40.201450Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:40.201868Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:40.201875Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:40.201914Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:40.201922Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [8:214:2214], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-30T09:20:40.201931Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:40.201936Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-30T09:20:40.201946Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:20:40.201949Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:20:40.201953Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:20:40.201956Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:20:40.201959Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-30T09:20:40.201962Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:20:40.201966Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-30T09:20:40.201969Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1:0 2025-06-30T09:20:40.201977Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:20:40.201981Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-30T09:20:40.201984Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-30T09:20:40.202132Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:20:40.202144Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:20:40.202150Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-30T09:20:40.202155Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-30T09:20:40.202159Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:40.202172Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-30T09:20:40.202662Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-30T09:20:40.202755Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-30T09:20:40.203415Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_1" ExternalSchemeShard: true } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:40.203440Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 101:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_1" ExternalSchemeShard: true } 2025-06-30T09:20:40.203444Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 101:0, path /MyRoot/USER_1 
2025-06-30T09:20:40.203468Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp:1102, at schemeshard: 72057594046678944 2025-06-30T09:20:40.203474Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 101:1, propose status:StatusPathDoesNotExist, reason: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp:1102, at schemeshard: 72057594046678944 2025-06-30T09:20:40.203539Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [8:277:2266] Bootstrap 2025-06-30T09:20:40.205006Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [8:277:2266] Become StateWork (SchemeCache [8:282:2271]) 2025-06-30T09:20:40.205240Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [8:277:2266] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:20:40.205900Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 101, response: Status: StatusPathDoesNotExist Reason: "Invalid AlterExtSubDomain request: Check failed: path: \'/MyRoot/USER_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp:1102" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:40.205943Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp:1102, operation: ALTER DATABASE, path: /MyRoot/USER_1 2025-06-30T09:20:40.206005Z node 8 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_TENANTS_AND_NODE_TYPES [GOOD] >> 
TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_DOMAIN [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_DOMAIN [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainUnaffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_DOMAIN [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] >> TConsoleTests::TestCreateTenant >> TJaegerTracingConfiguratorTests::DefaultConfig ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NodesMembershipByExchanger [GOOD] Test command err: 2025-06-30T09:20:38.834509Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:184:2154] Bootstrap 2025-06-30T09:20:38.857048Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:184:2154] Become StateWork (SchemeCache [1:193:2160]) 2025-06-30T09:20:38.857204Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:185:2104] Bootstrap 2025-06-30T09:20:38.859375Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:185:2104] Become StateWork (SchemeCache [2:196:2107]) 2025-06-30T09:20:38.874055Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:38.876792Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:38.876867Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:20:38.877005Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:38.877740Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:20:38.877815Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:38.877823Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:38.877882Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:20:38.881161Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:20:38.881216Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:20:38.881233Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:20:38.881281Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:38.881297Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:38.881336Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:38.915156Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:38.915241Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:38.926424Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:38.926493Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:38.926529Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:38.926548Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:38.926581Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:38.926592Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:38.926600Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:38.926611Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:38.937705Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:38.937772Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:38.948781Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:38.948859Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:38.949113Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:38.949125Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:20:38.951392Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:38.951418Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:20:38.951947Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 
VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:38.952366Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/004247/r3tmp/tmpfa0mYT/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-30T09:20:38.952454Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/cl3w/004247/r3tmp/tmpfa0mYT/pdisk_1.dat 2025-06-30T09:20:38.952465Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/cl3w/004247/r3tmp/tmpfa0mYT/pdisk_1.dat 2025-06-30T09:20:38.952660Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-30T09:20:38.952754Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:38.952782Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:38.952796Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:38.952838Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:38.952954Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:38.953394Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:38.953491Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:38.964631Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:20:38.967416Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-30T09:20:38.967635Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/cl3w/004247/r3tmp/tmpfa0mYT/pdisk_1.dat": unknown reason, errno# 0. 
PDiskId# 1000 2025-06-30T09:20:38.967822Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/cl3w/004247/r3tmp/tmpfa0mYT/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/cl3w/004247/r3tmp/tmpfa0mYT/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 9768326655663777152 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-30T09:20:38.967909Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-30T09:20:38.968112Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:38.968213Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:38.968224Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:38.968274Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:38.968287Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:38.968320Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:38.968343Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:38.968350Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:38.968358Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:38.968369Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:38.968493Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:38.968505Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:38.968510Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:38.968530Z 
node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:419:2307] 2025-06-30T09:20:38.968608Z node 2 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:38.968615Z node 2 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:38.968619Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:38.968635Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:421:2117] 2025-06-30T09:20:38.969408Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:38.969423Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:409:2113] 2025-06-30T09:20:38.969481Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:38.969487Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:408:2303] 2025-06-30T09:20:38.969705Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:419:2307]} 2025-06-30T09:20:38.969721Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:38.969730Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:38.969735Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:38.969895Z node 2 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:421:2117]} 2025-06-30T09:20:38.969948Z node 2 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:38.969954Z node 2 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:38.969958Z node 2 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:38.989595Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:38.989623Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-30T09:20:38.989629Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:38.989634Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-30T09:20:39.003113Z node 1 :BS_CONTROLLER DEBUG: {BSC19@console_interaction.cpp:74} Console proposed config response Response# {Status: ReverseCommit ConsoleConfigVersion: 0 YAML: "" } 2025-06-30T09:20:39.054825Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:184:2154] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:20:39.055884Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976710656 RangeEnd# 281474976715656 txAllocator# 72057594046447617 2025-06-30T09:20:39.055950Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:185:2104] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:20:39.056582Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:20:39.088963Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1 AvailableSize: 34225520640 TotalSize: 34359738368 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 EnforcedDynamicSlotSize: 34158411776 State: Normal } } 2025-06-30T09:20:39.130327Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1000 AvailableSize: 0 TotalSize: 0 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 State: OpenFileError } } 2025-06-30T09:20:39.161266Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } SatisfactionRank: 0 AvailableSize: 34158411776 AllocatedSize: 0 StatusFlags: 1 VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Occupancy: 0.00098231827111984276 State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 1000 } } 2025-06-30T09:20:39.456691Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:39.457394Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:39.457497Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } >> TConfigsCacheTests::TestNoNotificationIfConfigIsCached >> TConsoleTests::TestGetUnknownTenantStatus >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionCreate >> BuildStatsHistogram::Single_History_Slices [GOOD] >> BuildStatsHistogram::Ten_Mixed >> TNodeBrokerTest::LoadStateMoveEpoch >> KqpRm::DisonnectNodes [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SingleSnapshotByExchanger [GOOD] Test command err: 2025-06-30T09:20:39.136109Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:184:2154] Bootstrap 2025-06-30T09:20:39.160422Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:184:2154] Become StateWork (SchemeCache [1:193:2160]) 2025-06-30T09:20:39.160527Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:185:2104] Bootstrap 2025-06-30T09:20:39.161831Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:185:2104] Become StateWork (SchemeCache [2:196:2107]) 
2025-06-30T09:20:39.172995Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:39.175308Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:39.175365Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:20:39.175455Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:39.176044Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:20:39.176114Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:39.176121Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:39.176152Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:20:39.179017Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:20:39.179065Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:20:39.179080Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:20:39.179123Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:39.179138Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:39.179174Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:39.212143Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:39.212185Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:39.223079Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:39.223133Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:39.223148Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:39.223161Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:39.223192Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:39.223202Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:39.223210Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:39.223224Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:39.234008Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:39.234045Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:39.245284Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:39.245351Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:39.245515Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:39.245520Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:20:39.246987Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:39.247007Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:20:39.247425Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:39.247701Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/004231/r3tmp/tmpdVte5o/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-30T09:20:39.247758Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/cl3w/004231/r3tmp/tmpdVte5o/pdisk_1.dat 2025-06-30T09:20:39.247765Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/cl3w/004231/r3tmp/tmpdVte5o/pdisk_1.dat 2025-06-30T09:20:39.247903Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-30T09:20:39.247963Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:39.247982Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:39.247991Z node 1 :BS_CONTROLLER 
DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:39.248017Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:39.248104Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:39.248385Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:39.248425Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:39.259449Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:20:39.262182Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-30T09:20:39.262353Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/cl3w/004231/r3tmp/tmpdVte5o/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-30T09:20:39.262520Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/cl3w/004231/r3tmp/tmpdVte5o/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/cl3w/004231/r3tmp/tmpdVte5o/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 7101516324866997928 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-30T09:20:39.262598Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-30T09:20:39.262778Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:39.262846Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:39.262851Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:39.262891Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:39.262900Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:39.262926Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:39.262940Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:39.262945Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:39.262950Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:39.262957Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:39.263028Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:39.263034Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:39.263039Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:39.263060Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:419:2307] 2025-06-30T09:20:39.263127Z node 2 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 
Network: 1) 2025-06-30T09:20:39.263135Z node 2 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:39.263139Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:39.263155Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:421:2117] 2025-06-30T09:20:39.263898Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:39.263906Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:409:2113] 2025-06-30T09:20:39.263946Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:39.263950Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:408:2303] 2025-06-30T09:20:39.264118Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:419:2307]} 2025-06-30T09:20:39.264129Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:39.264136Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:39.264139Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:39.264227Z node 2 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:421:2117]} 2025-06-30T09:20:39.264263Z node 2 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:39.264267Z node 2 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:39.264269Z node 2 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:39.281889Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:39.281915Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-30T09:20:39.281922Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:39.281929Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-30T09:20:39.295089Z node 1 :BS_CONTROLLER DEBUG: {BSC19@console_interaction.cpp:74} Console proposed config response Response# {Status: ReverseCommit ConsoleConfigVersion: 0 YAML: "" } 2025-06-30T09:20:39.346526Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:184:2154] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:20:39.347406Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976710656 RangeEnd# 281474976715656 txAllocator# 72057594046447617 2025-06-30T09:20:39.347476Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:185:2104] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:20:39.348102Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:20:39.380347Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1 AvailableSize: 34225520640 TotalSize: 34359738368 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 EnforcedDynamicSlotSize: 34158411776 State: Normal } } 2025-06-30T09:20:39.421806Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1000 AvailableSize: 0 TotalSize: 0 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 State: OpenFileError } } 2025-06-30T09:20:39.452803Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } SatisfactionRank: 0 AvailableSize: 34158411776 AllocatedSize: 0 StatusFlags: 1 VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Occupancy: 0.00098231827111984276 State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 1000 } } 2025-06-30T09:20:39.749784Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:39.750456Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:39.750540Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TPart::BasicColumnGroups [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.565824Z 00000.004 DD| RESOURCE_BROKER: TResourceBrokerActor bootstrap 00000.004 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.005 II| FAKE_ENV: Starting storage for BS group 0 00000.005 II| FAKE_ENV: Starting storage for BS group 1 00000.005 II| FAKE_ENV: Starting storage for BS group 2 00000.005 II| FAKE_ENV: Starting storage for BS group 3 00000.006 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.006 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} 
queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} hope 1 -> done Change{2, redo 0b alter 302b annex 0, ~{ } -{ }, 0 gb} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} release 4194304b of static, Memory{0 dyn 0} 00000.006 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 1, state Free, final id 0, final level 0 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} hope 1 -> done Change{2, redo 0b alter 15b annex 0, ~{ } -{ }, 0 gb} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} release 4194304b of static, Memory{0 dyn 0} 00000.006 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 1, state Free, final id 0, final level 0 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 104856577b requested for data (104857601b in total) 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{1 104857601b} type large_transaction 00000.006 DD| RESOURCE_BROKER: Submitted new unknown task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) priority=5 resources={0, 104857601} 00000.006 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{3, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.006 DD| RESOURCE_BROKER: Allocate resources {0, 104857601} for task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) from queue queue_default 00000.006 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.006 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 12.207031 (insert task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])) 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{1 104857601b}, Memory{0 dyn 104857601} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{1 104857601b}, Memory{0 dyn 0} 00000.006 DD| RESOURCE_BROKER: Finish task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) (release resources {0, 104857601}) 00000.006 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 12.207031 to 0.000000 (remove task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])) 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 104856577b requested for data (104857601b in total) 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 104857601b of static mem, Memory{104857601 dyn 0} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 104857601b of static, Memory{0 dyn 0} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 209714177b requested for data (209715201b in total) 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{2 209715201b} type large_transaction 00000.007 DD| RESOURCE_BROKER: Submitted new unknown task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) priority=5 resources={0, 209715201} 00000.007 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.007 DD| RESOURCE_BROKER: Allocate resources {0, 209715201} for task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) from queue queue_default 00000.007 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.007 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 23.193359 (insert task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])) 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{2 209715201b}, Memory{0 dyn 209715201} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{2 209715201b}, Memory{0 dyn 0} 00000.007 DD| RESOURCE_BROKER: Finish task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) (release resources {0, 209715201}) 00000.007 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 23.193359 to 0.000000 (remove task Tx{5, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])) 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.007 DD| TABLET_EXE ... 76:97:0], [1:2:54:1:24576:97:0], [1:2:55:1:24576:97:0], [1:2:56:1:24576:97:0], [1:2:57:1:24576:97:0], [1:2:58:1:24576:97:0], [1:2:59:1:24576:97:0], [1:2:60:1:24576:97:0], [1:2:61:1:24576:97:0], [1:2:62:1:24576:97:0], [1:2:63:1:24576:97:0], [1:2:64:1:24576:97:0], [1:2:65:1:24576:97:0], [1:2:66:1:24576:97:0], [1:2:67:1:24576:97:0], [1:2:68:1:24576:97:0], [1:2:69:1:24576:97:0], [1:2:70:1:24576:97:0], [1:2:71:1:24576:97:0], [1:2:72:1:24576:97:0], [1:2:73:1:24576:101:0], [1:2:74:1:24576:102:0], [1:2:75:1:24576:101:0], [1:2:76:1:24576:102:0], [1:2:77:1:24576:104:0], [1:2:78:1:24576:104:0], [1:2:79:1:24576:104:0], [1:2:80:1:24576:104:0], [1:2:81:1:24576:103:0], [1:2:82:1:24576:101:0], [1:2:83:1:24576:104:0], [1:2:84:1:24576:104:0], [1:2:85:1:24576:104:0], [1:2:86:1:24576:104:0], [1:2:87:1:24576:104:0], [1:2:88:1:24576:104:0], [1:2:89:1:24576:104:0], [1:2:90:1:24576:101:0], [1:2:91:1:24576:104:0], [1:2:92:1:24576:104:0], [1:2:93:1:24576:98:0], [1:2:94:1:24576:104:0], [1:2:95:1:24576:104:0], [1:2:96:1:24576:104:0], [1:2:97:1:24576:104:0], [1:2:98:1:24576:104:0], [1:2:99:1:24576:104:0], [1:2:100:1:24576:104:0], [1:2:101:1:24576:97:0], [1:2:102:1:24576:100:0], [1:2:103:1:24576:104:0], [1:2:104:1:24576:104:0], [1:2:105:1:24576:104:0], [1:2:106:1:24576:104:0], [1:2:107:1:24576:104:0], [1:2:108:1:24576:104:0], [1:2:109:1:24576:104:0], [1:2:110:1:24576:104:0], [1:2:111:1:24576:104:0], [1:2:112:1:24576:104:0], [1:2:113:1:24576:104:0], [1:2:114:1:24576:104:0], [1:2:115:1:24576:104:0], [1:2:116:1:24576:104:0], [1:2:117:1:24576:104:0], [1:2:118:1:24576:104:0], [1:2:119:1:24576:104:0], [1:2:120:1:24576:104:0], [1:2:121:1:24576:104:0], [1:2:122:1:24576:104:0], [1:2:123:1:24576:104:0], [1:2:124:1:24576:104:0], [1:2:125:1:24576:104:0], [1:2:126:1:24576:104:0], [1:2:127:1:24576:104:0], [1:2:128:1:24576:104:0], [1:2:129:1:24576:104:0], [1:2:130:1:24576:104:0], [1:2:131:1:24576:104:0], [1:2:132:1:24576:104:0], [1:2:133:1:24576:104:0], [1:2:134:1:24576:104:0], [1:2:135:1:24576:104:0], [1:2:136:1:24576:104:0], [1:2:137:1:24576:104:0], [1:2:138:1:24576:104:0], [1:2:139:1:24576:104:0], [1:2:140:1:24576:104:0], [1:2:141:1:24576:104:0], [1:2:142:1:24576:104:0], [1:2:145:1:24576:60:0], [1:2:146:1:24576:60:0] } 00000.015 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:143:1:12288:758:0] 00000.015 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.015 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] owner [20:212:2237] cookie 4 class Online from cache [ ] already requested [ ] to request [ 22 23 24 25 ] 00000.015 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:143:1:12288:758:0] status OK pages [ 22 23 24 25 ] 00000.015 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:143:1:12288:758:0] owner [20:212:2237] class Online pages [ 22 23 24 25 ] cookie 4 00000.015 II| TABLET_EXECUTOR: Leader{1:3:0} activating executor 00000.015 II| TABLET_EXECUTOR: LSnap{1:3, 
on 3:1, 1880b, wait} done, Waste{2:0, 141856b +(140, 14018b), 146 trc} 00000.015 DD| TABLET_SAUSAGECACHE: Attach page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.015 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] owner [20:212:2237] cookie 2 class AsyncLoad from cache [ 22 23 24 25 ] already requested [ ] to request [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.015 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] async queue pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.015 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:143:1:12288:758:0] status OK pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.015 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:143:1:12288:758:0] owner [20:212:2237] class AsyncLoad pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 ] cookie 2 00000.015 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:143:1:12288:758:0] owner [20:212:2237] pages [ 22 23 24 25 ] 00000.016 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{26 pages [1:2:143:1:12288:758:0] ok OK}, category 2 00000.016 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:143:1:12288:758:0] owner [20:212:2237] pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 ] 00000.016 DD| TABLET_EXECUTOR: Leader{1:3:2} commited cookie 2 for step 1 00000.016 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan 00000.016 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.016 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} hope 1 -> done Change{145, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.016 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} release 4194304b of static, Memory{0 dyn 0} 00000.016 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.016 II| TABLET_EXECUTOR: Leader{1:3:2} suiciding, Waste{2:0, 141856b +(0, 0b), 1 trc, -14018b acc} 00000.016 DD| TABLET_SAUSAGECACHE: Unregister owner [20:212:2237] 00000.016 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.016 DD| TABLET_SAUSAGECACHE: Remove owner [20:212:2237] 00000.016 DD| TABLET_SAUSAGECACHE: Drop expired page collection [1:2:143:1:12288:758:0] 00000.016 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {6 1077b} miss {50 281387b} 00000.016 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.016 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {14354b, 149} 00000.016 II| FAKE_ENV: DS.1 gone, left {143736b, 8}, put {157893b, 150} 00000.016 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.016 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.016 II| FAKE_ENV: All BS storage groups are stopped 00000.016 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.016 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 795}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:35.636822Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 
AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.006 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.006 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {3 512b} miss {0 0b} 00000.006 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.006 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: DS.0 gone, left {1356b, 12}, put {1376b, 13} 00000.006 II| FAKE_ENV: DS.1 gone, left {6814b, 23}, put {6814b, 23} 00000.006 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: All BS storage groups are stopped 00000.006 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.006 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:35.645370Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.045 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.045 NN| TABLET_SAUSAGECACHE: Poison cache serviced 10 reqs hit {860 5551893b} miss {0 0b} 00000.045 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.046 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.046 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.046 II| FAKE_ENV: DS.0 gone, left {1201b, 13}, put {1221b, 14} 00000.046 II| FAKE_ENV: DS.1 gone, left {6751256b, 17}, put {6751256b, 17} 00000.046 II| FAKE_ENV: All BS storage groups are stopped 00000.046 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.046 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:35.715779Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00001.083 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00001.083 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4109 reqs hit {2091 2366986b} miss {6144 6340608b} 00001.086 II| FAKE_ENV: Shut order, stopping 4 BS groups 00001.086 II| FAKE_ENV: DS.0 gone, left {1761b, 14}, put {1781b, 15} 00001.086 II| FAKE_ENV: DS.1 gone, left {6927727b, 27}, put {6927727b, 27} 00001.086 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00001.086 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00001.086 II| FAKE_ENV: All BS storage groups are stopped 00001.086 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00001.086 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:36.806617Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: 
Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00001.143 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00001.143 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4106 reqs hit {43 253450b} miss {4096 4227072b} 00001.144 II| FAKE_ENV: Shut order, stopping 4 BS groups 00001.144 II| FAKE_ENV: DS.0 gone, left {44744b, 2}, put {164747b, 16} 00001.145 II| FAKE_ENV: DS.1 gone, left {2764621b, 2068}, put {2764621b, 2068} 00001.146 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00001.146 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00001.146 II| FAKE_ENV: All BS storage groups are stopped 00001.146 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00001.146 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:37.958055Z 00000.003 II| FAKE_ENV: Starting storage for BS group 0 00000.003 II| FAKE_ENV: Starting storage for BS group 1 00000.003 II| FAKE_ENV: Starting storage for BS group 2 00000.003 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:37.973768Z 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.001 II| FAKE_ENV: Starting storage for BS group 1 00000.001 II| FAKE_ENV: Starting storage for BS group 2 00000.001 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:37.989291Z 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:38.006135Z 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 >> TConsoleConfigSubscriptionTests::TestAddConfigSubscription [GOOD] >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscription |80.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/console/ut/unittest >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] |80.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_failure_injection/unittest >> TConsoleConfigTests::TestModifyConfigItem [GOOD] >> TConsoleConfigTests::TestRemoveConfigItem >> TExecutorDb::CoordinatorSimulation [GOOD] >> TExecutorDb::RandomCoordinatorSimulation ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::DisonnectNodes [GOOD] Test command err: 2025-06-30T09:20:39.238971Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:184:2154] Bootstrap 2025-06-30T09:20:39.274185Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:184:2154] Become StateWork (SchemeCache [1:193:2160]) 2025-06-30T09:20:39.274316Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:185:2104] Bootstrap 2025-06-30T09:20:39.275650Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:185:2104] Become StateWork (SchemeCache [2:196:2107]) 2025-06-30T09:20:39.284975Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:39.286771Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:39.286823Z node 1 :BS_CONTROLLER DEBUG: 
{BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:20:39.286917Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:39.287497Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:20:39.287552Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:39.287556Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:39.287582Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:20:39.289879Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:20:39.289913Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:20:39.289924Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:20:39.289951Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:39.289961Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:39.289985Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:39.322649Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:39.322703Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:39.333592Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:39.333648Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:39.333677Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:39.333694Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:39.333720Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:39.333727Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:39.333733Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:39.333739Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:39.344666Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:39.344728Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:39.355583Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:39.355649Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:39.355833Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:39.355839Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:20:39.357363Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:39.357380Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:20:39.357790Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:39.358122Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/00421b/r3tmp/tmpMN9ZKH/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-30T09:20:39.358189Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/cl3w/00421b/r3tmp/tmpMN9ZKH/pdisk_1.dat 2025-06-30T09:20:39.358195Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/cl3w/00421b/r3tmp/tmpMN9ZKH/pdisk_1.dat 2025-06-30T09:20:39.358360Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-30T09:20:39.358433Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:39.358453Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:39.358463Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:39.358491Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 
PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:39.358582Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:39.358950Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:39.359042Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:39.369832Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:20:39.371240Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-30T09:20:39.371394Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/cl3w/00421b/r3tmp/tmpMN9ZKH/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-30T09:20:39.371543Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/cl3w/00421b/r3tmp/tmpMN9ZKH/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/cl3w/00421b/r3tmp/tmpMN9ZKH/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 5192740116684639475 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-30T09:20:39.371612Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-30T09:20:39.371775Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:39.371839Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:39.371844Z node 2 
:TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:39.371873Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:39.371882Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:39.371909Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:39.371922Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:39.371927Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:39.371932Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:39.371939Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:39.372011Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:39.372017Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:39.372022Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:39.372041Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:419:2307] 2025-06-30T09:20:39.372105Z node 2 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:39.372112Z node 2 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:39.372115Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:39.372126Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:421:2117] 2025-06-30T09:20:39.372709Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:39.372717Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:409:2113] 2025-06-30T09:20:39.372756Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:39.372759Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:408:2303] 2025-06-30T09:20:39.372937Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:419:2307]} 2025-06-30T09:20:39.372947Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:39.372953Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:39.372956Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:39.373046Z node 2 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:421:2117]} 2025-06-30T09:20:39.373082Z node 2 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:39.373086Z node 2 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:39.373088Z node 2 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:39.391612Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 
2025-06-30T09:20:39.391638Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-30T09:20:39.391642Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:39.391647Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-30T09:20:39.404247Z node 1 :BS_CONTROLLER DEBUG: {BSC19@console_interaction.cpp:74} Console proposed config response Response# {Status: ReverseCommit ConsoleConfigVersion: 0 YAML: "" } 2025-06-30T09:20:39.455896Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:184:2154] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:20:39.456991Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976710656 RangeEnd# 281474976715656 txAllocator# 72057594046447617 2025-06-30T09:20:39.457058Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:185:2104] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:20:39.457754Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:20:39.490106Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1 AvailableSize: 34225520640 TotalSize: 34359738368 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 EnforcedDynamicSlotSize: 34158411776 State: Normal } } 2025-06-30T09:20:39.531707Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1000 AvailableSize: 0 TotalSize: 0 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 State: OpenFileError } } 2025-06-30T09:20:39.562821Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } SatisfactionRank: 0 AvailableSize: 34158411776 AllocatedSize: 0 StatusFlags: 1 VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Occupancy: 0.00098231827111984276 State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 1000 } } 2025-06-30T09:20:39.870550Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:39.871349Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:39.871421Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:40.365015Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 2 2025-06-30T09:20:40.365085Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2025-06-30T09:20:40.365324Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 2 2025-06-30T09:20:40.365532Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} 
Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:80:2083] ServerId# [1:388:2290] TabletId# 72057594037932033 PipeClientId# [2:80:2083] 2025-06-30T09:20:40.365629Z node 2 :TX_PROXY WARN: proxy_impl.cpp:227: actor# [2:185:2104] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2025-06-30T09:20:40.365656Z node 2 :LOCAL DEBUG: local.cpp:279: TEvTabletPipe::TEvClientDestroyed {TabletId=72057594046578946 ClientId=[2:421:2117]} 2025-06-30T09:20:40.365664Z node 2 :LOCAL DEBUG: local.cpp:222: TLocalNodeRegistrar HandlePipeDestroyed - DISCONNECTED 2025-06-30T09:20:40.365671Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:40.365691Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:559:2117] 2025-06-30T09:20:40.366873Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-30T09:20:40.367005Z node 2 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:559:2117]} 2025-06-30T09:20:40.367105Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1000 AvailableSize: 0 TotalSize: 0 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 State: OpenFileError } } 2025-06-30T09:20:40.367141Z node 2 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:40.367161Z node 2 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:40.367166Z node 2 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:40.378437Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] >> TVersions::Wreck2 [GOOD] >> TVersions::Wreck2Reverse >> TConfigsCacheTests::TestNoNotificationIfConfigIsCached [GOOD] >> TConfigsCacheTests::TestFullConfigurationRestore >> TPartBtreeIndexIteration::OneNode_Groups [GOOD] >> TPartBtreeIndexIteration::OneNode_History >> TJaegerTracingConfiguratorTests::DefaultConfig [GOOD] >> TJaegerTracingConfiguratorTests::GlobalRules >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionCreate [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClient >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-false >> TConsoleConfigTests::TestRemoveConfigItem [GOOD] >> TConsoleConfigTests::TestRemoveConfigItems >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscription [GOOD] >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscriptions ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/unittest >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other [GOOD] Test command err: 2025-06-30T09:16:41.659302Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521668857500063871:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:41.659319Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002caf/r3tmp/tmpY19EFQ/pdisk_1.dat 2025-06-30T09:16:41.701546Z node 1 :PQ_READ_PROXY DEBUG: 
caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:16:41.715755Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:41.718077Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521668857500063843:2080] 1751275001659157 != 1751275001659160 TServer::EnableGrpc on GrpcPort 27917, node 1 2025-06-30T09:16:41.743087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002caf/r3tmp/yandexyRGC7d.tmp 2025-06-30T09:16:41.743103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002caf/r3tmp/yandexyRGC7d.tmp 2025-06-30T09:16:41.743182Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002caf/r3tmp/yandexyRGC7d.tmp 2025-06-30T09:16:41.743222Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:16:41.751286Z INFO: TTestServer started on Port 16459 GrpcPort 27917 TClient is connected to server localhost:16459 PQClient connected to localhost:27917 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:16:41.797267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:16:41.797302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:16:41.797889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:41.798813Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... waiting... 2025-06-30T09:16:41.807095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 
2025-06-30T09:16:42.143134Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668861795031924:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.143581Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521668861795031897:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.143614Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:16:42.144516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:16:42.147851Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521668861795031928:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:16:42.193577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.201482Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521668861795032046:2463] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:16:42.205362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:16:42.216651Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521668861795032066:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:16:42.217263Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=M2JiYmM3OGItMTBmMDg4NzAtODhiOTc2MmItZmY2NGI5ZWY=, ActorId: [1:7521668861795031895:2296], ActorState: ExecuteState, TraceId: 01jz01zp8ra25z3wwq6betc9rm, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:16:42.217743Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:16:42.233348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521668861795032272:2610] 2025-06-30T09:16:42.660803Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:16:46.659536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521668857500063871:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:16:46.659584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:16:47.471274Z :WriteToTopic_Demo_11_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:16:47.495347Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:16:47.526804Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:16:47.534803Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:16:47.534935Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:16:47.535013Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-30T09:16:47.535019Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:16:47.535022Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-30T09:16:47.535038Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:16:47.535047Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:16:47.535069Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-30T09:16:47.542898Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [1:7521668883269868957:2693], now have 1 active actors on pipe 2025-06-30T09:16:47.546623Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72075186224037892] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 7521668857500064178 RawX2: 4294969451 } TxId: 281474 ... 0, State: StateInit] init complete for topic 'topic_A' partition 0 generation 2 [11:7521669866764513070:2462] 2025-06-30T09:20:36.187566Z node 11 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72075186224037895, Partition: 0, State: StateInit] SYNC INIT topic topic_A partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:20:36.187567Z node 11 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateInit] SYNC INIT topic topic_A partitition {0, {1, 281474976715674}, 100000} so 2 endOffset 2 Head Offset 2 PartNo 0 PackedSize 0 count 0 nextOffset 2 batches 0 SYNC INIT sourceId test-message_group_id_3 seqNo 1 offset 1 SYNC INIT sourceId test-message_group_id_1 seqNo 1 offset 0 2025-06-30T09:20:36.187572Z node 11 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateIdle] Process pending events. Count 0 2025-06-30T09:20:36.187572Z node 11 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72075186224037895, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-30T09:20:36.187584Z node 11 :PERSQUEUE DEBUG: partition.cpp:606: [PQ: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateIdle] Init complete for topic 'topic_A' Partition: {0, {1, 281474976715674}, 100000} SourceId: test-message_group_id_3 SeqNo: 1 offset: 1 MaxOffset: 2 2025-06-30T09:20:36.187586Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037895, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 2 2025-06-30T09:20:36.187588Z node 11 :PERSQUEUE DEBUG: partition.cpp:606: [PQ: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateIdle] Init complete for topic 'topic_A' Partition: {0, {1, 281474976715674}, 100000} SourceId: test-message_group_id_1 SeqNo: 1 offset: 0 MaxOffset: 2 2025-06-30T09:20:36.187590Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037895, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:20:36.187609Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateIdle] no data for compaction 2025-06-30T09:20:36.187616Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037895, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:20:36.192361Z node 11 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037896][topic_A] TEvClientConnected TabletId 72075186224037895, NodeId 11, Generation 2 2025-06-30T09:20:36.192375Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037895] server connected, pipe [11:7521669866764513046:2437], now have 1 active actors on pipe 2025-06-30T09:20:38.182165Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037894] server connected, pipe [11:7521669875354447720:2888], now have 1 active actors on pipe 2025-06-30T09:20:38.182291Z node 11 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:465: [72075186224037896][topic_A] TEvClientDestroyed 72075186224037894 2025-06-30T09:20:38.184777Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037894] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:20:38.184928Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037894] Registered with mediator time cast 2025-06-30T09:20:38.185082Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037894] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:20:38.185141Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:742: [PQ: 72075186224037894] has a tx info 2025-06-30T09:20:38.185166Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037894] PlanStep 1742296505901, PlanTxId 281474976715673, ExecStep 1742296505901, ExecTxId 281474976715673 2025-06-30T09:20:38.185234Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037894] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:20:38.185403Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:20:38.185406Z node 11 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:20:38.185411Z node 11 :PERSQUEUE INFO: pq_impl.cpp:788: [PQ: 72075186224037894] has a tx writes info 2025-06-30T09:20:38.185566Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:20:38.185569Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3630: [PQ: 72075186224037894] send TEvSubscribeLock for WriteId {1, 281474976715674} 2025-06-30T09:20:38.185664Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:20:38.185692Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:20:38.185716Z node 11 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateInit] bootstrapping {1, {1, 281474976715674}, 100000} [11:7521669875354447747:2483] 2025-06-30T09:20:38.185765Z node 11 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037894, Partition: 1, State: StateInit] bootstrapping 1 [11:7521669875354447746:2482] 2025-06-30T09:20:38.185906Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitDiskStatusStep 2025-06-30T09:20:38.185995Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitDiskStatusStep 2025-06-30T09:20:38.186009Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitMetaStep 2025-06-30T09:20:38.186087Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitMetaStep 2025-06-30T09:20:38.186106Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitInfoRangeStep 2025-06-30T09:20:38.186149Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitInfoRangeStep 2025-06-30T09:20:38.186173Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitDataRangeStep 2025-06-30T09:20:38.186198Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: D0000100000_00000000000000000000_00000_0000000001_00000| 2025-06-30T09:20:38.186216Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:624: [topic_A:{1, {1, 281474976715674}, 100000}:TInitDataRangeStep] Got data offset 0 count 1 size 250 so 0 eo 1 D0000100000_00000000000000000000_00000_0000000001_00000| 2025-06-30T09:20:38.186228Z node 11 :PERSQUEUE 
DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitDataStep 2025-06-30T09:20:38.186291Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:845: [topic_A:{1, {1, 281474976715674}, 100000}:TInitDataStep] read res partition offset 0 endOffset 1 key 0,1 valuesize 250 expected 250 2025-06-30T09:20:38.186306Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-30T09:20:38.186315Z node 11 :PERSQUEUE INFO: partition_init.cpp:895: [topic_A:{1, {1, 281474976715674}, 100000}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:20:38.186318Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Initializing completed. 2025-06-30T09:20:38.186325Z node 11 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateInit] init complete for topic 'topic_A' partition {1, {1, 281474976715674}, 100000} generation 2 [11:7521669875354447747:2483] 2025-06-30T09:20:38.186334Z node 11 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateInit] SYNC INIT topic topic_A partitition {1, {1, 281474976715674}, 100000} so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 SYNC INIT sourceId test-message_group_id_2 seqNo 1 offset 0 2025-06-30T09:20:38.186347Z node 11 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateIdle] Process pending events. Count 0 2025-06-30T09:20:38.186360Z node 11 :PERSQUEUE DEBUG: partition.cpp:606: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateIdle] Init complete for topic 'topic_A' Partition: {1, {1, 281474976715674}, 100000} SourceId: test-message_group_id_2 SeqNo: 1 offset: 0 MaxOffset: 1 2025-06-30T09:20:38.186382Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateIdle] no data for compaction 2025-06-30T09:20:38.186394Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitDataRangeStep 2025-06-30T09:20:38.186445Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitDataStep 2025-06-30T09:20:38.186452Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-30T09:20:38.186455Z node 11 :PERSQUEUE INFO: partition_init.cpp:895: [topic_A:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:20:38.186456Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic_A:1:Initializer] Initializing completed. 
2025-06-30T09:20:38.186460Z node 11 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037894, Partition: 1, State: StateInit] init complete for topic 'topic_A' partition 1 generation 2 [11:7521669875354447746:2482] 2025-06-30T09:20:38.186464Z node 11 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72075186224037894, Partition: 1, State: StateInit] SYNC INIT topic topic_A partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:20:38.186469Z node 11 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72075186224037894, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-30T09:20:38.186486Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 1, State: StateIdle] Topic 'topic_A' partition 1 user consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 2 2025-06-30T09:20:38.186496Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 1, State: StateIdle] Topic 'topic_A' partition 1 user test-consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:20:38.186515Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 1, State: StateIdle] no data for compaction 2025-06-30T09:20:38.192909Z node 11 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037896][topic_A] TEvClientConnected TabletId 72075186224037894, NodeId 11, Generation 2 2025-06-30T09:20:38.192919Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037894] server connected, pipe [11:7521669875354447722:2437], now have 1 active actors on pipe >> TNodeBrokerTest::NodesMigration1001Nodes >> TJaegerTracingConfiguratorTests::GlobalRules [GOOD] >> TJaegerTracingConfiguratorTests::ExternalTracePlusSampling >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClient [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdates >> TNodeBrokerTest::ResolveScopeIdForServerless >> TNodeBrokerTest::ExtendLeaseBumpVersion >> TTenantPoolTests::TestForcedSensorLabelsForStaticConfig >> TConfigsCacheTests::TestFullConfigurationRestore [GOOD] >> TConfigsCacheTests::TestConfigurationSaveOnNotification >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-true >> TExecutorDb::RandomCoordinatorSimulation [GOOD] >> TExecutorDb::MultiPage >> BuildStatsHistogram::Ten_Mixed [GOOD] >> BuildStatsHistogram::Ten_Serial >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-false >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscriptions [GOOD] >> TConsoleConfigSubscriptionTests::TestListConfigSubscriptions >> TNodeBrokerTest::SubscribeToNodes >> TJaegerTracingConfiguratorTests::ExternalTracePlusSampling [GOOD] >> TJaegerTracingConfiguratorTests::RequestTypeThrottler >> TConsoleConfigTests::TestRemoveConfigItems [GOOD] >> TConsoleConfigTests::TestConfigureOrderConflicts >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_FlatIndex >> TExecutorDb::MultiPage [GOOD] >> TExecutorDb::EncodedPage >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true >> KqpRm::SnapshotSharingByExchanger [GOOD] >> 
TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_BTreeIndex >> TExecutorDb::EncodedPage [GOOD] >> TFlatCxxDatabaseTest::BasicSchemaTest [GOOD] >> TFlatCxxDatabaseTest::RenameColumnSchemaTest [GOOD] >> TFlatCxxDatabaseTest::SchemaFillerTest [GOOD] >> TFlatDatabaseDecimal::UpdateRead [GOOD] >> TFlatEraseCacheTest::BasicUsage [GOOD] >> TFlatEraseCacheTest::BasicUsageReverse [GOOD] >> TFlatEraseCacheTest::CacheEviction [GOOD] >> TFlatEraseCacheTest::StressGarbageCollection [GOOD] >> TFlatEraseCacheTest::StressGarbageCollectionWithStrings [GOOD] >> TFlatExecutorLeases::Basics >> TNodeBrokerTest::ResolveScopeIdForServerless [GOOD] >> TVersions::Wreck2Reverse [GOOD] >> TVersions::Wreck1 >> TConfigsCacheTests::TestConfigurationSaveOnNotification [GOOD] >> TConfigsCacheTests::TestOverwrittenConfigurationDoesntCauseNotification >> TNodeBrokerTest::Test1001NodesSubscribers >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TPartBtreeIndexIteration::OneNode_History [GOOD] >> TPartBtreeIndexIteration::OneNode_Slices ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SnapshotSharingByExchanger [GOOD] Test command err: 2025-06-30T09:20:38.315564Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:184:2154] Bootstrap 2025-06-30T09:20:38.352717Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:184:2154] Become StateWork (SchemeCache [1:193:2160]) 2025-06-30T09:20:38.352860Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:185:2104] Bootstrap 2025-06-30T09:20:38.354956Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:185:2104] Become StateWork (SchemeCache [2:196:2107]) 2025-06-30T09:20:38.367502Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:38.370128Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:38.370198Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:20:38.370306Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:38.371010Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:20:38.371080Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:38.371087Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:38.371126Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:20:38.374139Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:20:38.374186Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:20:38.374203Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:20:38.374242Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:38.374258Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx 
from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:38.374293Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:38.407402Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:38.407463Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:38.418396Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:38.418444Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:38.418456Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:38.418466Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:38.418487Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:38.418495Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:38.418501Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:38.418507Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:38.429292Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:38.429352Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:38.440216Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:38.440308Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:38.440518Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:38.440526Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:20:38.442222Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:38.442239Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:20:38.442644Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode 
Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:38.443053Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/004263/r3tmp/tmpyxjMSA/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-30T09:20:38.443141Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/cl3w/004263/r3tmp/tmpyxjMSA/pdisk_1.dat 2025-06-30T09:20:38.443154Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/cl3w/004263/r3tmp/tmpyxjMSA/pdisk_1.dat 2025-06-30T09:20:38.443362Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-30T09:20:38.443468Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:38.443496Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:38.443514Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:38.443563Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:38.443689Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:38.444053Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:38.444136Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:38.455379Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:20:38.458392Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-30T09:20:38.458609Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/cl3w/004263/r3tmp/tmpyxjMSA/pdisk_1.dat": unknown reason, errno# 0. 
PDiskId# 1000 2025-06-30T09:20:38.458836Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/cl3w/004263/r3tmp/tmpyxjMSA/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/cl3w/004263/r3tmp/tmpyxjMSA/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 1746294591063967304 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-30T09:20:38.458924Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-30T09:20:38.459116Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:38.459213Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:38.459224Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:38.459283Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:38.459300Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:38.459332Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:38.459353Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:38.459362Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:38.459371Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:38.459383Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:38.459483Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:38.459493Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:38.459499Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:38.459517Z 
node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:419:2307] 2025-06-30T09:20:38.459599Z node 2 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:38.459607Z node 2 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:38.459613Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:38.459634Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:421:2117] 2025-06-30T09:20:38.460556Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:38.460571Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:409:2113] 2025-06-30T09:20:38.460636Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:38.460643Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:408:2303] 2025-06-30T09:20:38.460852Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:419:2307]} 2025-06-30T09:20:38.460865Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:38.460874Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:38.460879Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:38.461022Z node 2 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:421:2117]} 2025-06-30T09:20:38.461079Z node 2 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:38.461088Z node 2 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:38.461092Z node 2 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:38.480838Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:38.480865Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-30T09:20:38.480871Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-30T09:20:38.480879Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-30T09:20:38.494162Z node 1 :BS_CONTROLLER DEBUG: {BSC19@console_interaction.cpp:74} Console proposed config response Response# {Status: ReverseCommit ConsoleConfigVersion: 0 YAML: "" } 2025-06-30T09:20:38.545956Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:184:2154] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:20:38.547183Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976710656 RangeEnd# 281474976715656 txAllocator# 72057594046447617 2025-06-30T09:20:38.547249Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:185:2104] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:20:38.547952Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:20:38.586406Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1 AvailableSize: 34225520640 TotalSize: 34359738368 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 EnforcedDynamicSlotSize: 34158411776 State: Normal } } 2025-06-30T09:20:38.629202Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1000 AvailableSize: 0 TotalSize: 0 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 State: OpenFileError } } 2025-06-30T09:20:38.660730Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } SatisfactionRank: 0 AvailableSize: 34158411776 AllocatedSize: 0 StatusFlags: 1 VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Occupancy: 0.00098231827111984276 State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 1000 } } 2025-06-30T09:20:38.958277Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:38.959025Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:38.959101Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } |80.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |80.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup >> TTenantPoolTests::TestForcedSensorLabelsForStaticConfig [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_BTreeIndex [GOOD] >> TConsoleConfigTests::TestConfigureOrderConflicts [GOOD] >> TConsoleConfigSubscriptionTests::TestListConfigSubscriptions [GOOD] >> TJaegerTracingConfiguratorTests::RequestTypeThrottler [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_History_FlatIndex ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ResolveScopeIdForServerless [GOOD] Test command err: 2025-06-30T09:20:42.285590Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:42.289636Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.289768Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.289779Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.289858Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.289986Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.290252Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.290290Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.290397Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.290493Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.315700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:42.315727Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:42.321983Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:42.322569Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:42.322661Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:42.322920Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:42.326047Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:42.326084Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:42.326138Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:42.326156Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:42.326162Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:42.326183Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:42.326207Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:42.326213Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:42.326218Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:42.326225Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:42.326246Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:42.326253Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:42.359042Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:42.359088Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.025000Z 2025-06-30T09:20:42.359105Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-30T09:20:42.359117Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z nodes=0 expired=0 removed=0 2025-06-30T09:20:42.430789Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:255:2200], Recipient [1:221:2179]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.430833Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:255:2200] Leader: 1 Dead: 0 Generation: 2 VersionInfo:  } 2025-06-30T09:20:42.433014Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:19:2066], Recipient [1:221:2179]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 1 } 2025-06-30T09:20:42.433039Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.433059Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:42.443518Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:257:2201], Recipient [1:221:2179]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.443578Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:257:2201] Leader: 1 Dead: 0 Generation: 2 VersionInfo:  } ... 
waiting for nameservers are connected (done) 2025-06-30T09:20:42.443677Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [2:42:2066], Recipient [1:257:2201] 2025-06-30T09:20:42.443683Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.443701Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:42.443718Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:261:2205], Recipient [1:221:2179]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.443757Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:259:2203], Recipient [1:221:2179]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:42.443763Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.443770Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:42.444671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 2025-06-30T09:20:42.451455Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:293:2235], Recipient [1:221:2179]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.451522Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:259:2203], Recipient [1:221:2179]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "/dc-1/SharedDB" } 2025-06-30T09:20:42.451529Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:42.451539Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "/dc-1/SharedDB" 2025-06-30T09:20:42.451602Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:17:2064], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/SharedDB TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:42.451619Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: 
Create subscriber: self# [1:17:2064], path# /dc-1/SharedDB, domainOwnerId# 72057594046678944 2025-06-30T09:20:42.457580Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:17:2064], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1/SharedDB PathId: [OwnerId: 72057594046678944, LocalPathId: 2] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1/SharedDB" PathDescription { Self { Name: "SharedDB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 } 2025-06-30T09:20:42.457704Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:17:2064], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1/SharedDB PathId: [OwnerId: 72057594046678944, LocalPathId: 2] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1/SharedDB" PathDescription { Self { Name: "SharedDB" PathId: 2 Schemeshar ... 
node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-30T09:20:42.480015Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v2 host1:1001 to epoch cache 2025-06-30T09:20:42.480042Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v2 to update nodes log 2025-06-30T09:20:42.480112Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200025000 Name: "slot-0" } 2025-06-30T09:20:42.480499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 102 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000002 FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:20:42.486154Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:333:2267], Recipient [1:221:2179]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.486233Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:259:2203], Recipient [1:221:2179]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "/dc-1/ServerlessDB" } 2025-06-30T09:20:42.486242Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:42.486254Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "/dc-1/ServerlessDB" 2025-06-30T09:20:42.486301Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:17:2064], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/ServerlessDB TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:42.486318Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:17:2064], path# /dc-1/ServerlessDB, domainOwnerId# 72057594046678944 2025-06-30T09:20:42.486619Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:17:2064], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1/ServerlessDB PathId: [OwnerId: 72057594046678944, LocalPathId: 3] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1/ServerlessDB" PathDescription { Self { Name: "ServerlessDB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: 
EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944 } 2025-06-30T09:20:42.486663Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:17:2064], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1/ServerlessDB PathId: [OwnerId: 72057594046678944, LocalPathId: 3] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1/ServerlessDB" PathDescription { Self { Name: "ServerlessDB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [1:335:2268] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 
SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:20:42.486700Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:17:2064], cacheItem# { Subscriber: { Subscriber: [1:335:2268] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 9 TableKind: 0 Created: 1 CreateStep: 5000002 PathId: [OwnerId: 72057594046678944, LocalPathId: 3] DomainId: [OwnerId: 72057594046678944, LocalPathId: 3] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/ServerlessDB TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:42.486780Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:342:2269], recipient# [1:334:2179], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/ServerlessDB TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared Users: [] Groups: [] } }] } 2025-06-30T09:20:42.486797Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1/ServerlessDB TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared Users: [] Groups: [] } } 2025-06-30T09:20:42.486819Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "/dc-1/ServerlessDB": scope id# <72057594046678944:2>: serviced subdomain# 72057594046678944:3 2025-06-30T09:20:42.486836Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:334:2179], Recipient [1:221:2179]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:42.486840Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:42.486855Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:42.486858Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host2:1001 (not fixed) tenant: /dc-1/ServerlessDB 2025-06-30T09:20:42.486890Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v3 host2:1001 to database state=Active resolvehost=host2.yandex.net address=1.2.3.5 dc=1 location=DC=1/M=2/R=3/U=5/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:3 slotindex=0 authorizedbycertificate=false bridgePileId= 
2025-06-30T09:20:42.486942Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:42.486952Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 2 to 3 2025-06-30T09:20:42.486956Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=3 2025-06-30T09:20:42.498001Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:42.498032Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:42.498047Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 2 to 3 2025-06-30T09:20:42.498054Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v3 host2:1001 to epoch cache 2025-06-30T09:20:42.498091Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v3 to update nodes log 2025-06-30T09:20:42.498153Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 7200025000 Name: "slot-0" } >> TConsoleConfigSubscriptionTests::TestReplaceConfigSubscriptions >> TJaegerTracingConfiguratorTests::RequestTypeSampler >> LocalPartition::WithoutPartitionUnknownEndpoint [GOOD] >> BuildStatsHistogram::Ten_Serial [GOOD] >> TConsoleConfigTests::TestGetItems ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-true [GOOD] >> LocalPartition::WithoutPartitionPartitionRelocation >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloaded [GOOD] Test command err: 2025-06-30T09:20:41.859151Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.862820Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.863442Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.863753Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.884282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:41.884314Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... 
waiting for nameservers are connected 2025-06-30T09:20:41.888790Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:41.889253Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:41.889321Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:41.889502Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:41.890230Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:41.890265Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:41.890318Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:41.890331Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:41.890336Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:41.890350Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:41.890368Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:41.890372Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:41.890376Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:41.890380Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:41.890397Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:41.890402Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:41.911555Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:41.911597Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.025000Z 2025-06-30T09:20:41.911607Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-30T09:20:41.911617Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z nodes=0 expired=0 removed=0 2025-06-30T09:20:41.952596Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:204:2199], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:41.952653Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:204:2199] Leader: 1 Dead: 0 Generation: 2 VersionInfo:  } ... 
waiting for nameservers are connected (done) 2025-06-30T09:20:41.954307Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:18:2065], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 1 } 2025-06-30T09:20:41.954333Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:41.954348Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:41.954416Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:208:2203], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:41.954464Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:206:2201], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:41.954473Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:41.954482Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:41.954536Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:41.954549Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:16:2063], path# /dc-1, domainOwnerId# 72057594046678944 2025-06-30T09:20:41.959694Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } 
PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 } 2025-06-30T09:20:41.959786Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [1:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:20:41.959830Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:16:2063], cacheItem# { Subscriber: { Subscriber: [1:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:41.959903Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: 
Send result: self# [1:217:2205], recipient# [1:209:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:41.959925 ... # { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:42.405263Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:42.405284Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [2:209:2178], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:42.405291Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:42.405315Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:42.405322Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host1:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:42.405360Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v2 host1:1001 to database state=Active resolvehost=host1.host1.host1 address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:42.405427Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:42.405438Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 1 to 2 2025-06-30T09:20:42.405443Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=2 2025-06-30T09:20:42.417002Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:42.417035Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:42.417050Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-30T09:20:42.417057Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v2 host1:1001 to epoch cache 2025-06-30T09:20:42.417092Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v2 to update nodes log 2025-06-30T09:20:42.417163Z node 2 :NODE_BROKER TRACE: 
node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200024000 Name: "slot-0" } 2025-06-30T09:20:42.417314Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [2:221:2209], Recipient [2:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.417346Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [2:206:2201], Recipient [2:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:42.417355Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:42.417368Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:42.417433Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:42.417471Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:16:2063], cacheItem# { Subscriber: { Subscriber: [2:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:42.417538Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:223:2210], recipient# [2:222:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:42.417557Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { 
DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:42.417574Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:42.417590Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [2:222:2178], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:42.417597Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:42.417617Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:42.417623Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host2:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:42.417655Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v3 host2:1001 to database state=Active resolvehost=host2.host2.host2 address=1.2.3.5 dc=1 location=DC=1/M=2/R=3/U=5/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:42.417730Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:42.417739Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 2 to 3 2025-06-30T09:20:42.417744Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=3 2025-06-30T09:20:42.428778Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:42.428805Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:42.428819Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 2 to 3 2025-06-30T09:20:42.428826Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v3 host2:1001 to epoch cache 2025-06-30T09:20:42.428862Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v3 to update nodes log 2025-06-30T09:20:42.428927Z node 2 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 7200024000 Name: "slot-1" } ... 
waiting for cache miss 2025-06-30T09:20:42.429024Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1024 Deadline: 2.107024s } 2025-06-30T09:20:42.429039Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1024, deadline# 2.107024s 2025-06-30T09:20:42.429044Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:653: Schedule wakeup for new earliest deadline 2.107024s 2025-06-30T09:20:42.429055Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1025 Deadline: 1.107024s } 2025-06-30T09:20:42.429060Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1025, deadline# 1.107024s 2025-06-30T09:20:42.429064Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:653: Schedule wakeup for new earliest deadline 1.107024s ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) 2025-06-30T09:20:42.439954Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:42.439986Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:42.440004Z node 2 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v1 -> v3 to [2:18:2065] ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 2025-06-30T09:20:42.518907Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:896: HandleWakeup at 1.108024s 2025-06-30T09:20:42.518950Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1025, error=Deadline exceeded 2025-06-30T09:20:42.530663Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:42.530701Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:42.574157Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:896: HandleWakeup at 2.108024s 2025-06-30T09:20:42.574193Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1024, error=Deadline exceeded >> BuildStatsHistogram::Ten_Crossed >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloadedWithReboot >> TConsoleConfigSubscriptionTests::TestReplaceConfigSubscriptions [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForNewSubscription >> TFlatTableExecutor_IndexLoading::Scan_History_FlatIndex [GOOD] >> TJaegerTracingConfiguratorTests::RequestTypeSampler [GOOD] >> TConfigsCacheTests::TestOverwrittenConfigurationDoesntCauseNotification [GOOD] >> TJaegerTracingConfiguratorTests::SamplingSameScope >> TFlatTableExecutor_IndexLoading::Scan_History_BTreeIndex >> TConfigsCacheTests::TestConfigurationChangeSensor >> TConsoleConfigTests::TestGetItems [GOOD] >> TConsoleConfigTests::TestGetNodeItems >> TConsoleTests::TestGetUnknownTenantStatus [GOOD] >> TConsoleTests::TestGetUnknownTenantStatusExtSubdomain >> TFlatExecutorLeases::Basics [GOOD] >> TFlatExecutorLeases::BasicsLeaseTimeout >> TNodeBrokerTest::LoadStateMoveEpoch [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-30T09:20:42.495151Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.498870Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.499468Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.499731Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.519188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:42.519221Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:42.524950Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:42.525525Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:42.525604Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:42.525797Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:42.526907Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:42.526973Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:42.527027Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:42.527047Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:42.527053Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:42.527073Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:42.527102Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:42.527109Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:42.527115Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:42.527121Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:42.527146Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:42.527153Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:42.548448Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:42.548500Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.025000Z 2025-06-30T09:20:42.548510Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-30T09:20:42.548521Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z nodes=0 expired=0 removed=0 2025-06-30T09:20:42.591107Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:204:2199], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.591176Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:204:2199] Leader: 1 Dead: 0 Generation: 2 VersionInfo:  } ... 
waiting for nameservers are connected (done) 2025-06-30T09:20:42.593770Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:18:2065], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 1 } 2025-06-30T09:20:42.593811Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.593847Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:42.593964Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:208:2203], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.594018Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:206:2201], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:42.594024Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:42.594036Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:42.594116Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:42.594137Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:16:2063], path# /dc-1, domainOwnerId# 72057594046678944 2025-06-30T09:20:42.603191Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } 
PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 } 2025-06-30T09:20:42.603365Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [1:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:20:42.603442Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:16:2063], cacheItem# { Subscriber: { Subscriber: [1:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:42.603562Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: 
Send result: self# [1:217:2205], recipient# [1:209:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:42.603600 ... 06-30T09:20:42.834891Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:42.834896Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host1:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:42.834926Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v2 host1:1001 to database state=Active resolvehost=host1.host1.host1 address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:42.834976Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:42.834982Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 1 to 2 2025-06-30T09:20:42.834985Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=2 2025-06-30T09:20:42.845890Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:42.845915Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:42.845924Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-30T09:20:42.845933Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v2 host1:1001 to epoch cache 2025-06-30T09:20:42.845958Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v2 to update nodes log 2025-06-30T09:20:42.846007Z node 2 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200024000 Name: "slot-0" } 2025-06-30T09:20:42.846144Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [2:221:2209], Recipient [2:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.846166Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [2:206:2201], Recipient [2:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:42.846171Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:42.846178Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" 
Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:42.846219Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:42.846246Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:16:2063], cacheItem# { Subscriber: { Subscriber: [2:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:42.846339Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:223:2210], recipient# [2:222:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:42.846357Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:42.846370Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:42.846380Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [2:222:2178], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:42.846384Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:42.846396Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:42.846400Z node 2 :NODE_BROKER DEBUG: 
node_broker__register_node.cpp:83: Registration request from host2:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:42.846425Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v3 host2:1001 to database state=Active resolvehost=host2.host2.host2 address=1.2.3.5 dc=1 location=DC=1/M=2/R=3/U=5/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:42.846468Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:42.846472Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 2 to 3 2025-06-30T09:20:42.846475Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=3 2025-06-30T09:20:42.857456Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:42.857486Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:42.857498Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 2 to 3 2025-06-30T09:20:42.857503Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v3 host2:1001 to epoch cache 2025-06-30T09:20:42.857537Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v3 to update nodes log 2025-06-30T09:20:42.857590Z node 2 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 7200024000 Name: "slot-1" } ... waiting for cache miss 2025-06-30T09:20:42.857668Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1024 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.857679Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1024, deadline# 18446744073709.551615s 2025-06-30T09:20:42.857684Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1025 Deadline: 1.107024s } 2025-06-30T09:20:42.857687Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1025, deadline# 1.107024s 2025-06-30T09:20:42.857690Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:653: Schedule wakeup for new earliest deadline 1.107024s ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) 2025-06-30T09:20:42.867878Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:42.867913Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:42.867926Z node 2 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v1 -> v3 to [2:18:2065] ... 
blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 2025-06-30T09:20:42.939776Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:896: HandleWakeup at 1.108024s 2025-06-30T09:20:42.939812Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1025, error=Deadline exceeded ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR 2025-06-30T09:20:42.939910Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:747: Handle NKikimrNodeBroker.TUpdateNodes Epoch { Id: 1 Version: 1 Start: 24000 End: 3600024000 NextEnd: 7200024000 } SeqNo: 0 2025-06-30T09:20:42.939930Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039952, Sender [2:18:2065], Recipient [2:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest { SeqNo: 0 } 2025-06-30T09:20:42.939938Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:262: StateWork, processing event TEvNodeBroker::TEvSyncNodesRequest 2025-06-30T09:20:42.940007Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:747: Handle NKikimrNodeBroker.TUpdateNodes Epoch { Id: 1 Version: 3 Start: 24000 End: 3600024000 NextEnd: 7200024000 } Updates { Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200024000 Name: "slot-0" } } Updates { Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 7200024000 Name: "slot-1" } } SeqNo: 0 2025-06-30T09:20:42.940032Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:812: Handle NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesResponse { SeqNo: 0 } 2025-06-30T09:20:42.940036Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:128: Cache miss succeed: nodeId=1024 2025-06-30T09:20:42.940046Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1024 Deadline: 18446744073709.551615s } |80.8%| [TA] {RESULT} $(B)/ydb/services/cms/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |80.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TTenantPoolTests::TestForcedSensorLabelsForStaticConfig [GOOD] Test command err: 2025-06-30T09:20:42.428197Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:253:2105] Bootstrap 2025-06-30T09:20:42.460344Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:253:2105] Become StateWork (SchemeCache [2:263:2108]) 2025-06-30T09:20:42.460491Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:252:2155] Bootstrap 2025-06-30T09:20:42.462409Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:252:2155] Become StateWork (SchemeCache [1:266:2161]) 2025-06-30T09:20:42.462508Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [3:254:2105] Bootstrap 2025-06-30T09:20:42.464118Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [3:254:2105] Become StateWork (SchemeCache [3:268:2108]) 2025-06-30T09:20:42.477682Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:42.479307Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:42.479333Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:20:42.479650Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:42.479805Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:20:42.479839Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:42.479843Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:42.479872Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:20:42.485286Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:20:42.485368Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:20:42.485385Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:20:42.485414Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:42.485430Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:42.485450Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:42.539185Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:42.539264Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:42.550198Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:42.550247Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:42.550264Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:42.550277Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:42.550303Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:42.550311Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:42.550315Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:42.550322Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:42.561471Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:42.561531Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:42.572931Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:42.573008Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:42.573227Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:42.573233Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:20:42.575127Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:42.575146Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:20:42.575686Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:42.576074Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/004675/r3tmp/tmpQU8hIe/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } } } } 2025-06-30T09:20:42.576148Z node 1 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/cl3w/004675/r3tmp/tmpQU8hIe/pdisk_1.dat 2025-06-30T09:20:42.576157Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/cl3w/004675/r3tmp/tmpQU8hIe/pdisk_1.dat 2025-06-30T09:20:42.576161Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /home/runner/.ya/build/build_root/cl3w/004675/r3tmp/tmpQU8hIe/pdisk_1.dat 2025-06-30T09:20:42.576372Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 3 DeclarativePDiskManagement: true } 2025-06-30T09:20:42.576392Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-30T09:20:42.576461Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:42.576484Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:42.576501Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:42.576527Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:42.576572Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:42.577342Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:42.577389Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:42.595695Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:20:42.598702Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-30T09:20:42.598908Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/cl3w/004675/r3tmp/tmpQU8hIe/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-30T09:20:42.599111Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/cl3w/004675/r3tmp/tmpQU8hIe/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/cl3w/004675/r3tmp/tmpQU8hIe/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 1032038888071881033 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-30T09:20:42.599195Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 3 Devices# [] 2025-06-30T09:20:42.599320Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-30T09:20:42.601729Z node 3 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-30T09:20:42.602133Z node 3 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/cl3w/004675/r3tmp/tmpQU8hIe/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-30T09:20:42.602190Z node 3 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/cl3w/004675/r3tmp/tmpQU8hIe/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/cl3w/004675/r3tmp/tmpQU8hIe/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 17266085429583651884 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-30T09:20:42.602412Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:42.602487Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:42.602492Z node 3 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:42.602527Z node 3 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:42.602535Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:42.602584Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:42.602618Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:42.602626Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:42.602630Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:42.602636Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1/users/tenant-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:42.602649Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:42.602652Z node 3 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:42.602657Z node 3 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1/users/tenant-2 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:42.602666Z node 3 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:42.602680Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:42.604164Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:42.604188Z node 1 :LOCAL DEBUG: 
local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:42.604191Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:42.604222Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:505:2306] 2025-06-30T09:20:42.604923Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:42.604934Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:487:2302] 2025-06-30T09:20:42.605298Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:505:2306]} 2025-06-30T09:20:42.605321Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:42.605329Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:42.605332Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:42.605538Z node 2 :LOCAL DEBUG: local.cpp:1258: TDomainLocal(dc-1): TDomainLocal::TEvClientConnected for dc-1 shard 72057594046578944 2025-06-30T09:20:42.605549Z node 2 :LOCAL DEBUG: local.cpp:1117: TDomainLocal(dc-1): Send resolve request for /dc-1/users/tenant-1 to schemeshard 72057594046578944 2025-06-30T09:20:42.605572Z node 3 :LOCAL DEBUG: local.cpp:1258: TDomainLocal(dc-1): TDomainLocal::TEvClientConnected for dc-1 shard 72057594046578944 2025-06-30T09:20:42.605580Z node 3 :LOCAL DEBUG: local.cpp:1117: TDomainLocal(dc-1): Send resolve request for /dc-1/users/tenant-2 to schemeshard 72057594046578944 2025-06-30T09:20:42.612140Z node 3 :LOCAL DEBUG: local.cpp:1285: TDomainLocal(dc-1): HandleResolve from schemeshard 72057594046578944: Status: StatusSuccess Path: "/dc-1/users/tenant-2" PathDescription { Self { Name: "/dc-1/users/tenant-2" PathId: 101 SchemeshardId: 72057594046578944 PathType: EPathTypeSubDomain } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 101 DomainKey { SchemeShard: 72057594046578944 PathId: 101 } } } 2025-06-30T09:20:42.612218Z node 3 :LOCAL DEBUG: local.cpp:1223: TDomainLocal(dc-1): Binding tenant /dc-1/users/tenant-2 to hive 72057594046578946 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:42.612396Z node 3 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:42.612405Z node 3 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:42.612429Z node 3 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[3:514:2118] 2025-06-30T09:20:42.612546Z node 3 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1/users/tenant-2 2025-06-30T09:20:42.612553Z node 3 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [3:489:2114] 2025-06-30T09:20:42.612699Z node 2 :LOCAL DEBUG: local.cpp:1285: TDomainLocal(dc-1): HandleResolve from schemeshard 72057594046578944: Status: StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "/dc-1/users/tenant-1" PathId: 100 SchemeshardId: 72057594046578944 PathType: EPathTypeSubDomain } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 100 DomainKey { SchemeShard: 72057594046578944 PathId: 100 } } } 2025-06-30T09:20:42.612723Z node 2 :LOCAL DEBUG: local.cpp:1223: TDomainLocal(dc-1): Binding tenant /dc-1/users/tenant-1 to hive 72057594046578946 (allocated resources: CPU: 1 Memory: 1 Network: 
1) 2025-06-30T09:20:42.612828Z node 2 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:42.612836Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:42.612849Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:520:2118] 2025-06-30T09:20:42.612954Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1/users/tenant-1 2025-06-30T09:20:42.612960Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:488:2114] 2025-06-30T09:20:42.613669Z node 2 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:520:2118]} 2025-06-30T09:20:42.613687Z node 3 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[3:514:2118]} 2025-06-30T09:20:42.613757Z node 3 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:42.613769Z node 3 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:42.613772Z node 3 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:42.613787Z node 2 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:42.613793Z node 2 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:42.613796Z node 2 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk >> TFlatTableExecutor_IndexLoading::Scan_History_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_FlatIndex >> TNodeBrokerTest::NodesMigrationExpireActive >> TConsoleConfigTests::TestGetNodeItems [GOOD] >> TConsoleConfigTests::TestGetNodeConfig >> TFlatTableExecutor_IndexLoading::Scan_Groups_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Table [GOOD] >> BuildStatsHistogram::Ten_Crossed [GOOD] >> TJaegerTracingConfiguratorTests::SamplingSameScope [GOOD] >> TJaegerTracingConfiguratorTests::ThrottlingByDb >> BuildStatsHistogram::Ten_Mixed_Log >> TConfigsCacheTests::TestConfigurationChangeSensor [GOOD] >> TConfigsDispatcherTests::TestSubscriptionNotification ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::LoadStateMoveEpoch [GOOD] Test command err: 2025-06-30T09:20:41.247542Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.253153Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.253206Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.253234Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.253269Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.253299Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.253319Z node 7 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.257762Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.257865Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.257920Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.257966Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.258045Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.258101Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.258141Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.258242Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.258292Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.258328Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.259846Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.259909Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.259940Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.259972Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.260001Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.260088Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.261691Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.261880Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.261954Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.262007Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.262063Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.262092Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.262114Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.262134Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.262958Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.263091Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.263129Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.263168Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.263197Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.263256Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.263292Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:41.263335Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.263363Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.263682Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.264246Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.264338Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.264371Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.264393Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.264410Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.267835Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.268090Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.268745Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.268783Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.269004Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.269127Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.269337Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.269785Z node 1 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.269978Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.270231Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.270271Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.270358Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.271734Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.271960Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.271995Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.272106Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.272281Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.272374Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.272544Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.273352Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.274001Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.274602Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.276749Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.276983Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:41.297818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:41.297869Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... 
waiting for nameservers are connected 2025-06-30T09:20:41.309261Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:41.311934Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:41.312091Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:41.312349Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:41.313016Z node 1 :NODE_BROKER DEBUG: node_ ... Scheme Execute 2025-06-30T09:20:42.345058Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:42.345147Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:42.345222Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:42.345239Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1000: [DB] Loaded current epoch: #3.4 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:42.345250Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1029: [DB] Loaded approximate epoch start: #3.4 2025-06-30T09:20:42.345255Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1046: [DB] Loaded main nodes table: Nodes 2025-06-30T09:20:42.345285Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:294: [Dirty] Added expired node #1024.v0 host1:1001 2025-06-30T09:20:42.345327Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1128: [DB] Loaded node #1024.v0 { NodeId: 1024, State: Expired, Version: 0, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:42.345354Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1024.v4 { NodeId: 1024, State: Expired, Version: 4, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:42.345362Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1254: [DB] Node #1024.v4 is already migrated 2025-06-30T09:20:42.345381Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:42.345442Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:42.345450Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:42.345456Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:42.345462Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:536: [Dirty] Remove node #1024.v4 host1:1001 2025-06-30T09:20:42.345474Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:548: [Dirty] Move to new epoch #4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z, approximate epoch start #4.5 2025-06-30T09:20:42.345481Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:836: [DB] Removing node #1024.v5 from database 2025-06-30T09:20:42.345508Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in 
database: #4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:42.345519Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #4.5 2025-06-30T09:20:42.686150Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:42.686194Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:42.686207Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z, approximate epoch start #4.5 nodes=0 expired=0 2025-06-30T09:20:42.686220Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z nodes=0 expired=0 removed=1 2025-06-30T09:20:42.686227Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v5 to update nodes log 2025-06-30T09:20:42.686368Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:787:2298] Leader: 1 Dead: 0 Generation: 4 VersionInfo:  } 2025-06-30T09:20:42.686395Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:787:2298], Recipient [1:754:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.686411Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:792:2299], Recipient [1:754:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.686491Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:793:2300] Leader: 1 Dead: 0 Generation: 4 VersionInfo:  } 2025-06-30T09:20:42.686521Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:793:2300], Recipient [1:754:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.686537Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:792:2299] Leader: 1 Dead: 0 Generation: 4 VersionInfo:  } 2025-06-30T09:20:42.686560Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:794:2301] Leader: 1 Dead: 0 Generation: 4 VersionInfo:  } 2025-06-30T09:20:42.686572Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:795:2302] Leader: 1 Dead: 0 Generation: 4 VersionInfo:  } 2025-06-30T09:20:42.686594Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:794:2301], Recipient [1:754:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.686614Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:761:2277] Leader: 1 Dead: 0 Generation: 4 VersionInfo:  } 2025-06-30T09:20:42.686622Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:795:2302], Recipient [1:754:2272]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.686635Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:761:2277], Recipient [1:754:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.686668Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:25:2072], Recipient [1:754:2272]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 2 } 2025-06-30T09:20:42.686674Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.686684Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:42.686723Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [2:54:2072], Recipient [1:792:2299] 2025-06-30T09:20:42.686728Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.686754Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:42.686779Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [3:83:2072], Recipient [1:793:2300] 2025-06-30T09:20:42.686784Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.686791Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:42.686825Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:794:2301] 2025-06-30T09:20:42.686829Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.686835Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:42.686856Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:795:2302] 2025-06-30T09:20:42.686860Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.686865Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:42.686877Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:761:2277] 2025-06-30T09:20:42.686879Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.686882Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:42.686967Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:817:2313], Recipient [1:754:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.686989Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: 
StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:754:2272]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:42.686994Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.687000Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:42.687056Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:819:2315], Recipient [1:754:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.687069Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:754:2272]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:42.687074Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:42.687080Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:42.687134Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:821:2317], Recipient [1:754:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:42.687155Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:641:2213], Recipient [1:754:2272]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:42.687161Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:42.687178Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex_Empty [GOOD] >> TFlatTableExecutor_KeepEraseMarkers::TestKeepEraseMarkers [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactUncommittedLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactCommittedLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactedLongTxRestart >> TConsoleTests::TestRestartConsoleAndPoolsExtSubdomain [GOOD] >> TConsoleTests::TestSetDefaultStorageUnitsQuota >> TDynamicNameserverTest::BasicFunctionality-EnableNodeBrokerDeltaProtocol-true >> TConfigsDispatcherTests::TestSubscriptionNotification [GOOD] >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberAfterUpdate >> TFlatTableExecutor_LongTx::CompactedLongTxRestart [GOOD] >> TFlatTableExecutor_LongTx::CompactMultipleChanges [GOOD] >> TFlatTableExecutor_LongTx::LongTxBorrow [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTxRead [GOOD] >> TFlatTableExecutor_LongTx::CompactedTxIdReuse [GOOD] >> TFlatTableExecutor_LongTx::MergeSkewedCommitted [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::SmallValues [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::OuterBlobValues [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::ExternalBlobValues [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestEnqueueCancel [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriority [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityCancel [GOOD] >> 
TFlatTableExecutor_LowPriorityTxs::TestLowPriorityAllocatingCancel [GOOD] >> TFlatTableExecutor_MoveTableData::TestMoveSnapshot >> TNodeBrokerTest::SyncNodes >> TConsoleConfigTests::TestGetNodeConfig [GOOD] >> TConsoleConfigTests::TestAutoOrder >> TFlatTableExecutor_MoveTableData::TestMoveSnapshot [GOOD] >> TFlatTableExecutor_MoveTableData::TestMoveSnapshotFollower [GOOD] >> TFlatTableExecutor_PostponedScan::TestPostponedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestCancelFinishedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestCancelRunningPostponedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestPostponedScanSnapshotMVCC [GOOD] >> TFlatTableExecutor_Reboot::TestSchemeGcAfterReassign >> TNodeBrokerTest::ExtendLeaseBumpVersion [GOOD] >> TNodeBrokerTest::EpochCacheUpdate >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberAfterUpdate [GOOD] >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberDuringUpdate >> TConsoleTests::TestCreateTenant [GOOD] >> TConsoleTests::TestCreateTenantExtSubdomain >> TFlatTableExecutor_Reboot::TestSchemeGcAfterReassign [GOOD] >> TFlatTableExecutor_RejectProbability::MaxedOutRejectProbability >> TJaegerTracingConfiguratorTests::ThrottlingByDb [GOOD] >> TJaegerTracingConfiguratorTests::SamplingByDb >> TStorageBalanceTest::TestScenario3 [GOOD] >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-true >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdates [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdatesAddRemove >> TNodeBrokerTest::NodesMigrationExtendLeaseThenExpire >> TConsoleConfigSubscriptionTests::TestNotificationForNewSubscription [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForNewConfigItem >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberDuringUpdate [GOOD] >> TConfigsDispatcherTests::TestRemoveSubscription >> TNetClassifierUpdaterTest::TestGetUpdatesFromHttpServer [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsAndTags >> TFlatTableExecutor_RejectProbability::MaxedOutRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::SomeRejectProbability >> TFlatExecutorLeases::BasicsLeaseTimeout [GOOD] >> TFlatExecutorLeases::BasicsInitialLease >> TFlatTableExecutor_RejectProbability::SomeRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::ZeroRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::ZeroRejectProbabilityMultipleTables >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-false >> TConsoleConfigTests::TestAutoOrder [GOOD] >> TConsoleConfigTests::TestAutoSplit >> TConfigsDispatcherTests::TestRemoveSubscription [GOOD] >> TConfigsDispatcherTests::TestRemoveSubscriptionWhileUpdateInProcess >> TFlatTableExecutor_RejectProbability::ZeroRejectProbabilityMultipleTables [GOOD] >> TFlatTableExecutor_Reschedule::TestExecuteReschedule [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorSetResourceProfile [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestTxData [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorReuseStaticMemory [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestPages [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorPageLimitExceeded [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemory >> BuildStatsHistogram::Ten_Mixed_Log [GOOD] >> 
BuildStatsHistogram::Ten_Serial_Log >> TNodeBrokerTest::ExtendLeaseSetLocationInOneRegistration >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemory [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemoryFollower [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorMemoryLimitExceeded [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorPreserveTxData [GOOD] >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TConfigsDispatcherTests::TestRemoveSubscriptionWhileUpdateInProcess [GOOD] >> TConfigsDispatcherTests::TestEmptyChangeCausesNoNotification >> TJaegerTracingConfiguratorTests::SamplingByDb [GOOD] >> TJaegerTracingConfiguratorTests::SharedThrottlingLimits >> TNodeBrokerTest::NodesMigration1001Nodes [GOOD] >> TConsoleConfigTests::TestAutoSplit [GOOD] >> TConsoleConfigTests::TestValidation >> TConfigsDispatcherTests::TestEmptyChangeCausesNoNotification [GOOD] >> TConfigsDispatcherTests::TestYamlAndNonYamlCoexist |80.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] Test command err: 2025-06-30T09:20:45.123886Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.127253Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.127868Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.128171Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.143737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:45.143763Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:45.147787Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:45.148177Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:45.148233Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:45.148420Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:45.149090Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:45.149121Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:45.149154Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:45.149167Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:45.149171Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:45.149186Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:45.149208Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:45.149215Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:45.149220Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:45.149225Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:45.149237Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:45.149243Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:45.170398Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:45.170442Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.025000Z 2025-06-30T09:20:45.170456Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-30T09:20:45.170469Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z nodes=0 expired=0 removed=0 2025-06-30T09:20:45.211358Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:204:2199], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.211416Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:204:2199] Leader: 1 Dead: 0 Generation: 2 VersionInfo:  } ... waiting for nameservers are connected (done) 2025-06-30T09:20:45.214431Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:18:2065], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 0 SeqNo: 0 } 2025-06-30T09:20:45.214470Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:45.214481Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:18:2065], seqNo: 0, version: 0, server pipe id: [1:204:2199] 2025-06-30T09:20:45.214496Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v0 -> v1 to [1:18:2065] ... 
blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 2025-06-30T09:20:45.214621Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:208:2203], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.214663Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:206:2201], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:45.214670Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:45.214684Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:45.214776Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:45.214795Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:16:2063], path# /dc-1, domainOwnerId# 72057594046678944 2025-06-30T09:20:45.223396Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 
MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 } 2025-06-30T09:20:45.223545Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [1:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:20:45.223604Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:16:2063], cacheItem# { Subscriber: { Subscriber: [1:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:45.223719Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:217:2205], recipient# [1:209:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 7205759404 ... 
, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:45.464955Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:217:2205], recipient# [2:209:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:45.464971Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:45.464989Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:45.465003Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [2:209:2178], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:45.465007Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:45.465026Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:45.465029Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host1:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:45.465054Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v2 host1:1001 to database state=Active resolvehost=host1.host1.host1 address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:45.465098Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:45.465104Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 1 to 2 2025-06-30T09:20:45.465107Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=2 2025-06-30T09:20:45.476120Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:45.476169Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] 
Register new active node #1024.v2 host1:1001 2025-06-30T09:20:45.476183Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-30T09:20:45.476191Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v2 host1:1001 to epoch cache 2025-06-30T09:20:45.476222Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v2 to update nodes log 2025-06-30T09:20:45.476279Z node 2 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200024000 Name: "slot-0" } 2025-06-30T09:20:45.476399Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [2:221:2209], Recipient [2:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.476436Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [2:206:2201], Recipient [2:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:45.476444Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:45.476455Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:45.476511Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:45.476535Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:16:2063], cacheItem# { Subscriber: { Subscriber: [2:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:45.476576Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:223:2210], recipient# [2:222:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 
Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:45.476588Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:45.476597Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:45.476606Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [2:222:2178], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:45.476610Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:45.476622Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:45.476625Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host2:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:45.476648Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v3 host2:1001 to database state=Active resolvehost=host2.host2.host2 address=1.2.3.5 dc=1 location=DC=1/M=2/R=3/U=5/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:45.476690Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:45.476695Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 2 to 3 2025-06-30T09:20:45.476697Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=3 2025-06-30T09:20:45.487723Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:45.487753Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:45.487764Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 2 to 3 2025-06-30T09:20:45.487771Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v3 host2:1001 to epoch cache 2025-06-30T09:20:45.487801Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v3 to update nodes log 2025-06-30T09:20:45.487852Z node 2 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 7200024000 Name: "slot-1" } ... 
waiting for cache miss 2025-06-30T09:20:45.487932Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1024 Deadline: 1.107024s } 2025-06-30T09:20:45.487956Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1024, deadline# 1.107024s 2025-06-30T09:20:45.487961Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:653: Schedule wakeup for new earliest deadline 1.107024s 2025-06-30T09:20:45.487973Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1025 Deadline: 1.107024s } 2025-06-30T09:20:45.487979Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1025, deadline# 1.107024s ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) 2025-06-30T09:20:45.559838Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:896: HandleWakeup at 1.108024s 2025-06-30T09:20:45.559869Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1024, error=Deadline exceeded 2025-06-30T09:20:45.559880Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1025, error=Deadline exceeded >> TNodeBrokerTest::SubscribeToNodes [GOOD] >> TPartBtreeIndexIteration::OneNode_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_Slices ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigration1001Nodes [GOOD] Test command err: 2025-06-30T09:20:42.306465Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.311120Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.311185Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.311220Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.311262Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.311299Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.311323Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.315359Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.315446Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.315516Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.315564Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.315621Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle 
NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.315667Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.315700Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.315795Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.315845Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.315872Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.316962Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.317008Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.317034Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.317059Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.317082Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.317149Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.318481Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.318635Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.318705Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.318783Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.318843Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.318872Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.318897Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.318920Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.319726Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.319858Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.319898Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.319937Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.319970Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.320037Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:42.320088Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.320268Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.320312Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.320710Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.321073Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.321129Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.321221Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.321256Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.321341Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.325797Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.325911Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.325974Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.328106Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.328189Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.328402Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.328446Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.329332Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.329543Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.329896Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.329950Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.330068Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.331388Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.332014Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.332102Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.332512Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.356681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:42.356711Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:42.362515Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:42.363819Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:42.363922Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:42.364172Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:42.364925Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:42.364969Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:42.365024Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:42.365043Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:42.365049Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:42.365066Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:42.365105Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:42.365112Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:42.365118Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:42.365126Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:42.365147Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:134 ... 
pdate nodes log 2025-06-30T09:20:44.120039Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1573.v505 to update nodes log 2025-06-30T09:20:44.120045Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1572.v505 to update nodes log 2025-06-30T09:20:44.120052Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1571.v505 to update nodes log 2025-06-30T09:20:44.120057Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1570.v505 to update nodes log 2025-06-30T09:20:44.120063Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1569.v505 to update nodes log 2025-06-30T09:20:44.120068Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1568.v505 to update nodes log 2025-06-30T09:20:44.120073Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1567.v505 to update nodes log 2025-06-30T09:20:44.120079Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1566.v505 to update nodes log 2025-06-30T09:20:44.120085Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1565.v505 to update nodes log 2025-06-30T09:20:44.120091Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1564.v505 to update nodes log 2025-06-30T09:20:44.120097Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1563.v505 to update nodes log 2025-06-30T09:20:44.120103Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1562.v505 to update nodes log 2025-06-30T09:20:44.120109Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1561.v505 to update nodes log 2025-06-30T09:20:44.120114Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1560.v505 to update nodes log 2025-06-30T09:20:44.120120Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1559.v505 to update nodes log 2025-06-30T09:20:44.120127Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1558.v505 to update nodes log 2025-06-30T09:20:44.120133Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1557.v505 to update nodes log 2025-06-30T09:20:44.120139Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1556.v505 to update nodes log 2025-06-30T09:20:44.120145Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1555.v505 to update nodes log 2025-06-30T09:20:44.120151Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1554.v505 to update nodes log 2025-06-30T09:20:44.120158Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1553.v505 to update nodes log 2025-06-30T09:20:44.120164Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1552.v505 to update nodes log 2025-06-30T09:20:44.120171Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1551.v505 to update nodes log 2025-06-30T09:20:44.120177Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1550.v505 to update nodes log 2025-06-30T09:20:44.120183Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1549.v505 to update nodes log 2025-06-30T09:20:44.120188Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1548.v505 to update nodes log 2025-06-30T09:20:44.120191Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1547.v505 to update nodes log 2025-06-30T09:20:44.120195Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1546.v505 to update nodes log 2025-06-30T09:20:44.120199Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1545.v505 to update nodes log 2025-06-30T09:20:44.120203Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1544.v505 to update nodes log 2025-06-30T09:20:44.120206Z node 1 :NODE_BROKER DEBUG: 
node_broker.cpp:665: Add node #1543.v505 to update nodes log 2025-06-30T09:20:44.120210Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1542.v505 to update nodes log 2025-06-30T09:20:44.120213Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1541.v505 to update nodes log 2025-06-30T09:20:44.120217Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1540.v505 to update nodes log 2025-06-30T09:20:44.120220Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1536.v505 to update nodes log 2025-06-30T09:20:44.120224Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1787.v505 to update nodes log 2025-06-30T09:20:44.120446Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2786:3806], Recipient [1:2778:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.120518Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2787:3807], Recipient [1:2778:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.120548Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2791:3811] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.120582Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2786:3806] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.120595Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2789:3809], Recipient [1:2778:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.120601Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2787:3807] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.120609Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2790:3810] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.120628Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2790:3810], Recipient [1:2778:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.120645Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2789:3809] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.120662Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2791:3811], Recipient [1:2778:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.120717Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:2787:3807] 2025-06-30T09:20:44.120721Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.120730Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.505 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:44.120756Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient 
[1:2786:3806] 2025-06-30T09:20:44.120759Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.120763Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.505 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:44.122614Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:2789:3809] 2025-06-30T09:20:44.122629Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.122645Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.505 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:44.127313Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:2790:3810] 2025-06-30T09:20:44.127336Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.127355Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.505 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:44.127410Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [2:54:2072], Recipient [1:2791:3811] 2025-06-30T09:20:44.127415Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.127422Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.505 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:44.131149Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2845:3860], Recipient [1:2778:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.131203Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:2778:3800]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:44.131210Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.131222Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.505 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:44.133678Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2847:3862], Recipient [1:2778:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.133722Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:2778:3800]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:44.133729Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.133740Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.505 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:44.136107Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2849:3864], Recipient [1:2778:3800]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.136141Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:2778:3800]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:44.136146Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.136155Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.505 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:44.137353Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2851:3866], Recipient [1:2778:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.137384Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:2778:3800]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 504 } 2025-06-30T09:20:44.137389Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.137398Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.505 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z >> TJaegerTracingConfiguratorTests::SharedThrottlingLimits [GOOD] >> TJaegerTracingConfiguratorTests::SharedSamplingLimits >> TxUsage::Sinks_Oltp_WriteToTopic_4_Table >> TFlatExecutorLeases::BasicsInitialLease [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseTimeout >> TNodeBrokerTest::NodesMigrationExpireActive [GOOD] >> BuildStatsHistogram::Ten_Serial_Log [GOOD] >> BuildStatsHistogram::Ten_Crossed_Log >> TConfigsDispatcherTests::TestYamlAndNonYamlCoexist [GOOD] >> TConfigsDispatcherTests::TestYamlEndToEnd >> TVersions::Wreck1 [GOOD] >> TVersions::Wreck1Reverse >> TJaegerTracingConfiguratorTests::SharedSamplingLimits [GOOD] >> TLogSettingsConfiguratorTests::TestNoChanges ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SubscribeToNodes [GOOD] Test command err: 2025-06-30T09:20:42.654405Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.658232Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.658282Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.658309Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.658340Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.658368Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.658387Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.662435Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 
2025-06-30T09:20:42.662499Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.662535Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.662572Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.662619Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.662643Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.662669Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.662758Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.662813Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.662840Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.663722Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.663749Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.663770Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.663800Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.663819Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.663871Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.664928Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.665036Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.665078Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.665112Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.665149Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.665165Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.665181Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.665195Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.665867Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.665962Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.665987Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:42.666010Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.666030Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.666066Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.666098Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.666155Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.666183Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.666772Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.666806Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.666858Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.666879Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.666890Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.666901Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.666998Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.670085Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.670777Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.670807Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.670834Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.670854Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.671061Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.671225Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.671444Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.671541Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.671741Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.671767Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.671852Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.671977Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.672604Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.672783Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.672809Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.672830Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.672932Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.672995Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.673087Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.673154Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.685310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:42.685336Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:42.690761Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:42.691390Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:42.691487Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:42.691702Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:42.692542Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:42.692572Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:42.692614Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:42.692628Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Star ... ver.cpp:672: Handle NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594037936129 ClientId: [8:379:2072] ServerId: [1:634:2208] } 2025-06-30T09:20:44.570375Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:672: Handle NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594037936129 ClientId: [7:295:2072] ServerId: [1:639:2213] } 2025-06-30T09:20:44.584609Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:44.585951Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:44.586045Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:44.586579Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:44.586648Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:44.586728Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:44.586835Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:44.586852Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1000: [DB] Loaded current epoch: #4.10 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:44.586858Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1029: [DB] Loaded approximate epoch start: #4.10 2025-06-30T09:20:44.586863Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1046: [DB] Loaded main nodes table: Nodes 2025-06-30T09:20:44.586904Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:284: [Dirty] Added node #1024.v0 host1:1001 2025-06-30T09:20:44.586935Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1128: [DB] Loaded node #1024.v0 { NodeId: 1024, State: Active, Version: 0, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 3, Expire: Thu, 01 Jan 1970 04:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:44.586943Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1025 not in range (1023, 1024] 2025-06-30T09:20:44.586950Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:303: [Dirty] Added removed node #1025.v11 2025-06-30T09:20:44.586979Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1024.v9 { NodeId: 1024, State: Active, Version: 9, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 3, Expire: Thu, 01 Jan 1970 04:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:44.586986Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1254: [DB] Node #1024.v9 is already migrated 2025-06-30T09:20:44.586997Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1025.v10 { NodeId: 1025, State: Expired, Version: 10, Host: host2, Port: 1001, ResolveHost: host2.yandex.net, Address: 1.2.3.4, Lease: 2, Expire: Thu, 01 Jan 1970 03:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 1, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:44.587002Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1234: [DB] Migrating removed node 
#1025.v11 2025-06-30T09:20:44.587014Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1026.v10 { NodeId: 1026, State: Removed, Version: 10, Host: , Port: 0, ResolveHost: , Address: , Lease: 0, Expire: Thu, 01 Jan 1970 00:00:00 UTC, Location: , AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 0:0 } 2025-06-30T09:20:44.587018Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:303: [Dirty] Added removed node #1026.v10 2025-06-30T09:20:44.587023Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1249: [DB] Removed node #1026.v10 is already migrated 2025-06-30T09:20:44.587029Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 10 to 11 2025-06-30T09:20:44.587044Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:44.587089Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:44.587096Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 1 2025-06-30T09:20:44.587102Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #4.11 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:44.587121Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:836: [DB] Removing node #1025.v11 from database 2025-06-30T09:20:44.587135Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:44.599632Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:44.599690Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:44.599703Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #4.11 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z, approximate epoch start #4.10 nodes=1 expired=0 2025-06-30T09:20:44.599751Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##4.11 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z nodes=1 expired=0 removed=2 2025-06-30T09:20:44.599760Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v9 to update nodes log 2025-06-30T09:20:44.599772Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1026.v10 to update nodes log 2025-06-30T09:20:44.599777Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v11 to update nodes log 2025-06-30T09:20:44.599893Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:814:2323], Recipient [1:806:2317]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.599981Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:816:2325] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.600000Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:814:2323] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.600011Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:815:2324] Leader: 1 Dead: 0 
Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.600022Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:815:2324], Recipient [1:806:2317]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.600049Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:816:2325], Recipient [1:806:2317]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.600122Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:814:2323] 2025-06-30T09:20:44.600128Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.600136Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #5 2025-06-30T09:20:44.600145Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:815:2324] 2025-06-30T09:20:44.600149Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.600152Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #5 2025-06-30T09:20:44.600163Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:816:2325] 2025-06-30T09:20:44.600167Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.600171Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #5 2025-06-30T09:20:44.600341Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:846:2350], Recipient [1:806:2317]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.600378Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:641:2215], Recipient [1:806:2317]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 10 SeqNo: 1 } 2025-06-30T09:20:44.600384Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:44.600391Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:641:2215], seqNo: 1, version: 10, server pipe id: [1:846:2350] 2025-06-30T09:20:44.600399Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v10 -> v11 to [1:641:2215] 2025-06-30T09:20:44.600414Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:847:2351], Recipient [1:806:2317]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.600429Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2215], Recipient [1:806:2317]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:44.600433Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.600442Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:44.600558Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:851:2355], Recipient [1:806:2317]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.600579Z 
node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:848:2352], Recipient [1:806:2317]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 0 SeqNo: 0 } 2025-06-30T09:20:44.600584Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:44.600588Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:848:2352], seqNo: 0, version: 0, server pipe id: [1:851:2355] 2025-06-30T09:20:44.600593Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v0 -> v11 to [1:848:2352] 2025-06-30T09:20:44.600608Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:852:2356], Recipient [1:806:2317]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.600621Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:748:2291], Recipient [1:806:2317]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:44.600626Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.600632Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExpireActive [GOOD] Test command err: 2025-06-30T09:20:44.250274Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.259561Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.259645Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.259685Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.259736Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.259775Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.259802Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.265036Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.265123Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.265179Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.265231Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.265289Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.265332Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 
8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.265369Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.265453Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.265499Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.265522Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.267023Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.267091Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.267129Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.267161Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.267194Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.267278Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.268719Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.268866Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.268929Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.268980Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.269035Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.269063Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.269084Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.269106Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.270015Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.270154Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.270197Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.270234Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.270265Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.270330Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.270381Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 
2025-06-30T09:20:44.270460Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.270499Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.270819Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.274363Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.274536Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.274591Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.279567Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.279622Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.280611Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.280723Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.280888Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.281350Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.281434Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.281527Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.281587Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.281645Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.281691Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.281760Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.284279Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.284549Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.284585Z node 7 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.284853Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.303095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:44.303122Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:44.308712Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:44.309256Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:44.309350Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:44.309589Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:44.310460Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:44.310687Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:44.310760Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:44.310779Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:44.310786Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:44.310804Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:44.310823Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:44.310829Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:44.310835Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:44.310842Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:44.310867Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:44.310875Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:44.354332Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:44.354388Z node 1 :NODE_BROKER TRACE: node_brok ... 
:NODE_BROKER DEBUG: node_broker.cpp:294: [Dirty] Added expired node #1024.v0 host1:1001 2025-06-30T09:20:44.666253Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1128: [DB] Loaded node #1024.v0 { NodeId: 1024, State: Expired, Version: 0, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:44.666292Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1024.v2 { NodeId: 1024, State: Active, Version: 2, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:44.666303Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1229: [DB] Migrating changed node #1024.v4 { NodeId: 1024, State: Expired, Version: 4, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:44.666323Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:44.666389Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:44.666396Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 1, NewVersionUpdateNodes left 0 2025-06-30T09:20:44.666409Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v4 host1:1001 to database state=Expired resolvehost=host1.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:44.666473Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:44.666480Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #3.4 2025-06-30T09:20:44.678537Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:44.678580Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:44.678592Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #3.4 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z, approximate epoch start #3.4 nodes=0 expired=1 2025-06-30T09:20:44.678622Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##3.4 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z nodes=0 expired=1 removed=0 2025-06-30T09:20:44.678629Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v4 to update nodes log 2025-06-30T09:20:44.678834Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:694:2246], Recipient [1:684:2240]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.678866Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:697:2249] 
Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.678884Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:695:2247] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.678896Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:695:2247], Recipient [1:684:2240]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.678929Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:697:2249], Recipient [1:684:2240]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.678938Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:694:2246] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.678948Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:698:2250] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:44.678970Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:698:2250], Recipient [1:684:2240]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.679042Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:695:2247] 2025-06-30T09:20:44.679048Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.679059Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.4 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:44.679084Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:694:2246] 2025-06-30T09:20:44.679089Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.679096Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.4 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:44.679145Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [3:83:2072], Recipient [1:697:2249] 2025-06-30T09:20:44.679150Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.679157Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.4 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:44.679175Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:698:2250] 2025-06-30T09:20:44.679198Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.679205Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.4 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:44.679405Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:726:2273], Recipient [1:684:2240]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.679438Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2215], Recipient [1:684:2240]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:44.679445Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.679453Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.4 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:44.679523Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:728:2275], Recipient [1:684:2240]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.679543Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2215], Recipient [1:684:2240]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:44.679548Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.679554Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.4 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:44.679616Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:730:2277], Recipient [1:684:2240]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.679635Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2215], Recipient [1:684:2240]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:44.679639Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.679646Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.4 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:44.679718Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:732:2279], Recipient [1:684:2240]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.679738Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2215], Recipient [1:684:2240]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 4 } 2025-06-30T09:20:44.679743Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:44.679749Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.4 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:44.679807Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:734:2281], Recipient [1:684:2240]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.679829Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:633:2215], Recipient [1:684:2240]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 4 SeqNo: 2 } 2025-06-30T09:20:44.679836Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:44.679842Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New 
subscriber [1:633:2215], seqNo: 2, version: 4, server pipe id: [1:734:2281] 2025-06-30T09:20:44.679849Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v4 -> v4 to [1:633:2215] 2025-06-30T09:20:44.679911Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:734:2281], Recipient [1:684:2240]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:44.679918Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:633:2215], seqNo: 2, server pipe id: [1:734:2281] 2025-06-30T09:20:44.679949Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:736:2283], Recipient [1:684:2240]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:44.679971Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:633:2215], Recipient [1:684:2240]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:44.679977Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:44.679996Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TFlatTableExecutor_ResourceProfile::TestExecutorPreserveTxData [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.269862Z 00000.005 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.005 II| FAKE_ENV: Starting storage for BS group 0 00000.005 II| FAKE_ENV: Starting storage for BS group 1 00000.005 II| FAKE_ENV: Starting storage for BS group 2 00000.005 II| FAKE_ENV: Starting storage for BS group 3 00000.067 C1| TABLET_EXECUTOR: Tablet 1 unhandled exception std::runtime_error: test ??+0 (0xCF0CD52) ??+0 (0xCF0CCF7) NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Exceptions::TTxExecuteThrowException::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&)+57 (0xCA48039) NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*)+1312 (0xF520E00) NKikimr::NTabletFlatExecutor::TExecutor::StateWork(TAutoPtr&)+167 (0xF50BB77) NActors::IActor::Receive(TAutoPtr&)+85 (0xD7C15E5) 00000.067 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.067 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.067 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.067 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {62b, 2} 00000.067 II| FAKE_ENV: DS.1 gone, left {35b, 1}, put {35b, 1} 00000.067 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.067 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.067 II| FAKE_ENV: All BS storage groups are stopped 00000.067 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.067 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 1 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.339007Z 00000.003 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.003 II| FAKE_ENV: Starting storage for BS group 0 00000.003 II| FAKE_ENV: Starting storage for BS group 1 00000.003 II| FAKE_ENV: Starting storage for BS group 2 00000.003 II| 
FAKE_ENV: Starting storage for BS group 3 00000.004 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.004 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.004 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.004 II| FAKE_ENV: DS.0 gone, left {111b, 2}, put {131b, 3} 00000.004 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.005 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.005 II| FAKE_ENV: DS.1 gone, left {42b, 2}, put {42b, 2} 00000.005 II| FAKE_ENV: All BS storage groups are stopped 00000.005 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.005 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.345517Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.004 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 4 actors 00000.004 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.004 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.004 II| FAKE_ENV: DS.0 gone, left {561b, 14}, put {623b, 16} 00000.004 II| FAKE_ENV: DS.1 gone, left {693b, 8}, put {693b, 8} 00000.004 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: All BS storage groups are stopped 00000.004 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.004 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.351800Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.004 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 4 actors 00000.004 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.004 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.004 II| FAKE_ENV: DS.0 gone, left {141b, 4}, put {669b, 13} 00000.004 II| FAKE_ENV: DS.1 gone, left {868b, 8}, put {987b, 10} 00000.004 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: All BS storage groups are stopped 00000.004 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 0.000s 00000.004 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.358131Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.002 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.002 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.002 DD| TABLET_EXECUTOR: 
Leader{1:2:2} commited cookie 2 for step 1 ... initializing schema 00000.002 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema 00000.002 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 ... inserting rows 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} hope 1 -> done Change{2, redo 512b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 ... starting follower ... waiting for follower attach ... blocking NKikimr::TEvTablet::TEvNewFollowerAttached from TABLET_ACTOR to NKikimr::NTabletFlatExecutor::TTestFlatTablet cookie 0 ... waiting for follower attach (done) ... spamming QueueScan transactions 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 II| TABLET_EXECUTOR: Leader{1:2:5} starting Scan{2 on 101, TEmptyScan{}} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 8 for step 4 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 II| TABLET_EXECUTOR: Leader{1:2:6} starting Scan{4 on 101, TEmptyScan{}} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{4, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 8 for step 5 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 II| TABLET_EXECUTOR: Leader{1:2:7} starting Scan{6 on 101, TEmptyScan{}} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 8 for step 6 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 II| TABLET_EXECUTOR: Leader{1:2:8} starting Scan{8 on 101, TEmptyScan{}} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 8 for step 7 00000.004 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.004 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.004 II| TABLET_EXECUTOR: Leader{1:2:9} starting Scan{10 on 101, TEmptyScan{}} 00000.004 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.004 DD| TABLET_EXECUTOR: Leader{1 ... 
ange{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} found attached Res{10 20480b} 00000.005 DD| TABLET_EXECUTOR: release 10240b of static tx data due to attached res 10, Memory{0 dyn 20480} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 524267520b requested for data (524288000b in total) 00000.005 EE| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} mem 524288000b terminated, limit 314572800b is exceeded 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{10 20480b}, Memory{0 dyn 0} 00000.005 DD| RESOURCE_BROKER: Update cookie for task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062]) 00000.005 DD| RESOURCE_BROKER: Finish task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062]) (release resources {0, 20480}) 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001311 to 0.000000 (remove task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062])) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 19456b requested for data (20480b in total) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{11 20480b} type small_transaction 00000.005 DD| RESOURCE_BROKER: Submitted new unknown task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) priority=5 resources={0, 20480} 00000.005 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Allocate resources {0, 20480} for task Tx{25, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) from queue queue_default 00000.005 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 0.001192 (insert task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{11 20480b}, Memory{0 dyn 20480} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} update resource task 11 releasing 0b, Memory{0 dyn 20480} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} captured Res{11 20480b} 00000.005 DD| RESOURCE_BROKER: Update task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) (priority=5 type=small_transaction resources={0, 20480} resubmit=0) 00000.005 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 0.001192 (insert task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 20480} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 19456b requested for data (20480b in total) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 20480} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{12 20480b} type small_transaction 00000.005 DD| RESOURCE_BROKER: Submitted new unknown task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) priority=5 resources={0, 20480} 00000.005 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Allocate resources {0, 20480} for task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) from queue queue_default 00000.005 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001192 to 0.002384 (insert task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{12 20480b}, Memory{0 dyn 40960} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} found attached Res{11 20480b} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} moving tx data from attached Res{11 20480b} to Res{12 ...} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 524267520b requested for data (524288000b in total) 00000.005 EE| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} mem 524288000b terminated, limit 314572800b is exceeded 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{12 40960b}, Memory{0 dyn 0} 00000.005 DD| RESOURCE_BROKER: Update task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) (priority=5 type=small_transaction resources={0, 40960} resubmit=0) 00000.005 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001192 to 0.003576 (insert task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.005 DD| RESOURCE_BROKER: Finish task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) (release resources {0, 20480}) 00000.005 DD| RESOURCE_BROKER: Updated 
planned resource usage for queue queue_default from 0.003576 to 0.002384 (remove task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.005 DD| RESOURCE_BROKER: Finish task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) (release resources {0, 40960}) 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.002384 to 0.000000 (remove task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.005 II| TABLET_EXECUTOR: Leader{1:2:4} suiciding, Waste{2:0, 317b +(0, 0b), 3 trc, -0b acc} 00000.006 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.006 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.006 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.006 II| FAKE_ENV: DS.0 gone, left {180b, 3}, put {200b, 4} 00000.006 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: DS.1 gone, left {352b, 3}, put {352b, 3} 00000.006 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: All BS storage groups are stopped 00000.006 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.006 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 45 Left 401}, stopped >> GroupReconfiguration::ReassignsDoNotCauseErrorMessagesMirror3of4 [GOOD] >> GroupReconfiguration::ReassignsDoNotCauseErrorMessagesBlock4Plus2 >> TConsoleConfigTests::TestValidation [GOOD] >> TConsoleConfigTests::TestCheckConfigUpdates >> TConsoleTests::TestGetUnknownTenantStatusExtSubdomain [GOOD] >> TConsoleTests::TestRestartConsoleAndPools >> TKeyValueTest::TestBasicWriteReadOverrun [GOOD] >> TKeyValueTest::TestBlockedEvGetRequest >> TNodeBrokerTest::SyncNodes [GOOD] >> TLogSettingsConfiguratorTests::TestNoChanges [GOOD] >> TLogSettingsConfiguratorTests::TestAddComponentEntries >> TConfigsDispatcherTests::TestYamlEndToEnd [GOOD] >> TConsoleConfigHelpersTests::TestConfigCourier >> TKeyValueTest::TestBlockedEvGetRequest [GOOD] >> BuildStatsHistogram::Ten_Crossed_Log [GOOD] >> BuildStatsHistogram::Five_Five_Mixed >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query [GOOD] >> TNodeBrokerTest::UpdateNodesLogEmptyEpoch >> TConsoleConfigTests::TestCheckConfigUpdates [GOOD] >> TConsoleConfigTests::TestManageValidators >> TLogSettingsConfiguratorTests::TestAddComponentEntries [GOOD] >> TLogSettingsConfiguratorTests::TestRemoveComponentEntries >> TChargeBTreeIndex::NoNodes_Groups_History [GOOD] >> TChargeBTreeIndex::OneNode >> TConsoleConfigHelpersTests::TestConfigCourier [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriber ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestBlockedEvGetRequest [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:53:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:53:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] 
Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:53:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:53:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:52:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:52:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:83:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:83:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! 
new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:54:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:54:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:54:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:54:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! 
new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:53:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:53:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:53:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:53:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:79:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:81:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:83:2057] recipient: [10:82:2112] Leader for TabletID 72057594037927937 is [10:84:2113] sender: [10:85:2057] recipient: [10:82:2112] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:84:2113] Leader for TabletID 72057594037927937 is [10:84:2113] sender: [10:170:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:79:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:82:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:83:2057] recipient: [11:81:2112] Leader for TabletID 72057594037927937 is [11:84:2113] sender: [11:85:2057] recipient: [11:81:2112] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! 
new actor is[11:84:2113] Leader for TabletID 72057594037927937 is [11:84:2113] sender: [11:170:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:52:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:52:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:80:2057] recipient: [12:38:2085] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:82:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:84:2057] recipient: [12:83:2112] Leader for TabletID 72057594037927937 is [12:85:2113] sender: [12:86:2057] recipient: [12:83:2112] !Reboot 72057594037927937 (actor [12:59:2099]) rebooted! !Reboot 72057594037927937 (actor [12:59:2099]) tablet resolver refreshed! new actor is[12:85:2113] Leader for TabletID 72057594037927937 is [12:85:2113] sender: [12:171:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:60:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:77:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:83:2057] recipient: [13:38:2085] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:86:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:87:2057] recipient: [13:85:2115] Leader for TabletID 72057594037927937 is [13:88:2116] sender: [13:89:2057] recipient: [13:85:2115] !Reboot 72057594037927937 (actor [13:59:2099]) rebooted! !Reboot 72057594037927937 (actor [13:59:2099]) tablet resolver refreshed! new actor is[13:88:2116] Leader for TabletID 72057594037927937 is [13:88:2116] sender: [13:174:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:53:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:60:2057] recipient: [14:53:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:77:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:83:2057] recipient: [14:38:2085] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:86:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:87:2057] recipient: [14:85:2115] Leader for TabletID 72057594037927937 is [14:88:2116] sender: [14:89:2057] recipient: [14:85:2115] !Reboot 72057594037927937 (actor [14:59:2099]) rebooted! !Reboot 72057594037927937 (actor [14:59:2099]) tablet resolver refreshed! 
new actor is[14:88:2116] Leader for TabletID 72057594037927937 is [14:88:2116] sender: [14:174:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:54:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:60:2057] recipient: [15:54:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:77:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:84:2057] recipient: [15:38:2085] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:87:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:88:2057] recipient: [15:86:2115] Leader for TabletID 72057594037927937 is [15:89:2116] sender: [15:90:2057] recipient: [15:86:2115] !Reboot 72057594037927937 (actor [15:59:2099]) rebooted! !Reboot 72057594037927937 (actor [15:59:2099]) tablet resolver refreshed! new actor is[15:89:2116] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:60:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:77:2057] recipient: [16:14:2061] 2025-06-30T09:20:46.656775Z node 17 :KEYVALUE ERROR: keyvalue_storage_read_request.cpp:254: {KV323@keyvalue_storage_read_request.cpp:254} Received BLOCKED EvGetResult. KeyValue# 72057594037927937 Status# BLOCKED Deadline# 18446744073709551 Now# 0 SentAt# 1970-01-01T00:00:00.000000Z GotAt# 0 ErrorReason# block race detected 2025-06-30T09:20:46.657421Z node 17 :TABLET_MAIN ERROR: tablet_sys.cpp:934: Tablet: 72057594037927937 HandleBlockBlobStorageResult, msg->Status: ALREADY, not discovered Marker# TSYS21 2025-06-30T09:20:46.657434Z node 17 :TABLET_MAIN ERROR: tablet_sys.cpp:1849: Tablet: 72057594037927937 Type: KeyValue, EReason: ReasonBootBSError, SuggestedGeneration: 0, KnownGeneration: 3 Marker# TSYS31 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SyncNodes [GOOD] Test command err: 2025-06-30T09:20:44.947312Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.951769Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.951843Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.951889Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.951946Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.951997Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.952030Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.957282Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.957387Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.957455Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.957516Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.957582Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.957633Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.957671Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.957772Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.957823Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.957870Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.959395Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.959464Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.959502Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.959542Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.959577Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.959668Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.961164Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.961325Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.961393Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.961460Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.961528Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.961564Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.961595Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.961621Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.962493Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.962626Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:44.962669Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.962701Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.962757Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.962819Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.962863Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.962885Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.962938Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.963983Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.964138Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.964212Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.969808Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.969958Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.969990Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.970933Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.971004Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.971075Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.971136Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.971182Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.971273Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.971470Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.971505Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.973640Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.973805Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.974511Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.975295Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.999440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:44.999471Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:45.006682Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:45.007446Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:45.007599Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:45.007888Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:45.008581Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:45.008631Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:45.008689Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:45.008710Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:45.008716Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:45.008737Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:45.008784Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:45.008791Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:45.008797Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:45.008804Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:45.008832Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:45.008840Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:45.041284Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:45.041342Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.023000Z 2025-06-30T09:20:45.041356Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-30T09:20:45.041369Z ... 
1 (not fixed) tenant: dc-1 2025-06-30T09:20:45.269703Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v2 host1:1001 to database state=Active resolvehost=host1.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:45.269764Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:45.269772Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 1 to 2 2025-06-30T09:20:45.269776Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=2 2025-06-30T09:20:45.280811Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:45.280839Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:45.280850Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-30T09:20:45.280855Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v2 host1:1001 to epoch cache 2025-06-30T09:20:45.280883Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v2 to update nodes log 2025-06-30T09:20:45.280930Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-0" } 2025-06-30T09:20:45.281092Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:645:2222], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.281122Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 0 SeqNo: 1 } 2025-06-30T09:20:45.281128Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:45.281134Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:629:2213], seqNo: 1, version: 0, server pipe id: [1:645:2222] 2025-06-30T09:20:45.281142Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v0 -> v2 to [1:629:2213] 2025-06-30T09:20:45.281157Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:646:2223], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.281167Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:45.281171Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:45.281179Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.2 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:45.281257Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:648:2225], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 
2025-06-30T09:20:45.281279Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:45.281283Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:45.281289Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:45.281328Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:23:2070], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:45.281354Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:23:2070], cacheItem# { Subscriber: { Subscriber: [1:633:2216] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:45.281403Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:650:2226], recipient# [1:649:2185], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:45.281415Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:45.281427Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" 
Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:45.281438Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:649:2185], Recipient [1:563:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:45.281441Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:45.281454Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:45.281457Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host2:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:45.281485Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v3 host2:1001 to database state=Active resolvehost=host2.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:45.281533Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:45.281538Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 2 to 3 2025-06-30T09:20:45.281541Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=3 2025-06-30T09:20:45.292693Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:45.292727Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:45.292741Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 2 to 3 2025-06-30T09:20:45.292746Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v3 host2:1001 to epoch cache 2025-06-30T09:20:45.292779Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v3 to update nodes log 2025-06-30T09:20:45.292838Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-1" } 2025-06-30T09:20:45.292975Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:654:2230], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.293009Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:45.293015Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:45.293025Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:45.293082Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039952, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest { SeqNo: 1 } 2025-06-30T09:20:45.293088Z node 1 :NODE_BROKER TRACE: 
node_broker_impl.h:262: StateWork, processing event TEvNodeBroker::TEvSyncNodesRequest 2025-06-30T09:20:45.293097Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:629:2213] 2025-06-30T09:20:45.293165Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:656:2232], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.293179Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:45.293183Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:45.293188Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:45.293216Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039952, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest { SeqNo: 1 } 2025-06-30T09:20:45.293220Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:262: StateWork, processing event TEvNodeBroker::TEvSyncNodesRequest >> TNodeBrokerTest::NodesV2BackMigrationShiftIdRange >> TLogSettingsConfiguratorTests::TestRemoveComponentEntries [GOOD] >> TLogSettingsConfiguratorTests::TestChangeDefaults >> TNodeBrokerTest::NodesMigrationExtendLeaseThenExpire [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForNewConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItem >> TTenantPoolTests::TestSensorsConfigForStaticSlot >> TPartBtreeIndexIteration::OneNode_Groups_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_History_Slices >> TFlatExecutorLeases::BasicsInitialLeaseTimeout [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseSleep >> TNodeBrokerTest::NodesMigrationRemoveActive >> TLogSettingsConfiguratorTests::TestChangeDefaults [GOOD] >> TModificationsValidatorTests::TestApplyValidators_TENANTS [GOOD] >> TModificationsValidatorTests::TestApplyValidators_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestApplyValidatorsWithOldConfig [GOOD] >> TModificationsValidatorTests::TestChecksLimitError [GOOD] >> TModificationsValidatorTests::TestChecksLimitWarning [GOOD] >> TNodeBrokerTest::UpdateNodesLog >> TConsoleConfigTests::TestManageValidators [GOOD] >> TConsoleConfigTests::TestDryRun ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExtendLeaseThenExpire [GOOD] Test command err: 2025-06-30T09:20:45.292785Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.297582Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.297664Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.297698Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.297748Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { 
NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.297783Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.297810Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.302586Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.302681Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.302762Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.302813Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.302867Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.302907Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.302937Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.303023Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.303063Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.303087Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.304069Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.304102Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.304126Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.304145Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.304163Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.304239Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.305302Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.305426Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.305481Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.305518Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.305562Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.305584Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.305605Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.305621Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 
2025-06-30T09:20:45.306306Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.306403Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.306438Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.306468Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.306492Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.306539Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.306580Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.306714Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.306778Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.307299Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.307352Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.307374Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.307399Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.307474Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.307535Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.311146Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.311219Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.311833Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.311880Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.312111Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.312367Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.312761Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.312792Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.312945Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.312957Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.313060Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.313167Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.313189Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.313254Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.330086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:45.330113Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:45.335053Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:45.335742Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:45.335844Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:45.336082Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:45.337277Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:45.337332Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:45.337398Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:45.337412Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.024000Z - 1970-01-01T01:00:00.024000Z - 1970-01-01T02:00:00.024000Z 2025-06-30T09:20:45.337416Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:45.337431Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:45.337476Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:45.337488Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:45.337493Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:45.337499Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.024000Z - 1970-01-01T01:00:00.024000Z - 1970-01-01T02:00:00.024000Z 2025-06-30T09:20:45.337522Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:45.337531Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:45.369768Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:45.369812Z node 1 :NODE_BROKER TRACE: node_brok ... Kikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:45.675757Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:45.675826Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:45.675894Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:45.675991Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:45.676005Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1000: [DB] Loaded current epoch: #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:45.676013Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1024: [DB] Approximate epoch start is changed: #4.5 2025-06-30T09:20:45.676018Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1046: [DB] Loaded main nodes table: Nodes 2025-06-30T09:20:45.676057Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:294: [Dirty] Added expired node #1024.v0 host1:1001 2025-06-30T09:20:45.676086Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1128: [DB] Loaded node #1024.v0 { NodeId: 1024, State: Expired, Version: 0, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 2, Expire: Thu, 01 Jan 1970 03:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:45.676117Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1024.v2 { NodeId: 1024, State: Active, Version: 2, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:45.676127Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1229: [DB] Migrating changed node #1024.v5 { NodeId: 1024, State: Expired, Version: 5, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 2, Expire: Thu, 01 Jan 1970 03:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:45.676144Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:45.676190Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:45.676196Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 1, NewVersionUpdateNodes left 0 2025-06-30T09:20:45.676206Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v5 host1:1001 to database state=Expired resolvehost=host1.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=2 expire=Thu, 01 Jan 1970 03:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:45.676246Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:45.676251Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #4.5 2025-06-30T09:20:45.701899Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:45.701956Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T04:00:00.024000Z 2025-06-30T09:20:45.701969Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z, approximate epoch start #4.5 nodes=0 expired=1 2025-06-30T09:20:45.702003Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z nodes=0 expired=1 removed=0 2025-06-30T09:20:45.702014Z node 1 :NODE_BROKER 
DEBUG: node_broker.cpp:665: Add node #1024.v5 to update nodes log 2025-06-30T09:20:45.702242Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:694:2247], Recipient [1:686:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.702270Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:694:2247] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:45.702290Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:695:2248] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:45.702304Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:696:2249] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:45.702317Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:695:2248], Recipient [1:686:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.702336Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:696:2249], Recipient [1:686:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.702418Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:695:2248] 2025-06-30T09:20:45.702425Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:45.702438Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:45.702456Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:696:2249] 2025-06-30T09:20:45.702461Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:45.702470Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:45.702525Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:694:2247] 2025-06-30T09:20:45.702530Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:45.702537Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:45.702762Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:726:2273], Recipient [1:686:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.702797Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:632:2214], Recipient [1:686:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:45.702803Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:45.702810Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch 
#4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:45.702881Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:728:2275], Recipient [1:686:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.702902Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:632:2214], Recipient [1:686:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:45.702907Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:45.702914Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:45.702987Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:730:2277], Recipient [1:686:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.703006Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:632:2214], Recipient [1:686:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:45.703012Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:45.703018Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:45.703074Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:732:2279], Recipient [1:686:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.703097Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:632:2214], Recipient [1:686:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 5 } 2025-06-30T09:20:45.703102Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:45.703108Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:45.703171Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:734:2281], Recipient [1:686:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.703195Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:632:2214], Recipient [1:686:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 5 SeqNo: 2 } 2025-06-30T09:20:45.703203Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:45.703211Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:632:2214], seqNo: 2, version: 5, server pipe id: [1:734:2281] 2025-06-30T09:20:45.703220Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v5 -> v5 to [1:632:2214] 2025-06-30T09:20:45.703283Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:734:2281], Recipient [1:686:2241]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:45.703289Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed 
[1:632:2214], seqNo: 2, server pipe id: [1:734:2281] 2025-06-30T09:20:45.703323Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:736:2283], Recipient [1:686:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:45.703345Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:632:2214], Recipient [1:686:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:45.703351Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:45.703371Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query [GOOD] >> TDynamicNameserverTest::BasicFunctionality-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-false >> TNodeBrokerTest::EpochCacheUpdate [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdatesAddRemove [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientDeadCausesSubscriptionDeregistration ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-30T09:19:14.055896Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669516592409939:2151];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a6a/r3tmp/tmp5Edvh0/pdisk_1.dat 2025-06-30T09:19:14.107256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:19:14.107456Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:19:14.131788Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:14.134836Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669516592409812:2080] 1751275154044705 != 1751275154044708 TServer::EnableGrpc on GrpcPort 27163, node 1 2025-06-30T09:19:14.161172Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004a6a/r3tmp/yandex1xwHCT.tmp 2025-06-30T09:19:14.161192Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004a6a/r3tmp/yandex1xwHCT.tmp 2025-06-30T09:19:14.161279Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004a6a/r3tmp/yandex1xwHCT.tmp 2025-06-30T09:19:14.161308Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:14.163877Z INFO: TTestServer started on Port 21251 GrpcPort 27163 TClient is connected to server localhost:21251 2025-06-30T09:19:14.199393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-30T09:19:14.199434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:14.200002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected PQClient connected to localhost:27163 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:14.236644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:14.241337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:14.251283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:19:14.333396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:14.336067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:19:14.657761Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669516592410585:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:14.657800Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:14.657991Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669516592410597:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:14.659071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:14.662890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-30T09:19:14.662994Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669516592410599:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:19:14.759135Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669516592410683:2439] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:14.759330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:14.777914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:14.783143Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669516592410709:2313], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:19:14.783960Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=Yjg3YjJkODgtYjc0MTIxN2UtM2FmNWEwYTYtNjIxMTAwMA==, ActorId: [1:7521669516592410582:2296], ActorState: ExecuteState, TraceId: 01jz024b7165g7nfwp4w28s79x, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:19:14.784550Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:19:14.839752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669516592410945:2610] 2025-06-30T09:19:15.055987Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:19.059022Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669516592409939:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:19.059068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:19:20.163461Z :WriteToTopic_Demo_12_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:19:20.171833Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:19:20.177265Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521669542362214938:2700] connected; active server actors: 1 2025-06-30T09:19:20.177450Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-30T09:19:20.177591Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:19:20.177644Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-30T09:19:20.178808Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp: ... 2). Partition stream id: 1 2025-06-30T09:20:44.919641Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 grpc read done: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 1 offsets { start: 6 end: 22 } } } } 2025-06-30T09:20:44.919899Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 22 prev 6 end 22 by cookie 3 2025-06-30T09:20:44.920014Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-30T09:20:44.920030Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-30T09:20:44.920078Z node 11 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 22 (startOffset 22) session test-consumer_11_1_9576250724710552810_v1 2025-06-30T09:20:44.920124Z node 11 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:20:44.920561Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 22 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:20:44.920584Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:20:44.920595Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:20:44.920602Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 3 2025-06-30T09:20:44.920641Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 3 } 2025-06-30T09:20:44.920655Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 22 endOffset 22 with cookie 3 2025-06-30T09:20:44.920665Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 replying for commits: assignId# 1, from# 3, to# 3, offset# 22 2025-06-30T09:20:44.920876Z :DEBUG: [/Root] [/Root] [80dde40c-d3d0a47b-5166d48c-9214be89] [] Committed response: { 
partitions_committed_offsets { partition_session_id: 1 committed_offset: 22 } } 2025-06-30T09:20:45.857920Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:21:22 2025-06-30T09:20:45.857943Z :INFO: [/Root] [/Root] [80dde40c-d3d0a47b-5166d48c-9214be89] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1000 BytesRead: 16000000 MessagesRead: 22 BytesReadCompressed: 16000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:45.859596Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 checking auth because of timeout 2025-06-30T09:20:45.859633Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 auth for : test-consumer 2025-06-30T09:20:45.859825Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 Handle describe topics response 2025-06-30T09:20:45.859840Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 auth is DEAD 2025-06-30T09:20:45.859853Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:20:45.862136Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 checking auth because of timeout 2025-06-30T09:20:45.862163Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 auth for : test-consumer 2025-06-30T09:20:45.863025Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 Handle describe topics response 2025-06-30T09:20:45.863050Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 auth is DEAD 2025-06-30T09:20:45.863142Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:20:46.859440Z :INFO: [/Root] [/Root] [80dde40c-d3d0a47b-5166d48c-9214be89] Closing read session. Close timeout: 0.000000s 2025-06-30T09:20:46.859457Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:21:22 2025-06-30T09:20:46.859468Z :INFO: [/Root] [/Root] [80dde40c-d3d0a47b-5166d48c-9214be89] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2002 BytesRead: 16000000 MessagesRead: 22 BytesReadCompressed: 16000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:46.859488Z :NOTICE: [/Root] [/Root] [80dde40c-d3d0a47b-5166d48c-9214be89] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:20:46.859500Z :DEBUG: [/Root] [/Root] [80dde40c-d3d0a47b-5166d48c-9214be89] [] Abort session to cluster 2025-06-30T09:20:46.859717Z :DEBUG: [/Root] 0x0000522479B00D90 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_9576250724710552810_v1 Close 2025-06-30T09:20:46.859749Z :DEBUG: [/Root] 0x0000522479B00D90 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_9576250724710552810_v1 Close 2025-06-30T09:20:46.859768Z :NOTICE: [/Root] [/Root] [80dde40c-d3d0a47b-5166d48c-9214be89] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:20:46.859982Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|39ecc15d-5e858015-3a4a436-c7a18d98_0] PartitionId [0] Generation [2] Write session: close. Timeout 0.000000s 2025-06-30T09:20:46.859988Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|39ecc15d-5e858015-3a4a436-c7a18d98_0] PartitionId [0] Generation [2] Write session will now close 2025-06-30T09:20:46.859993Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|39ecc15d-5e858015-3a4a436-c7a18d98_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-30T09:20:46.859992Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 grpc read done: success# 0, data# { } 2025-06-30T09:20:46.860017Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 grpc read failed 2025-06-30T09:20:46.860030Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 grpc closed 2025-06-30T09:20:46.860053Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 is DEAD 2025-06-30T09:20:46.860089Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|39ecc15d-5e858015-3a4a436-c7a18d98_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-30T09:20:46.860095Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|39ecc15d-5e858015-3a4a436-c7a18d98_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-30T09:20:46.860152Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [11:7521669902635244667:2515]: session cookie 2 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 grpc read done: success# 0, data# { } 2025-06-30T09:20:46.860163Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [11:7521669902635244667:2515]: session cookie 2 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1grpc read failed 2025-06-30T09:20:46.860169Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [11:7521669902635244667:2515]: session cookie 2 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 grpc closed 2025-06-30T09:20:46.860172Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [11:7521669902635244667:2515]: session cookie 2 consumer test-consumer session test-consumer_11_1_9576250724710552810_v1 proxy is DEAD 2025-06-30T09:20:46.860336Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_9576250724710552810_v1 2025-06-30T09:20:46.860351Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669902635244661:2512] destroyed 2025-06-30T09:20:46.860382Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_9576250724710552810_v1 2025-06-30T09:20:46.860396Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|39ecc15d-5e858015-3a4a436-c7a18d98_0 grpc read done: success: 0 data: 2025-06-30T09:20:46.860397Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: 
test-message_group_id|39ecc15d-5e858015-3a4a436-c7a18d98_0 grpc read failed 2025-06-30T09:20:46.860403Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|39ecc15d-5e858015-3a4a436-c7a18d98_0 grpc closed 2025-06-30T09:20:46.860405Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|39ecc15d-5e858015-3a4a436-c7a18d98_0 is DEAD 2025-06-30T09:20:46.860446Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521669902635244658:2509] disconnected; active server actors: 1 2025-06-30T09:20:46.860457Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521669902635244658:2509] client test-consumer disconnected session test-consumer_11_1_9576250724710552810_v1 2025-06-30T09:20:46.860547Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:20:46.860756Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669894045309985:2481] destroyed 2025-06-30T09:20:46.860788Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Table >> CompressExecutor::TestReorderedExecutor [GOOD] >> CompressExecutor::TestExecutorMemUsage >> TChargeBTreeIndex::OneNode [GOOD] >> TChargeBTreeIndex::OneNode_Groups ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/console/ut/unittest >> TModificationsValidatorTests::TestChecksLimitWarning [GOOD] Test command err: 2025-06-30T09:20:41.070814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:41.070842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:41.070848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:41.070853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:41.070869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:41.070873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:41.070883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:41.070912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:41.071063Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:41.071155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:41.076065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:41.076085Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:41.079000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:41.079070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:41.079119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046578944 2025-06-30T09:20:41.080501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:41.080613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:41.080717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.080840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: dc-1, pathId: [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:41.081512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:41.081555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:41.081757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:41.081770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:41.081886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:41.081903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046578944, domainId: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:41.081911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:41.081932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.127290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "hdd" } StoragePools { Name: "" Kind: "hdd-3" } StoragePools { Name: "" Kind: "hdd-1" } StoragePools { Name: "" Kind: "hdd-2" } } } TxId: 1 TabletId: 72057594046578944 , at schemeshard: 72057594046578944 2025-06-30T09:20:41.127387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//dc-1, opId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.127483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 0 2025-06-30T09:20:41.127494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046578944, LocalPathId: 1] source path: 2025-06-30T09:20:41.127549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046578944 2025-06-30T09:20:41.127570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:41.128577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046578944 PathId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.128666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //dc-1 2025-06-30T09:20:41.128734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.128767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046578944 2025-06-30T09:20:41.128774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:41.128781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:41.129360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.129374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046578944 2025-06-30T09:20:41.129380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:41.129742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.129752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.129758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.129767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:41.130541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send 
propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046578944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:41.131036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046578944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:41.131089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:41.131287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.131293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:20:41.131297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.358281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.358359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046578944, at schemeshard: 72057594046578944 2025-06-30T09:20:41.358373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.358489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:41.358500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.358567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 1 2025-06-30T09:20:41.358582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:41.359560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:41.359581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046578944, txId: 1, path id: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:41.35964 ... 
NOTICE to ALERT 2025-06-30T09:20:47.468138Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TX_CONVEYOR has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468140Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TX_CONVEYOR has been changed from 0 to 10 2025-06-30T09:20:47.468143Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component TX_LIMITER has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468145Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TX_LIMITER has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468147Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TX_LIMITER has been changed from 0 to 10 2025-06-30T09:20:47.468151Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component ARROW_HELPER has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468153Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component ARROW_HELPER has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468155Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component ARROW_HELPER has been changed from 0 to 10 2025-06-30T09:20:47.468158Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component SSA_GRAPH_EXECUTION has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468160Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component SSA_GRAPH_EXECUTION has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468162Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component SSA_GRAPH_EXECUTION has been changed from 0 to 10 2025-06-30T09:20:47.468165Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KAFKA_PROXY has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468168Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KAFKA_PROXY has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468170Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component KAFKA_PROXY has been changed from 0 to 10 2025-06-30T09:20:47.468173Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component OBJECTS_MONITORING has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468175Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component OBJECTS_MONITORING has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468177Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component OBJECTS_MONITORING has been changed from 0 to 10 2025-06-30T09:20:47.468181Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component 
STATISTICS has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468184Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component STATISTICS has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468186Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component STATISTICS has been changed from 0 to 10 2025-06-30T09:20:47.468189Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_REQUEST_COST has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468191Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_REQUEST_COST has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468194Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_REQUEST_COST has been changed from 0 to 10 2025-06-30T09:20:47.468197Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_VDISK_BALANCING has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468199Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_VDISK_BALANCING has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468202Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_VDISK_BALANCING has been changed from 0 to 10 2025-06-30T09:20:47.468204Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_GETBLOCK has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468206Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_GETBLOCK has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468209Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_GETBLOCK has been changed from 0 to 10 2025-06-30T09:20:47.468211Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_SHRED has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468213Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_SHRED has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468216Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_SHRED has been changed from 0 to 10 2025-06-30T09:20:47.468218Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_CHECKINTEGRITY has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468221Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_CHECKINTEGRITY has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468223Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_CHECKINTEGRITY has been changed from 0 to 10 2025-06-30T09:20:47.468226Z node 14 :CMS_CONFIGS NOTICE: 
log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_BRIDGE has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468228Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_BRIDGE has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468231Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_BRIDGE has been changed from 0 to 10 2025-06-30T09:20:47.468234Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component LDAP_AUTH_PROVIDER has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468236Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component LDAP_AUTH_PROVIDER has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468239Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component LDAP_AUTH_PROVIDER has been changed from 0 to 10 2025-06-30T09:20:47.468241Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component GROUPED_MEMORY_LIMITER has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468244Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component GROUPED_MEMORY_LIMITER has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468246Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component GROUPED_MEMORY_LIMITER has been changed from 0 to 10 2025-06-30T09:20:47.468249Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component DATA_INTEGRITY has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468252Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component DATA_INTEGRITY has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468254Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component DATA_INTEGRITY has been changed from 0 to 10 2025-06-30T09:20:47.468257Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component TX_PRIORITIES_QUEUE has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468259Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TX_PRIORITIES_QUEUE has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468262Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TX_PRIORITIES_QUEUE has been changed from 0 to 10 2025-06-30T09:20:47.468264Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BSCONFIG has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468267Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BSCONFIG has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468269Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BSCONFIG has been 
changed from 0 to 10 2025-06-30T09:20:47.468272Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component NAMESERVICE has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468274Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component NAMESERVICE has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468276Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component NAMESERVICE has been changed from 0 to 10 2025-06-30T09:20:47.468279Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BRIDGE has been changed from NOTICE to ALERT 2025-06-30T09:20:47.468283Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BRIDGE has been changed from DEBUG to ALERT 2025-06-30T09:20:47.468285Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BRIDGE has been changed from 0 to 10 2025-06-30T09:20:47.468308Z node 14 :CMS_CONFIGS TRACE: log_settings_configurator.cpp:100: TLogSettingsConfigurator: Send TEvConfigNotificationResponse: SubscriptionId: 0 ConfigId { } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::EpochCacheUpdate [GOOD] Test command err: 2025-06-30T09:20:42.346203Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.351473Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.351535Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.351572Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.351614Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.351652Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.351678Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.356596Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.356689Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.356739Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.356787Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.356863Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.356899Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:42.356935Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.357025Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.357074Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.357111Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.358468Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.358518Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.358547Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.358573Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.358602Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.358663Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.360296Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.360473Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.360540Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.360600Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.360660Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.360690Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.360718Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.360741Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.361603Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.361777Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.361823Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.361877Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.361911Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.361981Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.362038Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:42.362248Z node 8 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.362297Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.362954Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.363050Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.363087Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.363180Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.363220Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.368644Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.369622Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.369651Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.369905Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.370031Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.370426Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.370946Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.371222Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.371392Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.371493Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.371563Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.373645Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.373977Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.374381Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { 
NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.374522Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:42.394596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:42.394627Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:42.400567Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:42.401215Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:42.401327Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:42.401579Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:42.402198Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:42.402241Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:42.402306Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:42.402325Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:42.402331Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:42.402352Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:42.402398Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:42.402404Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:42.402409Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:42.402416Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:42.402443Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:42.402450Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:42.435192Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:42.435246Z node 1 :NODE_BROKER TRACE: node_brok ... 
p:82: TTxExtendLease reply with: NKikimr::NNodeBroker::TEvNodeBroker::TEvExtendLeaseResponse { Status { Code: OK } NodeId: 1024 Expire: 14400023000 Epoch { Id: 3 Version: 6 Start: 7200023000 End: 10800023000 NextEnd: 14400023000 } } 2025-06-30T09:20:46.288263Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:317: [Committed] Extended lease of #1024.v6 host1:1001 up to Thu, 01 Jan 1970 04:00:00 UTC (lease 3) 2025-06-30T09:20:46.288269Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 5 to 6 2025-06-30T09:20:46.288275Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v6 host1:1001 to epoch cache 2025-06-30T09:20:46.288320Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v6 to update nodes log 2025-06-30T09:20:46.288521Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [9:713:2260], Recipient [9:569:2183]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:46.288583Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [9:637:2213], Recipient [9:569:2183]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:46.288591Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:46.288603Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:46.288662Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [9:23:2070], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:46.288682Z node 9 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [9:23:2070], path# /dc-1, domainOwnerId# 72057594046678944 2025-06-30T09:20:46.289063Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [9:23:2070], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 
72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 } 2025-06-30T09:20:46.289127Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [9:23:2070], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [9:715:2261] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:20:46.289174Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [9:23:2070], cacheItem# { Subscriber: { Subscriber: [9:715:2261] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false 
Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:46.289243Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [9:722:2262], recipient# [9:714:2183], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:46.289260Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:46.289281Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:46.289299Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [9:714:2183], Recipient [9:569:2183]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:46.289305Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:46.289329Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:46.289333Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host2:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:46.289363Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v7 host2:1001 to database state=Active resolvehost=host2.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 04:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:46.289432Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v7 host2:1001 2025-06-30T09:20:46.289441Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 6 to 7 2025-06-30T09:20:46.289445Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=7 2025-06-30T09:20:46.300564Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:46.300593Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v7 host2:1001 2025-06-30T09:20:46.300608Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 6 to 7 
2025-06-30T09:20:46.300617Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v7 host2:1001 to epoch cache 2025-06-30T09:20:46.300649Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v7 to update nodes log 2025-06-30T09:20:46.300708Z node 9 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 14400023000 Name: "slot-1" } 2025-06-30T09:20:46.300900Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [9:726:2266], Recipient [9:569:2183]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:46.300921Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [9:637:2213], Recipient [9:569:2183]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:46.300928Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:46.300940Z node 9 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.7 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z >> TConsoleConfigTests::TestDryRun [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlWithoutFlag >> TNodeBrokerTest::NodesMigrationReuseID >> TContinuousBackupTests::Basic >> TTenantPoolTests::TestSensorsConfigForStaticSlot [GOOD] >> TNodeBrokerTest::ExtendLeaseSetLocationInOneRegistration [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriber [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantTenant >> TConsoleTests::TestSetDefaultStorageUnitsQuota [GOOD] >> TConsoleTests::TestSetDefaultComputationalUnitsQuota >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientDeadCausesSubscriptionDeregistration [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientReconnectsOnConnectionLoose >> TPartBtreeIndexIteration::OneNode_History_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_History_Slices ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] Test command err: 2025-06-30T09:20:44.839827Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.843259Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.843304Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.843326Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.843355Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.843382Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:44.843398Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.847081Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.847137Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.847167Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.847202Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.847245Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.847278Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.847303Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.847365Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.847394Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.847414Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.848276Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.848306Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.848338Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.848355Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.848374Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.848420Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.849347Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.849467Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.849506Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.849537Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.849568Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.849583Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.849600Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.849618Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.850202Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.850284Z node 3 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.850308Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.850326Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.850343Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.850385Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.850417Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:44.850483Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.850520Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.851278Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.851342Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.851442Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.851459Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.851579Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.856671Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.856775Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.856806Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.856979Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.857311Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.857652Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.857716Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.857793Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:44.858005Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.858072Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.858128Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.858179Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.858339Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.859793Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.859982Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.860406Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.861006Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.861188Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.861575Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.862095Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.862653Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.864820Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.864972Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:44.877098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:44.877117Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... 
waiting for nameservers are connected 2025-06-30T09:20:44.881402Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:44.881842Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:44.881950Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:44.882117Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:44.882710Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:44.882928Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:44.882986Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:44.883001Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Star ... }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:47.932116Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [9:215:2203], recipient# [9:207:2176], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:47.932137Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:47.932155Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:47.932171Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [9:207:2176], Recipient [9:171:2176]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:47.932177Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:47.932200Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:47.932205Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host1:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:47.932241Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v2 host1:1001 to database state=Active 
resolvehost=host1.host1.host1 address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:47.932299Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:47.932307Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 1 to 2 2025-06-30T09:20:47.932312Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=2 2025-06-30T09:20:47.943182Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:47.943216Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:47.943228Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-30T09:20:47.943237Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v2 host1:1001 to epoch cache 2025-06-30T09:20:47.943260Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v2 to update nodes log 2025-06-30T09:20:47.943302Z node 9 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200024000 Name: "slot-0" } 2025-06-30T09:20:47.943405Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [9:219:2207], Recipient [9:171:2176]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.943437Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [9:204:2199], Recipient [9:171:2176]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:47.943444Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:47.943454Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:47.943497Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [9:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:47.943525Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [9:16:2063], cacheItem# { Subscriber: { Subscriber: [9:208:2202] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:47.943576Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [9:221:2208], recipient# [9:220:2176], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:47.943594Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:47.943612Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:47.943626Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [9:220:2176], Recipient [9:171:2176]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:47.943631Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:47.943642Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:47.943647Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host2:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:47.943676Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v3 host2:1001 to database state=Active resolvehost=host2.host2.host2 address=1.2.3.5 dc=1 location=DC=1/M=2/R=3/U=5/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:47.943723Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:47.943728Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 2 to 3 2025-06-30T09:20:47.943733Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=3 2025-06-30T09:20:47.954675Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:47.954703Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node 
#1025.v3 host2:1001 2025-06-30T09:20:47.954713Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 2 to 3 2025-06-30T09:20:47.954720Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v3 host2:1001 to epoch cache 2025-06-30T09:20:47.954758Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v3 to update nodes log 2025-06-30T09:20:47.954804Z node 9 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 7200024000 Name: "slot-1" } ... waiting for cache miss 2025-06-30T09:20:47.954879Z node 9 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1024 Deadline: 1.107024s } 2025-06-30T09:20:47.954900Z node 9 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1024, deadline# 1.107024s 2025-06-30T09:20:47.954904Z node 9 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:653: Schedule wakeup for new earliest deadline 1.107024s 2025-06-30T09:20:47.954914Z node 9 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1025 Deadline: 2.107024s } 2025-06-30T09:20:47.954920Z node 9 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1025, deadline# 2.107024s ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) 2025-06-30T09:20:48.026798Z node 9 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:896: HandleWakeup at 1.108024s 2025-06-30T09:20:48.026839Z node 9 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1024, error=Deadline exceeded 2025-06-30T09:20:48.026856Z node 9 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:911: Schedule next wakeup at 2.107024s 2025-06-30T09:20:48.078069Z node 9 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:896: HandleWakeup at 2.108024s 2025-06-30T09:20:48.078091Z node 9 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1025, error=Deadline exceeded ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TTenantPoolTests::TestSensorsConfigForStaticSlot [GOOD] Test command err: 2025-06-30T09:20:47.525996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:47.526017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:47.526022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:47.526027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:47.526037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-30T09:20:47.526040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:47.526046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:47.526062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:47.526142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:47.526190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:47.528749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:47.528765Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:47.530791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:47.530843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:47.530859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046578944 2025-06-30T09:20:47.531789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:47.531852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:47.531922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:47.532000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: dc-1, pathId: [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:47.532422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:47.532467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:47.532607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:47.532613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:47.532679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:47.532686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046578944, domainId: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:47.532691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:47.532702Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046578944 2025-06-30T09:20:47.569078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "hdd" } StoragePools { Name: "" Kind: "hdd-3" } StoragePools { Name: "" Kind: "hdd-1" } StoragePools { Name: "" Kind: "hdd-2" } } } TxId: 1 TabletId: 72057594046578944 , at schemeshard: 72057594046578944 2025-06-30T09:20:47.569159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //dc-1, opId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:47.569216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 0 2025-06-30T09:20:47.569225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046578944, LocalPathId: 1] source path: 2025-06-30T09:20:47.569283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046578944 2025-06-30T09:20:47.569297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:47.570019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046578944 PathId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:47.570057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //dc-1 2025-06-30T09:20:47.570103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:47.570114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046578944 2025-06-30T09:20:47.570120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:47.570126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:47.570549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:47.570559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046578944 2025-06-30T09:20:47.570563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:47.570873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, 
operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:47.570882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:47.570887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:47.570892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:47.571345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046578944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:47.571623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046578944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:47.571652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:47.571793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:47.571798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:20:47.571801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:47.795708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:47.795759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046578944, at schemeshard: 72057594046578944 2025-06-30T09:20:47.795768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:47.795834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:47.795841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:47.795870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 1 2025-06-30T09:20:47.795878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:47.796344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:47.796355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046578944, txId: 1, path id: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:47.79639 ... { Items { Kind: 10 Id: 4 Generation: 1 } } } AffectedKinds: 10 RawConsoleConfig { MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { StaticSlotLabelValue: "static-again" } } Version { Items { Kind: 10 Id: 1 Generation: 1 } Items { Kind: 10 Id: 2 Generation: 1 } Items { Kind: 10 Id: 3 Generation: 1 } Items { Kind: 10 Id: 4 Generation: 1 } } } } 2025-06-30T09:20:48.008427Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:221: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-30T09:20:48.008453Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:20:48.008465Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [1:412:2362]: Config { MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { StaticSlotLabelValue: "static-again" } } } ItemKinds: 10 Local: true 2025-06-30T09:20:48.008470Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:20:48.008477Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [1:409:2364]: Config { MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { StaticSlotLabelValue: "static-again" } } } ItemKinds: 10 Local: true 2025-06-30T09:20:48.008855Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:486: TDomainTenantPool(dc-1) Got new monitoring config: MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { StaticSlotLabelValue: "static-again" } } 2025-06-30T09:20:48.008866Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:452: TDomainTenantPool(dc-1) static slot label modified from static to static-again 2025-06-30T09:20:48.008869Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:409:2364] 2025-06-30T09:20:48.008891Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [1:409:2364], Recipient [1:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:48.008918Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:48.008943Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [1:412:2362], Recipient [1:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:48.008946Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:48.030442Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273285146, Sender [1:414:2363], Recipient [1:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Generation: 1 Config { MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { Enabled: false StaticSlotLabelValue: "static-again" } } FeatureFlags { EnableExternalHive: false EnableColumnStatistics: false EnableScaleRecommender: true } 
Version { Items { Kind: 10 Id: 5 Generation: 1 } } } AffectedKinds: 10 RawConsoleConfig { MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { Enabled: false StaticSlotLabelValue: "static-again" } } Version { Items { Kind: 10 Id: 1 Generation: 1 } Items { Kind: 10 Id: 2 Generation: 1 } Items { Kind: 10 Id: 3 Generation: 1 } Items { Kind: 10 Id: 4 Generation: 1 } Items { Kind: 10 Id: 5 Generation: 1 } } } } 2025-06-30T09:20:48.030463Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:221: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-30T09:20:48.030502Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:20:48.030514Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [1:412:2362]: Config { MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { Enabled: false StaticSlotLabelValue: "static-again" } } } ItemKinds: 10 Local: true 2025-06-30T09:20:48.030519Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:20:48.030526Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [1:409:2364]: Config { MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { Enabled: false StaticSlotLabelValue: "static-again" } } } ItemKinds: 10 Local: true 2025-06-30T09:20:48.031240Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:486: TDomainTenantPool(dc-1) Got new monitoring config: MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { Enabled: false StaticSlotLabelValue: "static-again" } } 2025-06-30T09:20:48.031259Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [1:409:2364], Recipient [1:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:48.031264Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:48.031278Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [1:412:2362], Recipient [1:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:48.031281Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:48.052675Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273285146, Sender [1:414:2363], Recipient [1:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Generation: 1 Config { MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { Enabled: true StaticSlotLabelValue: "static-again" } } FeatureFlags { EnableExternalHive: false EnableColumnStatistics: false EnableScaleRecommender: true } Version { Items { Kind: 10 Id: 6 Generation: 1 } } } AffectedKinds: 10 RawConsoleConfig { MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { Enabled: true StaticSlotLabelValue: "static-again" } } Version { Items { Kind: 10 Id: 1 Generation: 1 } Items { Kind: 10 Id: 2 Generation: 1 } Items { Kind: 10 Id: 3 Generation: 1 } Items { Kind: 10 Id: 4 Generation: 1 } Items { Kind: 10 Id: 5 Generation: 1 } Items { Kind: 10 Id: 6 Generation: 1 } } } } 2025-06-30T09:20:48.052697Z node 1 
:CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:221: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-30T09:20:48.052738Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:20:48.052751Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [1:412:2362]: Config { MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { Enabled: true StaticSlotLabelValue: "static-again" } } } ItemKinds: 10 Local: true 2025-06-30T09:20:48.052756Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:20:48.052763Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [1:409:2364]: Config { MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { Enabled: true StaticSlotLabelValue: "static-again" } } } ItemKinds: 10 Local: true 2025-06-30T09:20:48.053479Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:486: TDomainTenantPool(dc-1) Got new monitoring config: MonitoringConfig { ForceDatabaseLabels: true DatabaseLabels { Enabled: true StaticSlotLabelValue: "static-again" } } 2025-06-30T09:20:48.053501Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [1:409:2364], Recipient [1:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:48.053506Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:48.053516Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [1:412:2362], Recipient [1:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:48.053519Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:48.075171Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273285146, Sender [1:414:2363], Recipient [1:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Generation: 1 Config { MonitoringConfig { ForceDatabaseLabels: false DatabaseLabels { Enabled: true StaticSlotLabelValue: "static-again" } } FeatureFlags { EnableExternalHive: false EnableColumnStatistics: false EnableScaleRecommender: true } Version { Items { Kind: 10 Id: 7 Generation: 1 } } } AffectedKinds: 10 RawConsoleConfig { MonitoringConfig { ForceDatabaseLabels: false DatabaseLabels { Enabled: true StaticSlotLabelValue: "static-again" } } Version { Items { Kind: 10 Id: 1 Generation: 1 } Items { Kind: 10 Id: 2 Generation: 1 } Items { Kind: 10 Id: 3 Generation: 1 } Items { Kind: 10 Id: 4 Generation: 1 } Items { Kind: 10 Id: 5 Generation: 1 } Items { Kind: 10 Id: 6 Generation: 1 } Items { Kind: 10 Id: 7 Generation: 1 } } } } 2025-06-30T09:20:48.075198Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:221: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-30T09:20:48.075236Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:20:48.075252Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [1:412:2362]: Config { 
MonitoringConfig { ForceDatabaseLabels: false DatabaseLabels { Enabled: true StaticSlotLabelValue: "static-again" } } } ItemKinds: 10 Local: true 2025-06-30T09:20:48.075260Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:20:48.075270Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [1:409:2364]: Config { MonitoringConfig { ForceDatabaseLabels: false DatabaseLabels { Enabled: true StaticSlotLabelValue: "static-again" } } } ItemKinds: 10 Local: true 2025-06-30T09:20:48.076138Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:486: TDomainTenantPool(dc-1) Got new monitoring config: MonitoringConfig { ForceDatabaseLabels: false DatabaseLabels { Enabled: true StaticSlotLabelValue: "static-again" } } 2025-06-30T09:20:48.076169Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [1:409:2364], Recipient [1:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:48.076176Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:48.076193Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [1:412:2362], Recipient [1:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:48.076197Z node 1 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ExtendLeaseSetLocationInOneRegistration [GOOD] Test command err: 2025-06-30T09:20:45.631332Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.635212Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.635250Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.635275Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.635308Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.635335Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.635352Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.638828Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.638889Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.638923Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.638954Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle 
NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.638995Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.639044Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.639076Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.639162Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.639208Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.639228Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.640133Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.640166Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.640188Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.640207Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.640225Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.640277Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.641158Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.641251Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.641295Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.641326Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.641361Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.641376Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.641390Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.641402Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.641984Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.642069Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.642096Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.642132Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.642149Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:45.642180Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.642209Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:45.642312Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.642337Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.642702Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.643078Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.643094Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.643116Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.643184Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.643262Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.646256Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.646797Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.646928Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.647072Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.647184Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.647388Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.647463Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.647551Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.647592Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.647603Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.647742Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.647878Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.649416Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.649526Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.649570Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.649716Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:45.661291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:45.661310Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:45.665591Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:45.665978Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:45.666056Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:45.666229Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:45.666820Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:45.666960Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:45.667014Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:45.667028Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:45.667032Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:45.667044Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:45.667055Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:45.667060Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:45.667063Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:45.667067Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:45.667083Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:134 ... 
ectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [1:680:2242] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:20:46.653621Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:23:2070], cacheItem# { Subscriber: { Subscriber: [1:680:2242] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:46.653663Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:687:2243], recipient# [1:679:2185], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:46.653675Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 
Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:46.653687Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:46.653699Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:679:2185], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:46.653703Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:46.653724Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:46.653727Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host1:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:46.653743Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:244: [Dirty] Updated location of #1024.v4 host1:1001 to DC=1/M=2/R=3/U=4/ 2025-06-30T09:20:46.653755Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v4 host1:1001 to database state=Active resolvehost=host1.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:46.653854Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:317: [Dirty] Extended lease of #1024.v4 host1:1001 up to Thu, 01 Jan 1970 03:00:00 UTC (lease 2) 2025-06-30T09:20:46.653860Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v4 host1:1001 to database state=Active resolvehost=host1.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=2 expire=Thu, 01 Jan 1970 03:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:46.653873Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 3 to 4 2025-06-30T09:20:46.653875Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=4 2025-06-30T09:20:46.664775Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:46.664816Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:244: [Committed] Updated location of #1024.v4 host1:1001 to DC=1/M=2/R=3/U=4/ 2025-06-30T09:20:46.664827Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:317: [Committed] Extended lease of #1024.v4 host1:1001 up to Thu, 01 Jan 1970 03:00:00 UTC (lease 2) 2025-06-30T09:20:46.664831Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 3 to 4 2025-06-30T09:20:46.664834Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v4 host1:1001 to epoch cache 2025-06-30T09:20:46.664854Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v4 to update nodes log 2025-06-30T09:20:46.664896Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 10800025000 Name: "slot-0" } 
2025-06-30T09:20:46.665008Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:691:2247], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:46.665034Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2215], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:46.665038Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:46.665046Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.4 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:46.665100Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:693:2249], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:46.665117Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:635:2215], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:46.665122Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:46.665140Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 10800025000 Name: "slot-0" } } 2025-06-30T09:20:46.665187Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:695:2251], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:46.665198Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2215], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:46.665200Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:46.665204Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.4 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:46.665247Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:697:2253], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:46.665255Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2215], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:46.665258Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:46.665261Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.4 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:46.665296Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:699:2255], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:46.665306Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2215], Recipient 
[1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 3 } 2025-06-30T09:20:46.665309Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:46.665312Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.4 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:46.665368Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:701:2257], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:46.665383Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:635:2215], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 3 SeqNo: 2 } 2025-06-30T09:20:46.665387Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:46.665393Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:635:2215], seqNo: 2, version: 3, server pipe id: [1:701:2257] 2025-06-30T09:20:46.665399Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v3 -> v4 to [1:635:2215] >> TContinuousBackupTests::TakeIncrementalBackup >> TExportToS3Tests::ShouldExcludeBackupTableFromStats [GOOD] >> TContinuousBackupTests::Basic [GOOD] >> TConsoleTests::TestCreateTenantExtSubdomain [GOOD] >> TConsoleTests::TestCreateSharedTenant >> TExportToS3Tests::ShouldRestartOnScanErrors >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantTenant [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantMultipleTenants >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlWithoutFlag [GOOD] >> TNodeBrokerTest::NodeNameExpiration >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestart >> TNodeBrokerTest::ListNodesEpochDeltasPersistance >> TVersions::Wreck1Reverse [GOOD] >> TVersions::Wreck0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::Basic [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:48.247728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:48.247755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:48.247762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:48.247767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:48.247774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:48.247779Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:48.247788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:48.247804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:48.247932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:48.248000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:48.264018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:48.264054Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:48.267405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:48.267470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:48.267504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:48.269261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:48.269334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:48.269457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:48.269534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:48.270161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:48.270196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:48.270447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:48.270455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:48.270478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:48.270485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:48.270489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:48.270504Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.271842Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:48.292163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:48.292269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.292368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:48.292379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:48.292454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:48.292472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:48.293612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:48.293660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:48.293746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.293760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:48.293768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:48.293775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:48.294423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.294439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:48.294447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for 
txid 1:0 3 -> 128 2025-06-30T09:20:48.294882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.294896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.294904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:48.294914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:48.295757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:48.297044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:48.297104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:48.297386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:48.297425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:48.297435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:48.297521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:48.297531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:48.297572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:48.297589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:48.298538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:48.298552Z node 1 :FLAT_TX_SCHEMESHARD ... 
esult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 218 } } CommitVersion { Step: 5000005 TxId: 104 } 2025-06-30T09:20:48.558162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-30T09:20:48.558191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 218 } } CommitVersion { Step: 5000005 TxId: 104 } 2025-06-30T09:20:48.558210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 218 } } CommitVersion { Step: 5000005 TxId: 104 } FAKE_COORDINATOR: Erasing txId 104 2025-06-30T09:20:48.558494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-30T09:20:48.558502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-30T09:20:48.558518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-30T09:20:48.558527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-30T09:20:48.558538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-30T09:20:48.558555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 104:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:48.558559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.558566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, 
operation: 104:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:20:48.558578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 104:0 129 -> 240 2025-06-30T09:20:48.559222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.559714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.559844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.559858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-30T09:20:48.559881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 3/3 2025-06-30T09:20:48.559887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-30T09:20:48.559894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 3/3 2025-06-30T09:20:48.559899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-30T09:20:48.559905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 2025-06-30T09:20:48.559929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:342:2319] message: TxId: 104 2025-06-30T09:20:48.559938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-30T09:20:48.559948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-30T09:20:48.559954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:0 2025-06-30T09:20:48.559988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:20:48.559995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:1 2025-06-30T09:20:48.559999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:1 2025-06-30T09:20:48.560005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:20:48.560010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:2 2025-06-30T09:20:48.560014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:2 2025-06-30T09:20:48.560026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:20:48.560146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:20:48.560155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:20:48.560183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:20:48.560192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:20:48.560199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:20:48.560913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-30T09:20:48.560926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:731:2643] 2025-06-30T09:20:48.560966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 2025-06-30T09:20:48.561076Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:20:48.561118Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl" took 51us result status StatusPathDoesNotExist 2025-06-30T09:20:48.561152Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/continuousBackupImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 2]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Table/continuousBackupImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:20:48.561204Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:20:48.561221Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl/streamImpl" took 18us result status StatusPathDoesNotExist 2025-06-30T09:20:48.561236Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/continuousBackupImpl/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 2]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TNodeBrokerTest::NodesMigrationSetLocation >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientReconnectsOnConnectionLoose [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithKnownConfig ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> TStorageBalanceTest::TestScenario3 [GOOD] Test command err: 2025-06-30T09:17:55.219793Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:55.222913Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:55.222981Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:55.223211Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:55.223270Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:17:55.223439Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-30T09:17:55.223449Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:55.223590Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:28:2075] ControllerId# 72057594037932033 2025-06-30T09:17:55.223594Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:55.223612Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 
4294967295 2025-06-30T09:17:55.223629Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:55.226377Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:55.226392Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:55.226760Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:36:2080] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:55.226790Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:37:2081] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:55.226808Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:38:2082] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:55.226853Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:39:2083] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:55.226888Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:40:2084] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:55.226918Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:41:2085] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:55.226942Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:27:2074] Create Queue# [1:42:2086] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:55.226948Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-30T09:17:55.226962Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:28:2075] 2025-06-30T09:17:55.226966Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:28:2075] 2025-06-30T09:17:55.226974Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-30T09:17:55.226982Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:17:55.227113Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:17:55.227185Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:55.227190Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:55.227200Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:17:55.227223Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:55.229963Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:28:2075] 2025-06-30T09:17:55.230006Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:55.230012Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-30T09:17:55.230633Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-30T09:17:55.230949Z node 1 :BS_NODE DEBUG: 
{NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-30T09:17:55.230961Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:17:55.231023Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:55.231496Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-30T09:17:55.231514Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:17:55.231518Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:17:55.231541Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:53:2093] 2025-06-30T09:17:55.231554Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:17:55.232290Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:32:2063] 2025-06-30T09:17:55.232296Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:32:2063] 2025-06-30T09:17:55.232309Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:28:2075] 2025-06-30T09:17:55.232316Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-30T09:17:55.232326Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [1:53:2093] 2025-06-30T09:17:55.232328Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [1:53:2093] 2025-06-30T09:17:55.232335Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-30T09:17:55.232342Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:17:55.232346Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-30T09:17:55.232352Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:17:55.232374Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [1:53:2093] 2025-06-30T09:17:55.232378Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:17:55.232401Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:55.232532Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-30T09:17:55.232543Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:55.232569Z node 1 :BS_NODE DEBUG: 
{NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\363\365\\\016\336\205\240m2\241c\3010\003\261\342\227\n\267}" } 2025-06-30T09:17:55.232628Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:17:55.232683Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:55.232731Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StInit ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:55.232751Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-30T09:17:55.232772Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:17:55.232806Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-30T09:17:55.232867Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-30T09:17:55.232874Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-30T09:17:55.233322Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:28:2075] 2025-06-30T09:17:55.233329Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:28:2075] 2025-06-30T09:17:55.233336Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-30T09:17:55.233340Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 2146435075 Sender# [1:47:2090] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:17:55.233347Z node 1 :BS_NODE DEBUG: {NWDC3 ... 
h:65: [773f913e1c05c34f] restore Id# [72057594037927937:2:491:0:0:246:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-30T09:20:43.312612Z node 13 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [773f913e1c05c34f] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:491:0:0:246:1] Marker# BPG33 2025-06-30T09:20:43.312619Z node 13 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [773f913e1c05c34f] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:491:0:0:246:1] Marker# BPG32 2025-06-30T09:20:43.312656Z node 13 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [13:466:2089] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:491:0:0:246:1] FDS# 246 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-30T09:20:43.313541Z node 13 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [773f913e1c05c34f] received {EvVPutResult Status# OK ID# [72057594037927937:2:491:0:0:246:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 507 } Cost# 81937 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 508 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-30T09:20:43.313578Z node 13 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [773f913e1c05c34f] Result# TEvPutResult {Id# [72057594037927937:2:491:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-30T09:20:43.313588Z node 13 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [773f913e1c05c34f] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:491:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-30T09:20:43.313619Z node 13 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.216 sample PartId# [72057594037927937:2:491:0:0:246:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 13 } TEvVPutResult{ TimestampMs# 1.113 VDiskId# [0:1:0:0:0] NodeId# 13 Status# OK } ] } 2025-06-30T09:20:43.313757Z node 13 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:491:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-30T09:20:43.313801Z node 13 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:492} commited cookie 1 for step 491 2025-06-30T09:20:43.313815Z node 13 :HIVE DEBUG: tx__reassign_groups.cpp:56: HIVE#72057594037927937 THive::TTxReassignGroups(72075186224037902)::Complete 2025-06-30T09:20:43.313846Z node 13 :HIVE DEBUG: tx__update_tablet_groups.cpp:332: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{53393934917440}(72075186224037902)::Complete SideEffects: {Notifications: 0x7FF0000F [13:5565:2222] NKikimr::NHive::TEvPrivate::TEvRestartCancelled} 2025-06-30T09:20:43.313959Z node 13 :HIVE DEBUG: storage_balancer.cpp:115: HIVE#72057594037927937 StorageBalancer received RestartCancelled for tablet (72075186224037902,0) 2025-06-30T09:20:43.313971Z node 13 :HIVE DEBUG: storage_balancer.cpp:92: HIVE#72057594037927937 StorageBalancer initiating reassign for tablet 72075186224037971 2025-06-30T09:20:43.314134Z node 13 :HIVE DEBUG: hive_impl.cpp:978: HIVE#72057594037927937 THive::TEvReassignTablet TabletID: 72075186224037971 Channels: 1 Channels: 2 Channels: 0 ReassignReason: 
HIVE_REASSIGN_REASON_BALANCE 2025-06-30T09:20:43.314152Z node 13 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:492} Tx{1488, NKikimr::NHive::TTxReassignGroups} queued, type NKikimr::NHive::TTxReassignGroups 2025-06-30T09:20:43.314160Z node 13 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:492} Tx{1488, NKikimr::NHive::TTxReassignGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:20:43.314169Z node 13 :HIVE DEBUG: tx__reassign_groups.cpp:30: HIVE#72057594037927937 THive::TTxReassignGroups(72075186224037971,[0,1,2])::Execute 2025-06-30T09:20:43.314242Z node 13 :HIVE DEBUG: hive_impl.cpp:1079: HIVE#72057594037927937 THive::AssignTabletGroups TEvControllerSelectGroups tablet 72075186224037971 GroupParameters { StoragePoolSpecifier { Name: "def1" } } ReturnAllMatchingGroups: true 2025-06-30T09:20:43.314265Z node 13 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:492} Tx{1488, NKikimr::NHive::TTxReassignGroups} hope 1 -> done Change{994, redo 303b alter 0b annex 0, ~{ 1, 2 } -{ }, 0 gb} 2025-06-30T09:20:43.314275Z node 13 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:492} Tx{1488, NKikimr::NHive::TTxReassignGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-30T09:20:43.314305Z node 13 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037932033] send [13:1367:2260] 2025-06-30T09:20:43.314311Z node 13 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037932033] push event to server [13:1367:2260] 2025-06-30T09:20:43.314324Z node 13 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037932033] HandleSend Sender# [13:1301:2222] EventType# 268637702 c[def1] *****----------------------------------------------------------------------------------------------- (0.046) ******---------------------------------------------------------------------------------------------- (0.058) *****----------------------------------------------------------------------------------------------- (0.054) *******--------------------------------------------------------------------------------------------- (0.066) *****----------------------------------------------------------------------------------------------- (0.05) *****----------------------------------------------------------------------------------------------- (0.052) *****----------------------------------------------------------------------------------------------- (0.048) *****----------------------------------------------------------------------------------------------- (0.054) ******---------------------------------------------------------------------------------------------- (0.062) ******---------------------------------------------------------------------------------------------- (0.056) *****----------------------------------------------------------------------------------------------- (0.054) 2025-06-30T09:20:43.415405Z node 13 :HIVE DEBUG: hive_impl.cpp:437: HIVE#72057594037927937 THive::Handle TEvControllerSelectGroupsResult: success Status: OK MatchingGroups { Groups { GroupID: 2147483649 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2300000000 Occupancy: 0.046 } AllocatedSize: 2300000000 } Groups { GroupID: 2147483650 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2900000000 Occupancy: 0.058 } AllocatedSize: 2900000000 } Groups { GroupID: 2147483651 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2700000000 
Occupancy: 0.054 } AllocatedSize: 2700000000 } Groups { GroupID: 2147483652 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 3300000000 Occupancy: 0.066 } AllocatedSize: 3300000000 } Groups { GroupID: 2147483653 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2500000000 Occupancy: 0.05 } AllocatedSize: 2500000000 } Groups { GroupID: 2147483654 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2600000000 Occupancy: 0.052 } AllocatedSize: 2600000000 } Groups { GroupID: 2147483655 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2400000000 Occupancy: 0.048 } AllocatedSize: 2400000000 } Groups { GroupID: 2147483656 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2700000000 Occupancy: 0.054 } AllocatedSize: 2700000000 } Groups { GroupID: 2147483657 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 3100000000 Occupancy: 0.062 } AllocatedSize: 3100000000 } Groups { GroupID: 2147483658 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2800000000 Occupancy: 0.056 } AllocatedSize: 2800000000 } Groups { GroupID: 2147483659 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2700000000 Occupancy: 0.054 } AllocatedSize: 2700000000 } } 2025-06-30T09:20:43.415498Z node 13 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:493} Tx{1489, NKikimr::NHive::TTxUpdateTabletGroups} queued, type NKikimr::NHive::TTxUpdateTabletGroups 2025-06-30T09:20:43.415509Z node 13 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:493} Tx{1489, NKikimr::NHive::TTxUpdateTabletGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-30T09:20:43.415527Z node 13 :HIVE DEBUG: tx__update_tablet_groups.cpp:63: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{53393934917440}(72075186224037971,HIVE_REASSIGN_REASON_BALANCE,[]) 2025-06-30T09:20:43.415556Z node 13 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{53393934917440}: tablet 72075186224037971 channel 0 assigned to group 2147483651 2025-06-30T09:20:43.415563Z node 13 :HIVE DEBUG: tx__update_tablet_groups.cpp:171: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{53393934917440}: tablet 72075186224037971 skipped reassign of channel 0 2025-06-30T09:20:43.415571Z node 13 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{53393934917440}: tablet 72075186224037971 channel 1 assigned to group 2147483659 2025-06-30T09:20:43.415576Z node 13 :HIVE DEBUG: tx__update_tablet_groups.cpp:171: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{53393934917440}: tablet 72075186224037971 skipped reassign of channel 1 2025-06-30T09:20:43.415584Z node 13 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{53393934917440}: tablet 72075186224037971 channel 2 assigned to group 2147483659 2025-06-30T09:20:43.415589Z node 13 :HIVE DEBUG: tx__update_tablet_groups.cpp:171: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{53393934917440}: tablet 72075186224037971 skipped reassign of channel 2 2025-06-30T09:20:43.415597Z node 13 :HIVE WARN: 
tx__update_tablet_groups.cpp:272: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{53393934917440}: tablet 72075186224037971 wasn't changed 2025-06-30T09:20:43.415605Z node 13 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{53393934917440}: tablet 72075186224037971 skipped channel 0 2025-06-30T09:20:43.415636Z node 13 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{53393934917440}: tablet 72075186224037971 skipped channel 1 2025-06-30T09:20:43.415643Z node 13 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{53393934917440}: tablet 72075186224037971 skipped channel 2 2025-06-30T09:20:43.415673Z node 13 :HIVE NOTICE: tx__update_tablet_groups.cpp:326: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{53393934917440}(72075186224037971)::Execute - TryToBoot was not successfull 2025-06-30T09:20:43.415692Z node 13 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:493} Tx{1489, NKikimr::NHive::TTxUpdateTabletGroups} hope 1 -> done Change{995, redo 257b alter 0b annex 0, ~{ 2, 1 } -{ }, 0 gb} 2025-06-30T09:20:43.415704Z node 13 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:493} Tx{1489, NKikimr::NHive::TTxUpdateTabletGroups} release 4194304b of static, Memory{0 dyn 0} >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantMultipleTenants [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantDomain >> BuildStatsHistogram::Five_Five_Mixed [GOOD] >> BuildStatsHistogram::Five_Five_Serial >> TNodeBrokerTest::UpdateNodesLogEmptyEpoch [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:48.715275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:48.715298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:48.715303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:48.715307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:48.715311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:48.715314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:48.715323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-30T09:20:48.715335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:48.715428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:48.715489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:48.727034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:48.727072Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:48.730483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:48.730560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:48.730600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:48.732072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:48.732147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:48.732258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:48.732367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:48.733256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:48.733307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:48.733642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:48.733657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:48.733682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:48.733689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:48.733693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:48.733713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.735029Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 
2025-06-30T09:20:48.749910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:48.750003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.750067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:48.750074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:48.750120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:48.750131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:48.750992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:48.751035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:48.751095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.751104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:48.751108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:48.751112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:48.751964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.751982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:48.751992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:48.752377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.752386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.752390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:48.752397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:48.752870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:48.753215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:48.753251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:48.753411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:48.753431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:48.753436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:48.753493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:48.753498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:48.753525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:48.753534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:48.753952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:48.753959Z node 1 :FLAT_TX_SCHEMESHARD ... 
LocalPathId: 3] was 3 2025-06-30T09:20:49.005495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:1 2025-06-30T09:20:49.005498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:1 2025-06-30T09:20:49.005516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:20:49.005520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:2 2025-06-30T09:20:49.005524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:2 2025-06-30T09:20:49.005532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-30T09:20:49.005537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:3 2025-06-30T09:20:49.005540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 103:3 2025-06-30T09:20:49.005550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:20:49.006052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:20:49.006066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:725:2627] TestWaitNotification: OK eventTxId 103 2025-06-30T09:20:49.006178Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:20:49.006224Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl" took 53us result status StatusSuccess 2025-06-30T09:20:49.006338Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl" PathDescription { Self { Name: "IncrBackupImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY 
ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:49.006402Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:20:49.006428Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl/streamImpl" took 28us result status StatusSuccess 2025-06-30T09:20:49.006525Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" PathDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeStreamImpl Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409548 } PersQueueGroup { Name: "streamImpl" PathId: 4 TotalGroupCount: 1 PartitionPerTablet: 2 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 } TopicName: "continuousBackupImpl" TopicPath: "/MyRoot/Table/continuousBackupImpl/streamImpl" YdbDatabasePath: "/MyRoot" PartitionKeySchema { Name: "key" TypeId: 4 } MeteringMode: 
METERING_MODE_REQUEST_UNITS OffloadConfig { IncrementalBackup { DstPath: "/MyRoot/IncrBackupImpl" DstPathId { OwnerId: 72057594046678944 LocalId: 5 } } } } Partitions { PartitionId: 0 TabletId: 72075186233409547 Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409548 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:49.006671Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:20:49.006689Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl" took 19us result status StatusSuccess 2025-06-30T09:20:49.006767Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl" PathDescription { Self { Name: "IncrBackupImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 
LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TNodeBrokerTest::NodesMigrationExpireRemoved >> TNodeBrokerTest::Test1000NodesSubscribers >> TExportToS3Tests::ShouldRestartOnScanErrors [GOOD] >> TNodeBrokerTest::TestListNodesEpochDeltas >> TNodeBrokerTest::NodesMigrationRemoveActive [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithKnownConfig [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPending >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestart [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestartSimplified >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItemScope ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::UpdateNodesLogEmptyEpoch [GOOD] Test command err: 2025-06-30T09:20:47.049939Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.053749Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.053792Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.053817Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.053865Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.053893Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.053910Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.058440Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle 
NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.058532Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.058583Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.058630Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.058689Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.058733Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.058805Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.058901Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.058953Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.058982Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.060418Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.060491Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.060526Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.060561Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.060594Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.060687Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.062155Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.062322Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.062394Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.062445Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.062500Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.062528Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.062551Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.062572Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.063398Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.063538Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.063580Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:47.063618Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.063648Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.063709Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.063759Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.063937Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.063984Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.064516Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.064828Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.064853Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.064971Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.065009Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.065101Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.069958Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.069997Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.070490Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.070545Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.070578Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.070723Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.070949Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.071207Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.071295Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.071329Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.071402Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.071501Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.071549Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.071585Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.086361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:47.086386Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:47.091643Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:47.092137Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:47.092214Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:47.092401Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:47.093204Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:47.093387Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:47.093425Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:47.093438Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:47.093442Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:47.093454Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:47.093466Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:47.093470Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:47.093474Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:47.093478Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:47.093492Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:47.093497Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:47.125350Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:47.125390Z node 1 :NODE_BROKER TRACE: node_brok ... ], Recipient [1:565:2185]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:47.313470Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:633:2215], seqNo: 2, server pipe id: [1:637:2219] 2025-06-30T09:20:47.313488Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:639:2221], Recipient [1:565:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.313498Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:633:2215], Recipient [1:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 0 SeqNo: 3 } 2025-06-30T09:20:47.313501Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:47.313503Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:633:2215], seqNo: 3, version: 0, server pipe id: [1:639:2221] 2025-06-30T09:20:47.313506Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v0 -> v1 to [1:633:2215] 2025-06-30T09:20:47.313536Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:639:2221], Recipient [1:565:2185]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:47.313539Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:633:2215], seqNo: 3, server pipe id: [1:639:2221] 2025-06-30T09:20:47.313555Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:641:2223], Recipient [1:565:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.313565Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2215], Recipient [1:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:47.313567Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.313571Z node 1 :NODE_BROKER TRACE: 
node_broker.cpp:426: Send TEvNodesInfo for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:47.565459Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435072, Sender [1:565:2185], Recipient [1:565:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvUpdateEpoch 2025-06-30T09:20:47.565482Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:263: StateWork, processing event TEvPrivate::TEvUpdateEpoch 2025-06-30T09:20:47.565508Z node 1 :NODE_BROKER DEBUG: node_broker__update_epoch.cpp:20: TTxUpdateEpoch Execute 2025-06-30T09:20:47.565519Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:548: [Dirty] Move to new epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z, approximate epoch start #2.2 2025-06-30T09:20:47.565524Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:47.565546Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #2.2 2025-06-30T09:20:47.961172Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:25:2072], Recipient [1:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 2 } 2025-06-30T09:20:47.961188Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.961194Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:47.961241Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:625:2207] 2025-06-30T09:20:47.961244Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.961247Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:47.961255Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:628:2210] 2025-06-30T09:20:47.961257Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.961259Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:47.961263Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [3:83:2072], Recipient [1:630:2212] 2025-06-30T09:20:47.961265Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.961268Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:47.961273Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:627:2209] 2025-06-30T09:20:47.961276Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.961278Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:47.961282Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [2:54:2072], Recipient [1:629:2211] 2025-06-30T09:20:47.961284Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, 
processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.961287Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:47.961291Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:631:2213] 2025-06-30T09:20:47.961293Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.961295Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:47.961299Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:626:2208] 2025-06-30T09:20:47.961301Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.961303Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:47.971999Z node 1 :NODE_BROKER DEBUG: node_broker__update_epoch.cpp:31: TTxUpdateEpoch Complete 2025-06-30T09:20:47.972022Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:548: [Committed] Move to new epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z, approximate epoch start #2.2 2025-06-30T09:20:47.972037Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:47.972042Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z, approximate epoch start #2.2 nodes=0 expired=0 2025-06-30T09:20:47.972051Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z nodes=0 expired=0 removed=0 2025-06-30T09:20:47.972061Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:47.972067Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:47.972073Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:47.972077Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:47.972081Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:47.972085Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:47.972089Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:47.972094Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:47.992719Z 
node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:661:2233], Recipient [1:565:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.992765Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2215], Recipient [1:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:47.992770Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.992781Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.2 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:47.992829Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:663:2235], Recipient [1:565:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.992852Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:633:2215], Recipient [1:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 1 SeqNo: 4 } 2025-06-30T09:20:47.992857Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:47.992864Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:633:2215], seqNo: 4, version: 1, server pipe id: [1:663:2235] 2025-06-30T09:20:47.992871Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v1 -> v2 to [1:633:2215] 2025-06-30T09:20:47.992915Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:663:2235], Recipient [1:565:2185]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:47.992921Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:633:2215], seqNo: 4, server pipe id: [1:663:2235] 2025-06-30T09:20:47.992937Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:665:2237], Recipient [1:565:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.992950Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:633:2215], Recipient [1:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 0 SeqNo: 5 } 2025-06-30T09:20:47.992956Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:47.992960Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:633:2215], seqNo: 5, version: 0, server pipe id: [1:665:2237] 2025-06-30T09:20:47.992963Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v0 -> v2 to [1:633:2215] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationRemoveActive [GOOD] Test command err: 2025-06-30T09:20:47.598659Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.602050Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.602094Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.602121Z node 4 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.602157Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.602186Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.602206Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.606119Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.606180Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.606222Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.606259Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.606306Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.606335Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.606357Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.606430Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.606468Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.606491Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.607483Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.607518Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.607541Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.607563Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.607584Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.607637Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.608586Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.608682Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.608725Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.608758Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.608790Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.608806Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 
2025-06-30T09:20:47.608823Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.608836Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.609368Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.609451Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.609476Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.609496Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.609514Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.609548Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.609572Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.609628Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.609655Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.610212Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.610250Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.610299Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.610319Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.610413Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.613254Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.614242Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.614334Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.614407Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.614947Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.615250Z node 1 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.615558Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.615634Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.615744Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.615992Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.616103Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.617426Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.617510Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.617576Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.617762Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.618120Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.618692Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.633256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:47.633286Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:47.638250Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:47.638771Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:47.638872Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:47.639097Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:47.639853Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:47.639884Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:47.639920Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:47.639933Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.024000Z - 1970-01-01T01:00:00.024000Z - 1970-01-01T02:00:00.024000Z 2025-06-30T09:20:47.639936Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:47.639948Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:47.639972Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:47.639976Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:47.639980Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:47.639984Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.024000Z - 1970-01-01T01:00:00.024000Z - 1970-01-01T02:00:00.024000Z 2025-06-30T09:20:47.639997Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:134 ... dle NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594037936129 ClientId: [2:328:2072] ServerId: [1:626:2207] } 2025-06-30T09:20:47.971973Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:672: Handle NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594037936129 ClientId: [3:346:2072] ServerId: [1:629:2210] } 2025-06-30T09:20:47.971986Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:672: Handle NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594037936129 ClientId: [4:241:2072] ServerId: [1:630:2211] } 2025-06-30T09:20:47.974586Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:47.975492Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:47.975575Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:47.975939Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:47.975999Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:47.976052Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:47.976116Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:47.976127Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1000: [DB] Loaded current epoch: #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:47.976132Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1024: [DB] Approximate epoch start is changed: #4.5 2025-06-30T09:20:47.976135Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1046: [DB] Loaded main nodes table: Nodes 2025-06-30T09:20:47.976197Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1024.v2 { NodeId: 1024, State: Active, Version: 2, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:47.976206Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:303: [Dirty] Added removed node #1024.v5 2025-06-30T09:20:47.976212Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1245: [DB] Migrating removed node #1024.v5 2025-06-30T09:20:47.976231Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:47.976271Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:47.976276Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 1, NewVersionUpdateNodes left 0 2025-06-30T09:20:47.976279Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:836: [DB] Removing node #1024.v5 from database 2025-06-30T09:20:47.976297Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:47.976302Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #4.5 2025-06-30T09:20:47.998592Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:47.998634Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T04:00:00.024000Z 2025-06-30T09:20:47.998644Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z, approximate epoch start #4.5 nodes=0 expired=0 2025-06-30T09:20:47.998654Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z nodes=0 expired=0 removed=1 2025-06-30T09:20:47.998660Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v5 to update nodes log 2025-06-30T09:20:47.998858Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:696:2247], Recipient [1:688:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.998896Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:696:2247] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:47.998912Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:697:2248] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:47.998921Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: 
[1:698:2249] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:47.998929Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:697:2248], Recipient [1:688:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.998945Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:698:2249], Recipient [1:688:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.999021Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:697:2248] 2025-06-30T09:20:47.999028Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.999038Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:47.999053Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:698:2249] 2025-06-30T09:20:47.999055Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.999060Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:47.999082Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:696:2247] 2025-06-30T09:20:47.999088Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.999092Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:47.999233Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:728:2273], Recipient [1:688:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.999262Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:634:2214], Recipient [1:688:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:47.999268Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.999276Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:47.999325Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:730:2275], Recipient [1:688:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.999337Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:634:2214], Recipient [1:688:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:47.999340Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.999344Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:47.999388Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received 
event# 269877761, Sender [1:732:2277], Recipient [1:688:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.999401Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:634:2214], Recipient [1:688:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:47.999404Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.999408Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:47.999448Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:734:2279], Recipient [1:688:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.999465Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:634:2214], Recipient [1:688:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 5 } 2025-06-30T09:20:47.999469Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:47.999473Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.5 1970-01-01T03:00:00.024000Z - 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z 2025-06-30T09:20:47.999522Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:736:2281], Recipient [1:688:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.999539Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:634:2214], Recipient [1:688:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 5 SeqNo: 2 } 2025-06-30T09:20:47.999544Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:47.999551Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:634:2214], seqNo: 2, version: 5, server pipe id: [1:736:2281] 2025-06-30T09:20:47.999557Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v5 -> v5 to [1:634:2214] 2025-06-30T09:20:47.999604Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:736:2281], Recipient [1:688:2241]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:47.999608Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:634:2214], seqNo: 2, server pipe id: [1:736:2281] 2025-06-30T09:20:47.999649Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:738:2283], Recipient [1:688:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:47.999664Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:634:2214], Recipient [1:688:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:47.999668Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:47.999682Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table [GOOD] >> BuildStatsHistogram::Five_Five_Serial [GOOD] >> 
BuildStatsHistogram::Five_Five_Crossed >> TNodeBrokerTest::NodesMigrationReuseIDThenExtendLease |80.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ShouldRestartOnScanErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:24.618062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:24.618103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.618110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:24.618116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:24.618128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:24.618132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:24.618143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.618154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:24.618229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:24.618291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:24.632413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:24.632431Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:24.635052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:24.635095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:24.635152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:24.637882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:24.637983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: 
Clear TempDirsState with owners number: 0 2025-06-30T09:20:24.638144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.638216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:24.639123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.639168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:24.639505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.639516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.639547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:24.639556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:24.639563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:24.639585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.641475Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:24.661932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:24.662002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.662059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:24.662067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:24.662119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:24.662130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called 
at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:24.662774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.662804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:24.662845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.662855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:24.662861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:24.662865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:24.663303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.663313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:24.663319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:24.663735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.663745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.663749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.663754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:24.664261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:24.664758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:24.664805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:24.664965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.664987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:24.664992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.665042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:24.665050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.665096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:24.665110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:24.665591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.665598Z node 1 :FLAT_TX_SCHEMESHARD ... id 281474976710759:0 128 -> 129 2025-06-30T09:20:48.952414Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:64902 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: F81619A9-B741-4AF0-8176-3FBE7D77255D amz-sdk-request: attempt=1 content-length: 106 content-md5: heRlZdXBqq/26pCrTLfM5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 106 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:64902 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 480717A1-D998-4CAF-AA48-91D95FBCC719 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000005 amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-30T09:20:48.961372Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:48.961409Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710759, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:20:48.961530Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 
72057594046678944 2025-06-30T09:20:48.961538Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:212:2212], at schemeshard: 72057594046678944, txId: 281474976710759, path id: 4 2025-06-30T09:20:48.961718Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:20:48.961729Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 281474976710759:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:48.962072Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-30T09:20:48.962086Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-30T09:20:48.962094Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710759 2025-06-30T09:20:48.962099Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710759, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-30T09:20:48.962104Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:20:48.962122Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 281474976710759 2025-06-30T09:20:48.962933Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710759 TestWaitNotification wait txId: 102 2025-06-30T09:20:48.963005Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:20:48.963012Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:20:48.964227Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:62: NotifyTxCompletion export in-flight, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:20:48.964239Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:64902 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 2A79ECC3-E3BE-47A5-BA60-6E75774E1AFF amz-sdk-request: attempt=1 content-length: 106 content-md5: heRlZdXBqq/26pCrTLfM5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 106 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:64902 Accept: */* Connection: 
Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 64EAEB3F-5A32-4CC3-A951-75996D8439CD amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /scheme.pb / / 357 REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:64902 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: AA71C026-14E4-42B0-8F58-3BB82F6ADE87 amz-sdk-request: attempt=1 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-30T09:20:49.462935Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 452 RawX2: 17179871603 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:20:49.462957Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0 2025-06-30T09:20:49.462978Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 452 RawX2: 17179871603 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:20:49.462989Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 452 RawX2: 17179871603 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:20:49.463001Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:49.463004Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:20:49.463008Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-30T09:20:49.463014Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710759:0 129 -> 240 2025-06-30T09:20:49.463047Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, 
LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:49.463534Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:20:49.463597Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:20:49.463604Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-06-30T09:20:49.463614Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-30T09:20:49.463618Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-30T09:20:49.463622Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-30T09:20:49.463625Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-30T09:20:49.463629Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-06-30T09:20:49.463639Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:128:2152] message: TxId: 281474976710759 2025-06-30T09:20:49.463644Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-30T09:20:49.463649Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710759:0 2025-06-30T09:20:49.463652Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710759:0 2025-06-30T09:20:49.463674Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:20:49.464040Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-30T09:20:49.464051Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710759 2025-06-30T09:20:49.464478Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:20:49.464492Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:593:2547] TestWaitNotification: OK eventTxId 102 >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloadedWithReboot [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsMultipleColumns >> TxUsage::Sinks_Oltp_WriteToTopics_2_Table >> TNodeBrokerTest::TestRandomActions >> TNodeBrokerTest::NodesMigrationReuseID [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantDomain [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriptionEraser >> TConsoleTests::TestRestartConsoleAndPools [GOOD] >> TConsoleTests::TestRemoveTenantWithBorrowedStorageUnits >> TNodeBrokerTest::NodesMigrationNewActiveNode >> 
TFlatExecutorLeases::BasicsInitialLeaseSleep [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseSleepTimeout ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationReuseID [GOOD] Test command err: 2025-06-30T09:20:48.220338Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.223864Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.223905Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.223937Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.223967Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.224002Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.224027Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.227594Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.227652Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.227689Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.227721Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.227759Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.227788Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.227819Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.227895Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.227929Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.227944Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.228852Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.228884Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.228908Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.228933Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.228950Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.229001Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle 
NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.229974Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.230080Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.230129Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.230174Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.230213Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.230231Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.230251Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.230270Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.230970Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.231083Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.231115Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.231146Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.231172Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.231220Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.231263Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:48.231420Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.231454Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.231786Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.232013Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.232032Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.232061Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.232134Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.232242Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.236172Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.236208Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.236301Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.236316Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.236329Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.236349Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.236649Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.237168Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.237239Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.237291Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.237467Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.237517Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.238265Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.238525Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:48.251504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:48.251525Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... 
waiting for nameservers are connected 2025-06-30T09:20:48.256711Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:48.257181Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:48.257269Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:48.257439Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:48.258295Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:48.258331Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:48.258370Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:48.258384Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:48.258389Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:48.258401Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:48.258417Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:48.258422Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:48.258425Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:48.258430Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:48.258447Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:48.258451Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:48.290718Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:48.290781Z node 1 :NODE_BROKER TRACE: node_brok ... 
44:1 } 2025-06-30T09:20:48.656098Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1024.v2 { NodeId: 1024, State: Active, Version: 2, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:48.656104Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1229: [DB] Migrating changed node #1024.v7 { NodeId: 1024, State: Active, Version: 7, Host: host2, Port: 1001, ResolveHost: host2.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 05:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:48.656109Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 6 to 7 2025-06-30T09:20:48.656117Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:48.656152Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:48.656156Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 1 2025-06-30T09:20:48.656160Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #4.7 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:48.656177Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #4.6 2025-06-30T09:20:48.656191Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v7 host2:1001 to database state=Active resolvehost=host2.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 05:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:48.656228Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:48.667942Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:48.667981Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:48.667990Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #4.7 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z, approximate epoch start #4.6 nodes=1 expired=0 2025-06-30T09:20:48.668025Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##4.7 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z nodes=1 expired=0 removed=0 2025-06-30T09:20:48.668032Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v7 to update nodes log 2025-06-30T09:20:48.668136Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:708:2260], Recipient [1:698:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:48.668195Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:708:2260] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:48.668240Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 
269877761, Sender [1:710:2262], Recipient [1:698:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:48.668260Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:711:2263], Recipient [1:698:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:48.668265Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:712:2264] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:48.668273Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:711:2263] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:48.668286Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:710:2262] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:48.668297Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:712:2264], Recipient [1:698:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:48.668340Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:708:2260] 2025-06-30T09:20:48.668345Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:48.668353Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:48.668371Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:711:2263] 2025-06-30T09:20:48.668373Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:48.668377Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:48.668390Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:712:2264] 2025-06-30T09:20:48.668393Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:48.668397Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:48.668428Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:710:2262] 2025-06-30T09:20:48.668431Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:48.668434Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:48.668581Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:740:2287], Recipient [1:698:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:48.668603Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender 
[1:633:2215], Recipient [1:698:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:48.668605Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:48.668609Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:48.668666Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:742:2289], Recipient [1:698:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:48.668678Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2215], Recipient [1:698:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:48.668682Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:48.668687Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:48.668757Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:744:2291], Recipient [1:698:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:48.668768Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2215], Recipient [1:698:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:48.668771Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:48.668774Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:48.668837Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:746:2293], Recipient [1:698:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:48.668854Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2215], Recipient [1:698:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 6 } 2025-06-30T09:20:48.668857Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:48.668861Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z - 1970-01-01T05:00:00.025000Z 2025-06-30T09:20:48.668903Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:748:2295], Recipient [1:698:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:48.668919Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:633:2215], Recipient [1:698:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 6 SeqNo: 2 } 2025-06-30T09:20:48.668923Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:48.668928Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:633:2215], seqNo: 2, version: 6, server pipe id: [1:748:2295] 2025-06-30T09:20:48.668933Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send 
TEvUpdateNodes v6 -> v7 to [1:633:2215] 2025-06-30T09:20:48.668976Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:748:2295], Recipient [1:698:2254]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:48.668981Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:633:2215], seqNo: 2, server pipe id: [1:748:2295] 2025-06-30T09:20:48.669003Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:750:2297], Recipient [1:698:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:48.669014Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:633:2215], Recipient [1:698:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:48.669017Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:48.669039Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 18000025000 Name: "slot-0" } } >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestartSimplified [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestComplexYamlConfigChanges >> TNodeBrokerTest::ShiftIdRangeRemoveNew >> TNodeBrokerTest::NodesV2BackMigrationShiftIdRange [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriptionEraser [GOOD] >> FeatureFlagsConfiguratorTest::TestFeatureFlagsUpdates >> BuildStatsHistogram::Five_Five_Crossed [GOOD] >> BuildStatsHistogram::Single_Small_2_Levels [GOOD] >> BuildStatsHistogram::Single_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Single_Small_1_Level [GOOD] >> BuildStatsHistogram::Single_Small_0_Levels [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_2_Levels [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_1_Level [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_0_Levels >> BuildStatsHistogram::Three_Mixed_Small_0_Levels [GOOD] >> BuildStatsHistogram::Mixed_Groups_History >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-false ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesV2BackMigrationShiftIdRange [GOOD] Test command err: 2025-06-30T09:20:47.244083Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.247760Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.247800Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.247826Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.247884Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.247915Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.247932Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.251628Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.251685Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.251723Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.251757Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.251794Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.251822Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.251844Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.251910Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.251948Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.251968Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.252880Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.252910Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.252930Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.252952Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.252976Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.253027Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.254007Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.254111Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.254153Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.254190Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.254224Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.254239Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.254255Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.254268Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.254901Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:47.255014Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.255050Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.255080Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.255115Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.255164Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.255206Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.255258Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.255291Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.255962Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.255986Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.256013Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.256090Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.256120Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.256186Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.259836Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.260568Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.260589Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.260612Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.260717Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.260739Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.261072Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.261380Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.261620Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.261847Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.261892Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.261982Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.263185Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.263749Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.264113Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.264214Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.264292Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.264496Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.264665Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.264839Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.268712Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.268948Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.269880Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.270418Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.285159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:47.285182Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... 
waiting for nameservers are connected 2025-06-30T09:20:47.288890Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:47.289300Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:47.289374Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:47.289519Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:47.289900Z node 1 :NODE_BROKER DEBUG: node_ ... DE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:926:2410], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.135338Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:49.135342Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.135347Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.14 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:49.135404Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:928:2412], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.135419Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 13 } 2025-06-30T09:20:49.135423Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.135428Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.14 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:49.135481Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:930:2414], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.135497Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:49.135501Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.135507Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.14 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:49.135558Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:932:2416], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.135574Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 12 } 2025-06-30T09:20:49.135578Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.135583Z node 1 :NODE_BROKER TRACE: 
node_broker.cpp:426: Send TEvNodesInfo for epoch #4.14 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:49.135630Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:934:2418], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.135644Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:49.135648Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.135653Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.14 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:49.135704Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:936:2420], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.135742Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 11 } 2025-06-30T09:20:49.135747Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.135753Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.14 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:49.135827Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:938:2422], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.135855Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 14 SeqNo: 6 } 2025-06-30T09:20:49.135862Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:49.135871Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:641:2213], seqNo: 6, version: 14, server pipe id: [1:938:2422] 2025-06-30T09:20:49.135879Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v14 -> v14 to [1:641:2213] 2025-06-30T09:20:49.135949Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:938:2422], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:49.135960Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:641:2213], seqNo: 6, server pipe id: [1:938:2422] 2025-06-30T09:20:49.135984Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:940:2424], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.136001Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 13 SeqNo: 7 } 2025-06-30T09:20:49.136006Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:49.136010Z node 1 
:NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:641:2213], seqNo: 7, version: 13, server pipe id: [1:940:2424] 2025-06-30T09:20:49.136015Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v13 -> v14 to [1:641:2213] 2025-06-30T09:20:49.136067Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:940:2424], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:49.136072Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:641:2213], seqNo: 7, server pipe id: [1:940:2424] 2025-06-30T09:20:49.136092Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:942:2426], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.136108Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 12 SeqNo: 8 } 2025-06-30T09:20:49.136111Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:49.136115Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:641:2213], seqNo: 8, version: 12, server pipe id: [1:942:2426] 2025-06-30T09:20:49.136119Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v12 -> v14 to [1:641:2213] 2025-06-30T09:20:49.136163Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:942:2426], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:49.136168Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:641:2213], seqNo: 8, server pipe id: [1:942:2426] 2025-06-30T09:20:49.136189Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:944:2428], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.136205Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 11 SeqNo: 9 } 2025-06-30T09:20:49.136210Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:49.136214Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:641:2213], seqNo: 9, version: 11, server pipe id: [1:944:2428] 2025-06-30T09:20:49.136219Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v11 -> v14 to [1:641:2213] 2025-06-30T09:20:49.136274Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:944:2428], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:49.136279Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:641:2213], seqNo: 9, server pipe id: [1:944:2428] 2025-06-30T09:20:49.136322Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:946:2430], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.136343Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:49.136348Z node 1 :NODE_BROKER 
TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:49.136382Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 18000023000 Name: "slot-0" } } 2025-06-30T09:20:49.136452Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:948:2432], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.136468Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-30T09:20:49.136473Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:49.136480Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } 2025-06-30T09:20:49.136542Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:950:2434], Recipient [1:884:2375]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.136554Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:641:2213], Recipient [1:884:2375]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1026 } 2025-06-30T09:20:49.136559Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:49.136565Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } >> TNodeBrokerTest::NodesMigrationSetLocation [GOOD] >> TNodeBrokerTest::DoNotReuseDynnodeIdsBelowMinDynamicNodeId >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-true >> FeatureFlagsConfiguratorTest::TestFeatureFlagsUpdates [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationSetLocation [GOOD] Test command err: 2025-06-30T09:20:49.264315Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.267866Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.267924Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.267958Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.268000Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.268028Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:49.268048Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.271612Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.271678Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.271715Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.271748Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.271793Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.271822Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.271844Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.271920Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.271971Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.271991Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.272932Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.272959Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.272983Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.273007Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.273035Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.273084Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.274038Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.274147Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.274189Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.274220Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.274256Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.274271Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.274285Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.274297Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.274878Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.274974Z node 3 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.275001Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.275023Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.275040Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.275073Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.275099Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.275216Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.275240Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.275978Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.276044Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.276059Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.276133Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.276155Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.279047Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.279069Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.279572Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.279584Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.279611Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.279988Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.280231Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.280299Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:49.280406Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.280457Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.280539Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.280594Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.280684Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.281481Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.281711Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.295683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:49.295706Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:49.299904Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:49.300300Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:49.300383Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:49.300545Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:49.300950Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:49.300975Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:49.301005Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:49.301016Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.301020Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:49.301031Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:49.301055Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:49.301059Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:49.301062Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:49.301066Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.301078Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:49.301083Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:49.333061Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:49.333103Z node 1 :NODE_BROKER TRACE: node_brok ... KER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:49.573967Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:49.574014Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:49.574080Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:49.574089Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1000: [DB] Loaded current epoch: #1.2 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.574093Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1029: [DB] Loaded approximate epoch start: #1.1 2025-06-30T09:20:49.574096Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1046: [DB] Loaded main nodes table: Nodes 2025-06-30T09:20:49.574121Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:284: [Dirty] Added node #1024.v0 host1:1001 2025-06-30T09:20:49.574142Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1128: [DB] Loaded node #1024.v0 { NodeId: 1024, State: Active, Version: 0, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:49.574161Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1024.v2 { NodeId: 1024, State: Active, Version: 2, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: , AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:49.574169Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1229: [DB] Migrating changed node #1024.v3 { NodeId: 1024, State: Active, Version: 3, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:49.574174Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 2 to 3 2025-06-30T09:20:49.574182Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:49.574206Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:49.574211Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 1 2025-06-30T09:20:49.574215Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.574231Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v3 host1:1001 to database state=Active resolvehost=host1.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:49.574274Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:49.586248Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:49.586293Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.023000Z 2025-06-30T09:20:49.586301Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z, approximate epoch start #1.1 nodes=1 expired=0 2025-06-30T09:20:49.586326Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.3 
1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z nodes=1 expired=0 removed=0 2025-06-30T09:20:49.586331Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v3 to update nodes log 2025-06-30T09:20:49.586469Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:683:2236], Recipient [1:673:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.586504Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:683:2236] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:49.586516Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:685:2238], Recipient [1:673:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.586522Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:686:2239] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:49.586533Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:685:2238] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:49.586555Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:686:2239], Recipient [1:673:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.586628Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:683:2236] 2025-06-30T09:20:49.586634Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.586638Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:49.586643Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:685:2238] 2025-06-30T09:20:49.586646Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.586648Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:49.586653Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:686:2239] 2025-06-30T09:20:49.586656Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.586658Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:49.586788Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:714:2262], Recipient [1:673:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.586811Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:631:2213], Recipient [1:673:2230]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:49.586814Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.586821Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 
1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.586869Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:716:2264], Recipient [1:673:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.586880Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:631:2213], Recipient [1:673:2230]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:49.586883Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.586886Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.586934Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:718:2266], Recipient [1:673:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.586945Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:631:2213], Recipient [1:673:2230]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:49.586948Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.586951Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.586998Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:720:2268], Recipient [1:673:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.587013Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:631:2213], Recipient [1:673:2230]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 2 } 2025-06-30T09:20:49.587018Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.587022Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.587067Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:722:2270], Recipient [1:673:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.587083Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:631:2213], Recipient [1:673:2230]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 2 SeqNo: 2 } 2025-06-30T09:20:49.587087Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:49.587092Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:631:2213], seqNo: 2, version: 2, server pipe id: [1:722:2270] 2025-06-30T09:20:49.587097Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:631:2213] 2025-06-30T09:20:49.587141Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:722:2270], Recipient [1:673:2230]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:49.587145Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:631:2213], seqNo: 2, server pipe id: [1:722:2270] 
2025-06-30T09:20:49.587167Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:724:2272], Recipient [1:673:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.587181Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:631:2213], Recipient [1:673:2230]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:49.587184Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:49.587206Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-0" } } >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes >> BuildStatsHistogram::Mixed_Groups_History [GOOD] >> BuildStatsHistogram::Serial_Groups_History >> TNodeBrokerTest::NodesMigrationReuseExpiredID >> TNodeBrokerTest::UpdateNodesLog [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestComplexYamlConfigChanges [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlResend >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp [GOOD] >> BuildStatsHistogram::Serial_Groups_History [GOOD] >> BuildStatsHistogram::Benchmark >> TVersions::Wreck0 [GOOD] >> TVersions::Wreck0Reverse ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-30T09:20:50.889900Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.892949Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.893389Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.893583Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.905164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:50.905186Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... 
waiting for nameservers are connected 2025-06-30T09:20:50.908634Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:50.909033Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:50.909088Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:50.909250Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:50.909969Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:50.909999Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:50.910030Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:50.910041Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:50.910045Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:50.910057Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:50.910072Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:50.910076Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:50.910080Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:50.910084Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:50.910097Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:50.910101Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:50.931108Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:50.931147Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.025000Z 2025-06-30T09:20:50.931159Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-30T09:20:50.931170Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z nodes=0 expired=0 removed=0 2025-06-30T09:20:50.971860Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:204:2199], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.971894Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:204:2199] Leader: 1 Dead: 0 Generation: 2 VersionInfo:  } ... 
waiting for nameservers are connected (done) 2025-06-30T09:20:50.973067Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:18:2065], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 1 } 2025-06-30T09:20:50.973080Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.973092Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:50.973136Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:208:2203], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.973165Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:206:2201], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:50.973169Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:50.973176Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:50.973222Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:50.973237Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:16:2063], path# /dc-1, domainOwnerId# 72057594046678944 2025-06-30T09:20:50.977454Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } 
PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 } 2025-06-30T09:20:50.977527Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [1:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:20:50.977571Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:16:2063], cacheItem# { Subscriber: { Subscriber: [1:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:50.977628Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: 
Send result: self# [1:217:2205], recipient# [1:209:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:50.977649 ... h: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:51.212539Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:51.212552Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:51.212565Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [2:209:2178], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:51.212568Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:51.212589Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:51.212593Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host1:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:51.212621Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v2 host1:1001 to database state=Active resolvehost=host1.host1.host1 address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:51.212667Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:51.212673Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 1 to 2 2025-06-30T09:20:51.212676Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=2 2025-06-30T09:20:51.223428Z node 2 
:NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:51.223463Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:51.223473Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-30T09:20:51.223480Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v2 host1:1001 to epoch cache 2025-06-30T09:20:51.223506Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v2 to update nodes log 2025-06-30T09:20:51.223549Z node 2 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200024000 Name: "slot-0" } 2025-06-30T09:20:51.223646Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [2:221:2209], Recipient [2:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.223664Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [2:206:2201], Recipient [2:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:51.223670Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:51.223677Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:51.223713Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:51.223751Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:16:2063], cacheItem# { Subscriber: { Subscriber: [2:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:51.223794Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:223:2210], recipient# [2:222:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { 
DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:51.223804Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:51.223815Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:51.223826Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [2:222:2178], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:51.223829Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:51.223839Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:51.223843Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host2:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:51.223866Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v3 host2:1001 to database state=Active resolvehost=host2.host2.host2 address=1.2.3.5 dc=1 location=DC=1/M=2/R=3/U=5/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:51.223906Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:51.223927Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 2 to 3 2025-06-30T09:20:51.223930Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=3 2025-06-30T09:20:51.234665Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:51.234687Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:51.234695Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 2 to 3 2025-06-30T09:20:51.234700Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v3 host2:1001 to epoch cache 2025-06-30T09:20:51.234722Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v3 to update nodes log 2025-06-30T09:20:51.234782Z node 2 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: 
"2" Rack: "3" Unit: "5" } Expire: 7200024000 Name: "slot-1" } ... waiting for cache miss 2025-06-30T09:20:51.234844Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1024 Deadline: 1.107024s } 2025-06-30T09:20:51.234853Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1024, deadline# 1.107024s 2025-06-30T09:20:51.234856Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:653: Schedule wakeup for new earliest deadline 1.107024s 2025-06-30T09:20:51.234862Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1025 Deadline: 1.107024s } 2025-06-30T09:20:51.234864Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1025, deadline# 1.107024s ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) 2025-06-30T09:20:51.245073Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:51.245112Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:51.245128Z node 2 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v1 -> v3 to [2:18:2065] ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 2025-06-30T09:20:51.327099Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:896: HandleWakeup at 1.108024s 2025-06-30T09:20:51.327138Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1024, error=Deadline exceeded 2025-06-30T09:20:51.327153Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1025, error=Deadline exceeded ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/console/ut/unittest >> FeatureFlagsConfiguratorTest::TestFeatureFlagsUpdates [GOOD] Test command err: 2025-06-30T09:20:41.035117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:41.035142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:41.035146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:41.035150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:41.035161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:41.035165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:41.035172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-30T09:20:41.035198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:41.035294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:41.035364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:41.038128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:41.038150Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:41.041620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:41.041704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:41.041749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046578944 2025-06-30T09:20:41.043168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:41.043283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:41.043406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.043519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: dc-1, pathId: [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:41.044193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:41.044230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:41.044421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:41.044432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:41.044547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:41.044560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046578944, domainId: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:41.044567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:41.044582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.087406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: 
"" Kind: "hdd" } StoragePools { Name: "" Kind: "hdd-3" } StoragePools { Name: "" Kind: "hdd-1" } StoragePools { Name: "" Kind: "hdd-2" } } } TxId: 1 TabletId: 72057594046578944 , at schemeshard: 72057594046578944 2025-06-30T09:20:41.087489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //dc-1, opId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.087568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 0 2025-06-30T09:20:41.087578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046578944, LocalPathId: 1] source path: 2025-06-30T09:20:41.087627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046578944 2025-06-30T09:20:41.087644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:41.088381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046578944 PathId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.088437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //dc-1 2025-06-30T09:20:41.088489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.088500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046578944 2025-06-30T09:20:41.088507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:41.088514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:41.088973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.088987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046578944 2025-06-30T09:20:41.088994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:41.089366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.089379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.089387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.089395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:41.090226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046578944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:41.090699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046578944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:41.090773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:41.091009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.091017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:20:41.091031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.323182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.323259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046578944, at schemeshard: 72057594046578944 2025-06-30T09:20:41.323273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.323378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:41.323390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.323431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 1 2025-06-30T09:20:41.323446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:41.328034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:41.328065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046578944, txId: 1, path id: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:41.32812 ... 
25-06-30T09:20:51.029661Z node 24 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:51.029685Z node 24 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:51.029707Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:193: StateInit, received event# 273481728, Sender [24:409:2364], Recipient [24:408:2363]: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest 2025-06-30T09:20:51.029712Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:202: StateInit, processing event TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest 2025-06-30T09:20:51.029767Z node 24 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:51.029773Z node 24 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:51.029776Z node 24 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:51.029787Z node 24 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[24:418:2370] 2025-06-30T09:20:51.029808Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:193: StateInit, received event# 273481728, Sender [24:412:2362], Recipient [24:408:2363]: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest 2025-06-30T09:20:51.029811Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:202: StateInit, processing event TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest 2025-06-30T09:20:51.030116Z node 24 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:51.030121Z node 24 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [24:409:2364] 2025-06-30T09:20:51.030230Z node 24 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[24:418:2370]} 2025-06-30T09:20:51.030246Z node 24 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:51.030252Z node 24 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:51.030254Z node 24 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:51.031817Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:193: StateInit, received event# 273481728, Sender [24:444:2377], Recipient [24:408:2363]: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest 2025-06-30T09:20:51.031832Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:202: StateInit, processing event TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest 2025-06-30T09:20:51.043682Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:193: StateInit, received event# 273481728, Sender [24:467:2404], Recipient [24:408:2363]: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest 2025-06-30T09:20:51.043700Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:202: StateInit, processing event TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest 2025-06-30T09:20:51.055373Z node 24 :BS_CONTROLLER DEBUG: {BSC19@console_interaction.cpp:74} Console proposed config response Response# {Status: ReverseCommit ConsoleConfigVersion: 0 YAML: "" } 2025-06-30T09:20:51.065742Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:193: StateInit, received event# 273285146, Sender [24:414:2363], 
Recipient [24:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Generation: 1 Config { FeatureFlags { EnableExternalHive: false EnableColumnStatistics: false EnableScaleRecommender: true } } RawConsoleConfig { } } 2025-06-30T09:20:51.065762Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:199: StateInit, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-30T09:20:51.065795Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: AllowEditYamlInUiItem 2025-06-30T09:20:51.065808Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [24:444:2377]: Config { } ItemKinds: 75 Local: true 2025-06-30T09:20:51.065841Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: FeatureFlagsItem 2025-06-30T09:20:51.065853Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [24:467:2404]: Config { FeatureFlags { EnableExternalHive: false EnableColumnStatistics: false EnableScaleRecommender: true } } ItemKinds: 26 Local: true 2025-06-30T09:20:51.065869Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:20:51.065874Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [24:412:2362]: Config { } ItemKinds: 10 Local: true 2025-06-30T09:20:51.065877Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:20:51.065883Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [24:409:2364]: Config { } ItemKinds: 10 Local: true 2025-06-30T09:20:51.066559Z node 24 :TENANT_POOL DEBUG: tenant_pool.cpp:486: TDomainTenantPool(dc-1) Got new monitoring config: 2025-06-30T09:20:51.066596Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [24:409:2364], Recipient [24:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:51.066601Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:51.066613Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [24:412:2362], Recipient [24:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:51.066616Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:51.066624Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [24:444:2377], Recipient [24:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:51.066627Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:51.066635Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [24:467:2404], Recipient [24:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:51.066638Z node 24 
:CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:51.088054Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273285146, Sender [24:414:2363], Recipient [24:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Generation: 1 Config { FeatureFlags { EnableExternalHive: false } Version { Items { Kind: 26 Id: 1 Generation: 1 } } } AffectedKinds: 26 RawConsoleConfig { FeatureFlags { EnableExternalHive: false } Version { Items { Kind: 26 Id: 1 Generation: 1 } } } } 2025-06-30T09:20:51.088079Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:221: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-30T09:20:51.088115Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: FeatureFlagsItem 2025-06-30T09:20:51.088134Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [24:467:2404]: Config { FeatureFlags { EnableExternalHive: false } } ItemKinds: 26 Local: true 2025-06-30T09:20:51.088171Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [24:467:2404], Recipient [24:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:51.088177Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:51.109800Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273285146, Sender [24:414:2363], Recipient [24:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Generation: 1 Config { FeatureFlags { EnableExternalHive: false EnableDataShardVolatileTransactions: false } Version { Items { Kind: 26 Id: 1 Generation: 1 } Items { Kind: 26 Id: 2 Generation: 1 } } } AffectedKinds: 26 RawConsoleConfig { FeatureFlags { EnableExternalHive: false EnableDataShardVolatileTransactions: false } Version { Items { Kind: 26 Id: 1 Generation: 1 } Items { Kind: 26 Id: 1 Generation: 1 } Items { Kind: 26 Id: 2 Generation: 1 } } } } 2025-06-30T09:20:51.109849Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:221: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-30T09:20:51.109883Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: FeatureFlagsItem 2025-06-30T09:20:51.109902Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [24:467:2404]: Config { FeatureFlags { EnableExternalHive: false EnableDataShardVolatileTransactions: false } } ItemKinds: 26 Local: true 2025-06-30T09:20:51.109945Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [24:467:2404], Recipient [24:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:51.109950Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:20:51.131537Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273285146, Sender [24:414:2363], Recipient [24:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Generation: 1 Config { 
FeatureFlags { EnableVolatileTransactionArbiters: false } Version { Items { Kind: 26 Id: 3 Generation: 1 } } } AffectedKinds: 26 RawConsoleConfig { FeatureFlags { EnableVolatileTransactionArbiters: false } Version { Items { Kind: 26 Id: 1 Generation: 1 } Items { Kind: 26 Id: 1 Generation: 1 } Items { Kind: 26 Id: 2 Generation: 1 } Items { Kind: 26 Id: 3 Generation: 1 } } } } 2025-06-30T09:20:51.131564Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:221: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-30T09:20:51.131595Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: FeatureFlagsItem 2025-06-30T09:20:51.131617Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [24:467:2404]: Config { FeatureFlags { EnableVolatileTransactionArbiters: false } } ItemKinds: 26 Local: true 2025-06-30T09:20:51.131653Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [24:467:2404], Recipient [24:408:2363]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:20:51.131658Z node 24 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::UpdateNodesLog [GOOD] Test command err: 2025-06-30T09:20:47.669320Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.672645Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.672690Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.672719Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.672751Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.672780Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.672799Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.676673Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.676735Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.676770Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.676802Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.676841Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.676872Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:47.676894Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.676963Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.676998Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.677013Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.677908Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.677936Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.677952Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.677969Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.677983Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.678027Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.679080Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.679214Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.679277Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.679328Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.679377Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.679403Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.679426Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.679445Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.680194Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.680316Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.680349Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.680371Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.680391Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.680430Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.680460Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:47.680497Z node 8 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.680522Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.681006Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.681054Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.681125Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.681145Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.681158Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.681170Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.681269Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.684036Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.684060Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.684159Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.684186Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.684660Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.684694Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.685035Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.685260Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.685388Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.685425Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.685482Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.685537Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { 
NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.685628Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.686366Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.686511Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.687535Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.687707Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.689518Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.689647Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.691469Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.691537Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:47.702394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:47.702417Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:47.706077Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:47.706413Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:47.706472Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:47.706611Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:47.707538Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:47.707624Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:47.707694Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:47.707714Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Star ... 
kimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.896172Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:937:2399] Leader: 1 Dead: 0 Generation: 4 VersionInfo:  } 2025-06-30T09:20:49.896192Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:938:2400] Leader: 1 Dead: 0 Generation: 4 VersionInfo:  } 2025-06-30T09:20:49.896230Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:938:2400], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.896241Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:940:2402] Leader: 1 Dead: 0 Generation: 4 VersionInfo:  } 2025-06-30T09:20:49.896254Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:941:2403] Leader: 1 Dead: 0 Generation: 4 VersionInfo:  } 2025-06-30T09:20:49.896266Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:942:2404] Leader: 1 Dead: 0 Generation: 4 VersionInfo:  } 2025-06-30T09:20:49.896280Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:940:2402], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.896319Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:941:2403], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.896334Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:942:2404], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.896412Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [3:83:2072], Recipient [1:937:2399] 2025-06-30T09:20:49.896418Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.896446Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #6 2025-06-30T09:20:49.896457Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:938:2400] 2025-06-30T09:20:49.896461Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.896465Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #6 2025-06-30T09:20:49.896473Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:940:2402] 2025-06-30T09:20:49.896477Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.896481Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #6 2025-06-30T09:20:49.896489Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:941:2403] 2025-06-30T09:20:49.896493Z node 1 :NODE_BROKER TRACE: 
node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.896498Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #6 2025-06-30T09:20:49.896506Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:942:2404] 2025-06-30T09:20:49.896510Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.896514Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #6 2025-06-30T09:20:49.896684Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:970:2427], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.896718Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2215], Recipient [1:928:2393]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:49.896722Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:49.896733Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #5.14 1970-01-01T04:00:00.024000Z - 1970-01-01T05:00:00.024000Z - 1970-01-01T06:00:00.024000Z 2025-06-30T09:20:49.896804Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:972:2429], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.896831Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:641:2215], Recipient [1:928:2393]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 14 SeqNo: 21 } 2025-06-30T09:20:49.896839Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:49.896847Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:641:2215], seqNo: 21, version: 14, server pipe id: [1:972:2429] 2025-06-30T09:20:49.896855Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v14 -> v14 to [1:641:2215] 2025-06-30T09:20:49.896917Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:972:2429], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:49.896924Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:641:2215], seqNo: 21, server pipe id: [1:972:2429] 2025-06-30T09:20:49.896950Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:974:2431], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.896967Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:641:2215], Recipient [1:928:2393]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 13 SeqNo: 22 } 2025-06-30T09:20:49.896971Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:49.896976Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:641:2215], seqNo: 22, version: 13, server pipe id: [1:974:2431] 2025-06-30T09:20:49.896981Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v13 -> v14 to [1:641:2215] 2025-06-30T09:20:49.897035Z node 1 :NODE_BROKER TRACE: 
node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:974:2431], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:49.897040Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:641:2215], seqNo: 22, server pipe id: [1:974:2431] 2025-06-30T09:20:49.897064Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:976:2433], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.897079Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:641:2215], Recipient [1:928:2393]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 12 SeqNo: 23 } 2025-06-30T09:20:49.897083Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:49.897087Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:641:2215], seqNo: 23, version: 12, server pipe id: [1:976:2433] 2025-06-30T09:20:49.897092Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v12 -> v14 to [1:641:2215] 2025-06-30T09:20:49.897141Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:976:2433], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:49.897146Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:641:2215], seqNo: 23, server pipe id: [1:976:2433] 2025-06-30T09:20:49.897167Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:978:2435], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.897183Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:641:2215], Recipient [1:928:2393]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 11 SeqNo: 24 } 2025-06-30T09:20:49.897186Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:49.897192Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:641:2215], seqNo: 24, version: 11, server pipe id: [1:978:2435] 2025-06-30T09:20:49.897197Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v11 -> v14 to [1:641:2215] 2025-06-30T09:20:49.897248Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:978:2435], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:49.897253Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:641:2215], seqNo: 24, server pipe id: [1:978:2435] 2025-06-30T09:20:49.897275Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:980:2437], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.897290Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:641:2215], Recipient [1:928:2393]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 10 SeqNo: 25 } 2025-06-30T09:20:49.897293Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:49.897297Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:641:2215], seqNo: 25, version: 10, server pipe id: [1:980:2437] 
2025-06-30T09:20:49.897302Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v10 -> v14 to [1:641:2215] 2025-06-30T09:20:49.897352Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:980:2437], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:49.897357Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:641:2215], seqNo: 25, server pipe id: [1:980:2437] 2025-06-30T09:20:49.897379Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:982:2439], Recipient [1:928:2393]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:49.897394Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:641:2215], Recipient [1:928:2393]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 0 SeqNo: 26 } 2025-06-30T09:20:49.897399Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:49.897403Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:641:2215], seqNo: 26, version: 0, server pipe id: [1:982:2439] 2025-06-30T09:20:49.897408Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v0 -> v14 to [1:641:2215] >> BuildStatsHistogram::Benchmark [GOOD] >> BuildStatsHistogram::Many_Mixed >> TNodeBrokerTest::SingleDomainModeBannedIds ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:246:2060] recipient: [1:228:2144] 2025-06-30T09:19:57.920993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:19:57.921013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:57.921017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:19:57.921021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:19:57.921025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:19:57.921028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:19:57.921035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:57.921046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:19:57.921136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:19:57.921210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:19:57.934105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:19:57.934131Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:57.936853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:19:57.937187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:19:57.937232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:19:57.942176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:19:57.942336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:19:57.942447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:57.942541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:19:57.943514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:57.943566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:19:57.943845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:57.943856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:57.943895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:19:57.943903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:19:57.943910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:19:57.943946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:19:57.945846Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:359:2060] recipient: [1:17:2064] 2025-06-30T09:19:57.966934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { 
Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:19:57.967000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:57.967063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:19:57.967072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:19:57.967127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:19:57.967142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:57.968210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:57.968257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:19:57.968335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:57.968356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:19:57.968362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:19:57.968368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:19:57.968885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:57.968898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:19:57.968906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:19:57.969270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:57.969281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:57.969287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:57.969295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:19:57.970001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:19:57.970431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:19:57.970470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:19:57.970645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:57.970668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 252 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:57.970675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:57.970762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:19:57.970770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:57.970811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:19:57.970825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:19:57.971264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:57.971273Z node 1 :FLAT_TX_SCHEMESHARD ... 
RD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 2/3, is published: true 2025-06-30T09:20:51.084031Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:20:51.084035Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:51.084039Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 104:0 2025-06-30T09:20:51.084048Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [7:997:2754] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 104 at schemeshard: 72057594046678944 2025-06-30T09:20:51.084063Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [7:247:2156], Recipient [7:997:2754]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 104 2025-06-30T09:20:51.084067Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-30T09:20:51.084071Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 104 datashard 72075186233409551 state Ready 2025-06-30T09:20:51.084077Z node 7 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186233409551 Got TEvSchemaChangedResult from SS at 72075186233409551 2025-06-30T09:20:51.084108Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [7:247:2156], Recipient [7:247:2156]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:20:51.084112Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:20:51.084117Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:20:51.084121Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-30T09:20:51.084128Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:20:51.084131Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 3/3 2025-06-30T09:20:51.084135Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-30T09:20:51.084139Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 3/3 2025-06-30T09:20:51.084142Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-30T09:20:51.084146Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 2025-06-30T09:20:51.084158Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:593:2408] message: TxId: 104 2025-06-30T09:20:51.084164Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-30T09:20:51.084170Z node 7 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-30T09:20:51.084175Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:0 2025-06-30T09:20:51.084206Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 4 2025-06-30T09:20:51.084214Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:1 2025-06-30T09:20:51.084218Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:1 2025-06-30T09:20:51.084222Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 11] was 3 2025-06-30T09:20:51.084227Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:2 2025-06-30T09:20:51.084230Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:2 2025-06-30T09:20:51.084237Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 3 2025-06-30T09:20:51.084694Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:51.084721Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:20:51.084737Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [7:593:2408] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 104 at schemeshard: 72057594046678944 2025-06-30T09:20:51.084771Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-30T09:20:51.084778Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [7:1049:2791] 2025-06-30T09:20:51.084829Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [7:1051:2793], Recipient [7:247:2156]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:51.084836Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:51.084841Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-30T09:20:51.085054Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [8:567:2104], Recipient [7:247:2156] 2025-06-30T09:20:51.085061Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:20:51.085895Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/tmp" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "NotTempTable" Columns { 
Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Utf8" } KeyColumnNames: "key" } IndexDescription { Name: "ValueIndex" KeyColumnNames: "value" } } AllowCreateInTempDir: false } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:51.085994Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp:65, at schemeshard: 72057594046678944 2025-06-30T09:20:51.086005Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 105:1, propose status:StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp:65, at schemeshard: 72057594046678944 2025-06-30T09:20:51.100739Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:20:51.101835Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 105, response: Status: StatusPreconditionFailed Reason: "Check failed: path: \'/MyRoot/tmp\', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp:65" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:51.101909Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp:65, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/tmp/NotTempTable 2025-06-30T09:20:51.101918Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-30T09:20:51.102063Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-30T09:20:51.102074Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-30T09:20:51.102156Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [7:1120:2862], Recipient [7:247:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.102164Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.102167Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:20:51.102191Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124996, 
Sender [7:593:2408], Recipient [7:247:2156]: NKikimrScheme.TEvNotifyTxCompletion TxId: 105 2025-06-30T09:20:51.102198Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4973: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-30T09:20:51.102218Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-30T09:20:51.102261Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-30T09:20:51.102268Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [7:1118:2860] 2025-06-30T09:20:51.102296Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [7:1120:2862], Recipient [7:247:2156]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:51.102302Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:51.102307Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 >> TConsoleTests::TestSetDefaultComputationalUnitsQuota [GOOD] >> TConsoleTests::TestTenantConfigConsistency >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlResend [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApply >> TNodeBrokerTest::ListNodesEpochDeltasPersistance [GOOD] >> TNodeBrokerTest::NodesMigrationReuseIDThenExtendLease [GOOD] >> TNodeBrokerTest::ShiftIdRangeRemoveExpired >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItemScope [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRemovedConfigItem >> TPartBtreeIndexIteration::FewNodes [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups >> TConsoleTests::TestCreateSharedTenant [GOOD] >> TConsoleTests::TestCreateServerlessTenant ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ListNodesEpochDeltasPersistance [GOOD] Test command err: 2025-06-30T09:20:49.095792Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.099521Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.099579Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.099636Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.099679Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.099715Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.099741Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.103571Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle 
NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.103632Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.103668Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.103705Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.103749Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.103775Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.103797Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.103861Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.103897Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.103919Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.104828Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.104855Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.104874Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.104891Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.104909Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.104958Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.105878Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.105989Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.106039Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.106071Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.106109Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.106127Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.106145Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.106158Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.106751Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.106850Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.106877Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:49.106931Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.106958Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.106995Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.107026Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.107065Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.107090Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.107670Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.107704Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.107760Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.107780Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.107791Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.107803Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.107897Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.110845Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.110897Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.110943Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.110951Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.111467Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.111714Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.111964Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.112182Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.112195Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.112274Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.112355Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.112641Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.112769Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.113653Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.113950Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.114028Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.114236Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.115135Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.115363Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.116490Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.116704Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.127603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:49.127623Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:49.131437Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:49.131832Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:49.131905Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:49.132108Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:49.132479Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:49.132503Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:49.132547Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:49.132563Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Star ... broker_impl.h:249: StateWork, received event# 269877761, Sender [1:1001:2496], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.467202Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:639:2213], Recipient [1:938:2441]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 10 SeqNo: 27 } 2025-06-30T09:20:50.467206Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.467210Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:639:2213], seqNo: 27, version: 10, server pipe id: [1:1001:2496] 2025-06-30T09:20:50.467215Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v10 -> v10 to [1:639:2213] 2025-06-30T09:20:50.467262Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:1001:2496], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:50.467266Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:639:2213], seqNo: 27, server pipe id: [1:1001:2496] 2025-06-30T09:20:50.467298Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:1003:2498], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.467318Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:639:2213], Recipient [1:938:2441]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 9 SeqNo: 28 } 2025-06-30T09:20:50.467323Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.467328Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:639:2213], seqNo: 28, version: 9, server pipe id: [1:1003:2498] 2025-06-30T09:20:50.467332Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v9 -> v10 to [1:639:2213] 2025-06-30T09:20:50.467390Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:1003:2498], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:50.467394Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:639:2213], seqNo: 28, server pipe id: [1:1003:2498] 2025-06-30T09:20:50.467417Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:1005:2500], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.467427Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:639:2213], Recipient [1:938:2441]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 8 SeqNo: 29 } 2025-06-30T09:20:50.467429Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.467432Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:639:2213], seqNo: 29, version: 8, server pipe id: [1:1005:2500] 2025-06-30T09:20:50.467435Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v8 -> v10 to [1:639:2213] 2025-06-30T09:20:50.467477Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:1005:2500], Recipient 
[1:938:2441]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:50.467480Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:639:2213], seqNo: 29, server pipe id: [1:1005:2500] 2025-06-30T09:20:50.467500Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:1007:2502], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.467510Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:639:2213], Recipient [1:938:2441]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 7 SeqNo: 30 } 2025-06-30T09:20:50.467512Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.467515Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:639:2213], seqNo: 30, version: 7, server pipe id: [1:1007:2502] 2025-06-30T09:20:50.467518Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v7 -> v10 to [1:639:2213] 2025-06-30T09:20:50.467559Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:1007:2502], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:50.467562Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:639:2213], seqNo: 30, server pipe id: [1:1007:2502] 2025-06-30T09:20:50.467579Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:1009:2504], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.467590Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:639:2213], Recipient [1:938:2441]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 6 SeqNo: 31 } 2025-06-30T09:20:50.467593Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.467596Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:639:2213], seqNo: 31, version: 6, server pipe id: [1:1009:2504] 2025-06-30T09:20:50.467598Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v6 -> v10 to [1:639:2213] 2025-06-30T09:20:50.467648Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:1009:2504], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:50.467652Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:639:2213], seqNo: 31, server pipe id: [1:1009:2504] 2025-06-30T09:20:50.467677Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:1011:2506], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.467686Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:639:2213], Recipient [1:938:2441]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 5 SeqNo: 32 } 2025-06-30T09:20:50.467689Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.467692Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:639:2213], seqNo: 32, version: 5, server pipe id: [1:1011:2506] 2025-06-30T09:20:50.467695Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v5 -> 
v10 to [1:639:2213] 2025-06-30T09:20:50.467738Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:1011:2506], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:50.467741Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:639:2213], seqNo: 32, server pipe id: [1:1011:2506] 2025-06-30T09:20:50.467762Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:1013:2508], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.467773Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:639:2213], Recipient [1:938:2441]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 4 SeqNo: 33 } 2025-06-30T09:20:50.467777Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.467779Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:639:2213], seqNo: 33, version: 4, server pipe id: [1:1013:2508] 2025-06-30T09:20:50.467782Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v4 -> v10 to [1:639:2213] 2025-06-30T09:20:50.467823Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:1013:2508], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:50.467827Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:639:2213], seqNo: 33, server pipe id: [1:1013:2508] 2025-06-30T09:20:50.467849Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:1015:2510], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.467858Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:639:2213], Recipient [1:938:2441]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 3 SeqNo: 34 } 2025-06-30T09:20:50.467861Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.467864Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:639:2213], seqNo: 34, version: 3, server pipe id: [1:1015:2510] 2025-06-30T09:20:50.467866Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v3 -> v10 to [1:639:2213] 2025-06-30T09:20:50.467910Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:1015:2510], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:50.467913Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:639:2213], seqNo: 34, server pipe id: [1:1015:2510] 2025-06-30T09:20:50.467931Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:1017:2512], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.467943Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:639:2213], Recipient [1:938:2441]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 2 SeqNo: 35 } 2025-06-30T09:20:50.467946Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.467949Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber 
[1:639:2213], seqNo: 35, version: 2, server pipe id: [1:1017:2512] 2025-06-30T09:20:50.467952Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v10 to [1:639:2213] 2025-06-30T09:20:50.467992Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:1017:2512], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:50.467995Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:639:2213], seqNo: 35, server pipe id: [1:1017:2512] 2025-06-30T09:20:50.468014Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:1019:2514], Recipient [1:938:2441]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.468028Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:639:2213], Recipient [1:938:2441]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 1 SeqNo: 36 } 2025-06-30T09:20:50.468031Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.468035Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:639:2213], seqNo: 36, version: 1, server pipe id: [1:1019:2514] 2025-06-30T09:20:50.468038Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v1 -> v10 to [1:639:2213] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationReuseIDThenExtendLease [GOOD] Test command err: 2025-06-30T09:20:50.094855Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.098670Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.098706Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.098749Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.098787Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.098813Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.098830Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.102293Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.102364Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.102411Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.102449Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.102484Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.102514Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.102537Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.102609Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.102643Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.102659Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.103602Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.103630Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.103649Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.103667Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.103685Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.103735Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.104714Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.104808Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.104851Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.104882Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.104915Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.104930Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.104946Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.104958Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.105593Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.105676Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.105700Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.105722Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.105740Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.105775Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.105804Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle 
NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.105926Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.105953Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.106344Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.106523Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.106537Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.106614Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.106635Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.106683Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.109629Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.110123Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.110146Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.110175Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.110201Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.110210Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.110521Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.110691Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.111020Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.111055Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.111175Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.111284Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.111363Z node 5 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.111484Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.111701Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.111934Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.112497Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.112634Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.112711Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.112812Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.112862Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.112943Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.113073Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.113182Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.113262Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.113320Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.126919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:50.126941Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:50.131615Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:50.131986Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTab ... 
6 to 7 2025-06-30T09:20:50.558374Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:50.558393Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:50.558397Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 1 2025-06-30T09:20:50.558401Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.558415Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #4.6 2025-06-30T09:20:50.558427Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v7 host2:1001 to database state=Active resolvehost=host2.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=2 expire=Thu, 01 Jan 1970 05:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:50.558472Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:50.570421Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:50.570469Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:50.570481Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z, approximate epoch start #4.6 nodes=1 expired=0 2025-06-30T09:20:50.570512Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z nodes=1 expired=0 removed=0 2025-06-30T09:20:50.570519Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v7 to update nodes log 2025-06-30T09:20:50.570651Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:720:2260], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.570778Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:720:2260] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:50.570797Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:721:2261] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:50.570812Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:723:2263] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:50.570825Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:721:2261], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.570859Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:723:2263], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.570868Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 
72057594037936129 Status: OK ServerId: [1:724:2264] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:50.570893Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:724:2264], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.570905Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:725:2265] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:50.570936Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:725:2265], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.571019Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [2:54:2072], Recipient [1:720:2260] 2025-06-30T09:20:50.571026Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.571038Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.571056Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:721:2261] 2025-06-30T09:20:50.571062Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.571070Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.571117Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:723:2263] 2025-06-30T09:20:50.571123Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.571130Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.571158Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:724:2264] 2025-06-30T09:20:50.571162Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.571169Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.571201Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:725:2265] 2025-06-30T09:20:50.571206Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.571213Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.571421Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:753:2288], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.571443Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender 
[1:643:2213], Recipient [1:709:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:50.571448Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.571455Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.571528Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:755:2290], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.571549Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:643:2213], Recipient [1:709:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:50.571555Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.571562Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.571635Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:757:2292], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.571655Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:643:2213], Recipient [1:709:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:50.571661Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.571668Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.571737Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:759:2294], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.571761Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:643:2213], Recipient [1:709:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 6 } 2025-06-30T09:20:50.571767Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.571773Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.7 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.571838Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:761:2296], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.571861Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:643:2213], Recipient [1:709:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 6 SeqNo: 2 } 2025-06-30T09:20:50.571867Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.571874Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:643:2213], seqNo: 2, version: 6, server pipe id: [1:761:2296] 2025-06-30T09:20:50.571881Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send 
TEvUpdateNodes v6 -> v7 to [1:643:2213] 2025-06-30T09:20:50.571949Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:761:2296], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:50.571956Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:643:2213], seqNo: 2, server pipe id: [1:761:2296] 2025-06-30T09:20:50.571997Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:763:2298], Recipient [1:709:2254]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.572016Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:643:2213], Recipient [1:709:2254]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:50.572022Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:50.572054Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 18000024000 Name: "slot-0" } } >> TNodeBrokerTest::NodesMigrationNewActiveNode [GOOD] >> TNodeBrokerTest::NodeNameExpiration [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApply [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithDb |80.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut >> TSlotIndexesPoolTest::Expansion [GOOD] >> TChargeBTreeIndex::OneNode_Groups [GOOD] >> TChargeBTreeIndex::OneNode_History >> TNodeBrokerTest::SingleDomainModeBannedIds [GOOD] >> TNodeBrokerTest::TestListNodesEpochDeltas [GOOD] >> TNodeBrokerTest::NodesMigration999Nodes >> TNodeBrokerTest::NodesMigrationExpireRemoved [GOOD] >> TSlotIndexesPoolTest::Ranges [GOOD] >> TNodeBrokerTest::Test999NodesSubscribers >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithDb [GOOD] >> BasicUsage::WriteSessionSwitchDatabases [GOOD] |80.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |80.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodeNameExpiration [GOOD] Test command err: 2025-06-30T09:20:49.073483Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.077030Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.077071Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.077097Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.077128Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.077162Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.077186Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.081296Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.081362Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.081398Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.081433Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.081475Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.081505Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.081528Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.081600Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.081645Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.081663Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.082586Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.082616Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.082635Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.082656Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.082672Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.082725Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.084015Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.084144Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.084206Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.084253Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.084301Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.084324Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.084347Z node 4 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.084367Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.085074Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.085159Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.085186Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.085209Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.085228Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.085265Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.085297Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.085333Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.085358Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.086010Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.086044Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.086101Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.086121Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.086133Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.086162Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.086272Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.090371Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.090428Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.090448Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.090478Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.091337Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.091368Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.091684Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.091979Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.092301Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.092463Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.092631Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.092690Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.092756Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.108911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:49.108934Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:49.113382Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:49.113780Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:49.113853Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:49.114007Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:49.114451Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:49.114482Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:49.114510Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:49.114520Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.114524Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:49.114535Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:49.114560Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:49.114564Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:49.114570Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:49.114574Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.114588Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:49.114593Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:49.146980Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:49.147016Z node 1 :NODE_BROKER TRACE: node_brok ... 00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.957314Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.12 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.957321Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.12 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.957329Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.12 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.957336Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.12 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.957344Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.12 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.978046Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:794:2320], Recipient [1:565:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.978082Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:661:2241], Recipient [1:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:50.978087Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.978096Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.12 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:50.978146Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:796:2322], Recipient [1:565:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.978171Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: 
StateWork, received event# 272039938, Sender [1:661:2241], Recipient [1:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host5" Port: 19001 ResolveHost: "host5" Address: "" Location { } FixedNodeId: false Path: "/dc-1/my-database" } 2025-06-30T09:20:50.978175Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:50.978181Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host5" Port: 19001 ResolveHost: "host5" Address: "" Location { } FixedNodeId: false Path: "/dc-1/my-database" 2025-06-30T09:20:50.978221Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:23:2070], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/my-database TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:50.978244Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:23:2070], path# /dc-1/my-database, domainOwnerId# 72057594046678944 2025-06-30T09:20:50.978485Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:23:2070], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1/my-database PathId: [OwnerId: 72057594046678944, LocalPathId: 2] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1/my-database" PathDescription { Self { Name: "my-database" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 } 2025-06-30T09:20:50.978519Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:23:2070], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1/my-database PathId: [OwnerId: 72057594046678944, LocalPathId: 2] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1/my-database" PathDescription { Self { Name: "my-database" PathId: 2 SchemeshardId: 72057594046678944 
PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [1:798:2323] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:20:50.978548Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:23:2070], cacheItem# { Subscriber: { Subscriber: [1:798:2323] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 9 TableKind: 0 Created: 1 CreateStep: 5000001 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] DomainId: [OwnerId: 72057594046678944, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/my-database TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:50.978589Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:805:2324], recipient# [1:797:2185], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/my-database TableId: [72057594046678944:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:50.978599Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1/my-database TableId: [72057594046678944:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, 
LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:50.978609Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host5" Port: 19001 ResolveHost: "host5" Address: "" Location { } FixedNodeId: false Path: "/dc-1/my-database": scope id# <72057594046678944:2>: serviced subdomain# 72057594046678944:2 2025-06-30T09:20:50.978621Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:797:2185], Recipient [1:565:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:50.978625Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:50.978638Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:50.978641Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host5:19001 (not fixed) tenant: /dc-1/my-database 2025-06-30T09:20:50.978657Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v13 host5:19001 to database state=Active resolvehost=host5 address= dc= location= lease=1 expire=Thu, 01 Jan 1970 05:00:00 UTC servicedsubdomain=72057594046678944:2 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:50.978692Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v13 host5:19001 2025-06-30T09:20:50.978697Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 12 to 13 2025-06-30T09:20:50.978700Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=13 2025-06-30T09:20:50.989448Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:50.989469Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v13 host5:19001 2025-06-30T09:20:50.989478Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 12 to 13 2025-06-30T09:20:50.989481Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v13 host5:19001 to epoch cache 2025-06-30T09:20:50.989496Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v13 to update nodes log 2025-06-30T09:20:50.989527Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host5" Port: 19001 ResolveHost: "host5" Address: "" Location { } Expire: 18000023000 Name: "slot-1" } |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut >> TFlatExecutorLeases::BasicsInitialLeaseSleepTimeout [GOOD] >> TNodeBrokerTest::SeveralNodesSubscribersPerPipe >> TNodeBrokerTest::NoEffectBeforeCommit >> TFlatTableDatetime::TestDate [GOOD] >> TNodeBrokerTest::ShiftIdRangeRemoveNew [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPending [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPendingExtSubdomain >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundSnapshot [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotToRegular [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen1 [GOOD] >> 
TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionToRegular [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen2 >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen2 [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotPriorityByTime [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionPriorityByTime |80.9%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |80.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |80.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TSlotIndexesPoolTest::Expansion [GOOD] |80.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TSlotIndexesPoolTest::Ranges [GOOD] |80.9%| [LD] {RESULT} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |80.9%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... results_accumulator.log} >> LocalPartition::WithoutPartitionPartitionRelocation [GOOD] >> LocalPartition::DirectWriteWithoutDescribeResourcesPermission >> TVersions::Wreck0Reverse [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionPriorityByTime [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_Default >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_Default [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False [GOOD] >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups [GOOD] >> TPartBtreeIndexIteration::FewNodes_History >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_EnableLocalDBFlatIndex_False ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExpireRemoved [GOOD] Test command err: 2025-06-30T09:20:49.598876Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.602616Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.602658Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.602681Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.602710Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.602756Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.602782Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.606503Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.606555Z node 4 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.606591Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.606622Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.606661Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.606687Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.606708Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.606797Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.606832Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.606854Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.607734Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.607764Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.607784Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.607802Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.607820Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.607869Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.608820Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.608911Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.608952Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.608983Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.609015Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.609029Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.609043Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.609055Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.609582Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609672Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609700Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609717Z node 5 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609737Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609771Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609797Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.609856Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609883Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.610391Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.610405Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.610423Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.610471Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.610489Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.610534Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.613845Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.613865Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.613941Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.614020Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.614097Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.614277Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.614359Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.614601Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.614763Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:49.614894Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.615059Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.615151Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.616354Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.616825Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.617800Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.617936Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618094Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618449Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.628808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:49.628830Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:49.632492Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:49.632827Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:49.632878Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:49.633013Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:49.633356Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:49.633380Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:49.633412Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:49.633422Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.633425Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:49.633436Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:49.633458Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:49.633461Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:49.633465Z node 1 :NODE_BROKER DEB ... 
ed main nodes table: Nodes 2025-06-30T09:20:51.406424Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:294: [Dirty] Added expired node #1024.v0 host2:1001 2025-06-30T09:20:51.406454Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1128: [DB] Loaded node #1024.v0 { NodeId: 1024, State: Expired, Version: 0, Host: host2, Port: 1001, ResolveHost: host2.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 05:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:51.406482Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1024.v5 { NodeId: 1024, State: Removed, Version: 5, Host: , Port: 0, ResolveHost: , Address: , Lease: 0, Expire: Thu, 01 Jan 1970 00:00:00 UTC, Location: , AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 0:0 } 2025-06-30T09:20:51.406491Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1229: [DB] Migrating changed node #1024.v8 { NodeId: 1024, State: Expired, Version: 8, Host: host2, Port: 1001, ResolveHost: host2.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 05:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:51.406510Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:51.406555Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:51.406561Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 1, NewVersionUpdateNodes left 0 2025-06-30T09:20:51.406573Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v8 host2:1001 to database state=Expired resolvehost=host2.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 05:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:51.406614Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:51.406619Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #6.8 2025-06-30T09:20:51.418757Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:51.418822Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T06:00:00.023000Z 2025-06-30T09:20:51.418835Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #6.8 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z, approximate epoch start #6.8 nodes=0 expired=1 2025-06-30T09:20:51.418869Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##6.8 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z nodes=0 expired=1 removed=0 2025-06-30T09:20:51.418878Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v8 to update nodes log 2025-06-30T09:20:51.419019Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:783:2298], Recipient [1:774:2292]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.419076Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:785:2300], Recipient [1:774:2292]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.419148Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:783:2298] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:51.419172Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:785:2300] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:51.419188Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:786:2301] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:51.419206Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:786:2301], Recipient [1:774:2292]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.419232Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:787:2302] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:51.419245Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:787:2302], Recipient [1:774:2292]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.419347Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:783:2298] 2025-06-30T09:20:51.419355Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.419369Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.8 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:51.419389Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:785:2300] 2025-06-30T09:20:51.419395Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.419404Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.8 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:51.419419Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:786:2301] 2025-06-30T09:20:51.419424Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.419432Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.8 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:51.419485Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:787:2302] 2025-06-30T09:20:51.419490Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.419498Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.8 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:51.419699Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:815:2325], Recipient 
[1:774:2292]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.419735Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:774:2292]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:51.419740Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.419747Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.8 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:51.419827Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:817:2327], Recipient [1:774:2292]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.419847Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:774:2292]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:51.419852Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.419860Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.8 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:51.419928Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:819:2329], Recipient [1:774:2292]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.419947Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:774:2292]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:51.419952Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.419959Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.8 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:51.420029Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:821:2331], Recipient [1:774:2292]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.420055Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:774:2292]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 8 } 2025-06-30T09:20:51.420061Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.420069Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.8 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:51.420131Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:823:2333], Recipient [1:774:2292]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.420164Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:635:2213], Recipient [1:774:2292]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 8 SeqNo: 2 } 2025-06-30T09:20:51.420171Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:51.420179Z node 1 :NODE_BROKER DEBUG: 
node_broker.cpp:753: New subscriber [1:635:2213], seqNo: 2, version: 8, server pipe id: [1:823:2333] 2025-06-30T09:20:51.420187Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v8 -> v8 to [1:635:2213] 2025-06-30T09:20:51.420261Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:823:2333], Recipient [1:774:2292]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:51.420268Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:635:2213], seqNo: 2, server pipe id: [1:823:2333] 2025-06-30T09:20:51.420305Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:825:2335], Recipient [1:774:2292]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.420330Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:635:2213], Recipient [1:774:2292]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:51.420339Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:51.420359Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationNewActiveNode [GOOD] Test command err: 2025-06-30T09:20:50.450291Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.454161Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.454214Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.454250Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.454284Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.454318Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.454343Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.458348Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.458422Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.458486Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.458539Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.458606Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.458643Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:50.458679Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.458785Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.458831Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.458861Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.459809Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.459841Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.459863Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.459884Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.459902Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.459957Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.461054Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.461163Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.461213Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.461248Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.461287Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.461308Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.461333Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.461356Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.461972Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.462065Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.462094Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.462117Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.462138Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.462176Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.462208Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.462247Z node 8 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.462274Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.462974Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.463018Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.463077Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.463096Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.463109Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.463122Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.463218Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.466577Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.466643Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.467179Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.467195Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.467315Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.467608Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.468032Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.468089Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.468246Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.468272Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.468329Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.468572Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { 
NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.468710Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.469588Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.469859Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.470061Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.470205Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.471730Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.472015Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.473695Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.473921Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.474319Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.474450Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.493539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:50.493572Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:50.499291Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:50.499818Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:50.499914Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:50.500132Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:50.501008Z node 1 :NODE_BROKER DEBUG: node_ ... 
urrent epoch: #1.2 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:50.829349Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1029: [DB] Loaded approximate epoch start: #1.1 2025-06-30T09:20:50.829354Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1046: [DB] Loaded main nodes table: Nodes 2025-06-30T09:20:50.829378Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:284: [Dirty] Added node #1024.v0 host1:1001 2025-06-30T09:20:50.829491Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1128: [DB] Loaded node #1024.v0 { NodeId: 1024, State: Active, Version: 0, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:50.829506Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1268: [DB] Migrating new active node #1024.v3 { NodeId: 1024, State: Active, Version: 3, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:50.829513Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 2 to 3 2025-06-30T09:20:50.829526Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:50.829570Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:50.829576Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 1 2025-06-30T09:20:50.829583Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.3 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:50.829606Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v3 host1:1001 to database state=Active resolvehost=host1.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:50.829642Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:50.841451Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:50.841495Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.025000Z 2025-06-30T09:20:50.841507Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.3 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z, approximate epoch start #1.1 nodes=1 expired=0 2025-06-30T09:20:50.841540Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.3 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z nodes=1 expired=0 removed=0 2025-06-30T09:20:50.841547Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v3 to update nodes log 2025-06-30T09:20:50.841656Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:688:2237], Recipient [1:680:2234]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.841675Z node 
1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:692:2241], Recipient [1:680:2234]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.841769Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:688:2237] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:50.841789Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:693:2242] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:50.841802Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:694:2243] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:50.841821Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:693:2242], Recipient [1:680:2234]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.841864Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:694:2243], Recipient [1:680:2234]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.841874Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:692:2241] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:50.841970Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [2:54:2072], Recipient [1:688:2237] 2025-06-30T09:20:50.841976Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.841983Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:50.841996Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [3:83:2072], Recipient [1:693:2242] 2025-06-30T09:20:50.842000Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.842005Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:50.842015Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:694:2243] 2025-06-30T09:20:50.842020Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.842025Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:50.842033Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:692:2241] 2025-06-30T09:20:50.842038Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.842043Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:50.842179Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:723:2267], Recipient [1:680:2234]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.842206Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:643:2215], 
Recipient [1:680:2234]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:50.842211Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.842220Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:50.842288Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:725:2269], Recipient [1:680:2234]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.842307Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:643:2215], Recipient [1:680:2234]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:50.842312Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.842319Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:50.842400Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:727:2271], Recipient [1:680:2234]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.842420Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:643:2215], Recipient [1:680:2234]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:50.842425Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.842432Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:50.842492Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:729:2273], Recipient [1:680:2234]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.842515Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:643:2215], Recipient [1:680:2234]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 2 } 2025-06-30T09:20:50.842520Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:50.842527Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:50.842592Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:731:2275], Recipient [1:680:2234]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.842609Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:643:2215], Recipient [1:680:2234]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 2 SeqNo: 2 } 2025-06-30T09:20:50.842615Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:50.842622Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:643:2215], seqNo: 2, version: 2, server pipe id: [1:731:2275] 2025-06-30T09:20:50.842629Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 
to [1:643:2215] 2025-06-30T09:20:50.842691Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:731:2275], Recipient [1:680:2234]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:50.842698Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:643:2215], seqNo: 2, server pipe id: [1:731:2275] 2025-06-30T09:20:50.842733Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:733:2277], Recipient [1:680:2234]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:50.842774Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:643:2215], Recipient [1:680:2234]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:50.842780Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:50.842810Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200025000 Name: "slot-0" } } >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_EnableLocalDBFlatIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False_EnableLocalDBFlatIndex_False >> TConsoleConfigSubscriptionTests::TestNotificationForRemovedConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedClient >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False_EnableLocalDBFlatIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_TurnOff ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ShiftIdRangeRemoveNew [GOOD] Test command err: 2025-06-30T09:20:50.786313Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.790102Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.790166Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.790201Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.790241Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.790276Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.790301Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.795468Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.795556Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.795611Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle 
NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.795658Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.795718Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.795760Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.795794Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.795886Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.795935Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.795964Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.797386Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.797445Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.797479Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.797513Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.797546Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.797634Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.798859Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.798996Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.799045Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.799098Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.799141Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.799159Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.799176Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.799205Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.799866Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.799976Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.800005Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.800031Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.800054Z node 6 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.800106Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.800142Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.800182Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.800211Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.801011Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.801036Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.801063Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.801149Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.801182Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.801261Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.805917Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.805974Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.805992Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.806795Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.806838Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.807533Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.807664Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.807819Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.807986Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.808039Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:50.808221Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.808346Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.809142Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.809518Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.826898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:50.826922Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:50.830544Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:50.830968Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:50.831047Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:50.831200Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:50.831898Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:50.832020Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:50.832059Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:50.832072Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:50.832076Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:50.832087Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:50.832109Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:50.832113Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:50.832117Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:50.832120Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:50.832133Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:50.832138Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:50.863966Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:50.863999Z node 1 :NODE_BROKER TRACE: node_brok ... 
d TEvNodesInfo for epoch #2.8 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z 2025-06-30T09:20:51.914521Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:777:2314], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.914535Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:631:2213], Recipient [1:721:2265]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:51.914540Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.914546Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.8 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z 2025-06-30T09:20:51.914602Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:779:2316], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.914620Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:631:2213], Recipient [1:721:2265]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 5 } 2025-06-30T09:20:51.914625Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.914631Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.8 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z 2025-06-30T09:20:51.914685Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:781:2318], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.914701Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:631:2213], Recipient [1:721:2265]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:51.914709Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.914714Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.8 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z 2025-06-30T09:20:51.914789Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:783:2320], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.914808Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:631:2213], Recipient [1:721:2265]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 4 } 2025-06-30T09:20:51.914813Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.914819Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.8 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z 2025-06-30T09:20:51.914877Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:785:2322], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.914900Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:631:2213], Recipient [1:721:2265]: 
NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 8 SeqNo: 2 } 2025-06-30T09:20:51.914907Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:51.914915Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:631:2213], seqNo: 2, version: 8, server pipe id: [1:785:2322] 2025-06-30T09:20:51.914923Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v8 -> v8 to [1:631:2213] 2025-06-30T09:20:51.914987Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:785:2322], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:51.914993Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:631:2213], seqNo: 2, server pipe id: [1:785:2322] 2025-06-30T09:20:51.915015Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:787:2324], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.915033Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:631:2213], Recipient [1:721:2265]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 7 SeqNo: 3 } 2025-06-30T09:20:51.915038Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:51.915042Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:631:2213], seqNo: 3, version: 7, server pipe id: [1:787:2324] 2025-06-30T09:20:51.915048Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v7 -> v8 to [1:631:2213] 2025-06-30T09:20:51.915100Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:787:2324], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:51.915115Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:631:2213], seqNo: 3, server pipe id: [1:787:2324] 2025-06-30T09:20:51.915137Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:789:2326], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.915155Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:631:2213], Recipient [1:721:2265]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 6 SeqNo: 4 } 2025-06-30T09:20:51.915161Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:51.915165Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:631:2213], seqNo: 4, version: 6, server pipe id: [1:789:2326] 2025-06-30T09:20:51.915171Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v6 -> v8 to [1:631:2213] 2025-06-30T09:20:51.915218Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:789:2326], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:51.915224Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:631:2213], seqNo: 4, server pipe id: [1:789:2326] 2025-06-30T09:20:51.915246Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:791:2328], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 
2025-06-30T09:20:51.915264Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:631:2213], Recipient [1:721:2265]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 5 SeqNo: 5 } 2025-06-30T09:20:51.915269Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:51.915273Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:631:2213], seqNo: 5, version: 5, server pipe id: [1:791:2328] 2025-06-30T09:20:51.915281Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v5 -> v8 to [1:631:2213] 2025-06-30T09:20:51.915336Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:791:2328], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:51.915343Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:631:2213], seqNo: 5, server pipe id: [1:791:2328] 2025-06-30T09:20:51.915367Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:793:2330], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.915383Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:631:2213], Recipient [1:721:2265]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 4 SeqNo: 6 } 2025-06-30T09:20:51.915387Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:51.915391Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:631:2213], seqNo: 6, version: 4, server pipe id: [1:793:2330] 2025-06-30T09:20:51.915397Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v4 -> v8 to [1:631:2213] 2025-06-30T09:20:51.915458Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:793:2330], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:51.915465Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:631:2213], seqNo: 6, server pipe id: [1:793:2330] 2025-06-30T09:20:51.915494Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:795:2332], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.915516Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:631:2213], Recipient [1:721:2265]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:51.915522Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:51.915562Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 10800023000 Name: "slot-0" } } 2025-06-30T09:20:51.915643Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:797:2334], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.915660Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:631:2213], 
Recipient [1:721:2265]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-30T09:20:51.915665Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:51.915683Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 10800023000 Name: "slot-1" } } 2025-06-30T09:20:51.915739Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:799:2336], Recipient [1:721:2265]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.915757Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:631:2213], Recipient [1:721:2265]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1026 } 2025-06-30T09:20:51.915762Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:51.915770Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::TestListNodesEpochDeltas [GOOD] Test command err: 2025-06-30T09:20:49.605702Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609549Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609602Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609635Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609675Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609711Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.609735Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.614624Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.614697Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.614767Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.614820Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.614878Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.614918Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:49.614951Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.615033Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.615079Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.615104Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.616069Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.616105Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.616125Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.616144Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.616161Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.616214Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.617085Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.617172Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.617222Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.617252Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.617287Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.617303Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.617318Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.617331Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.617877Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.617957Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.617979Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.617999Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618016Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618046Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618071Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.618102Z node 8 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618123Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618306Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618616Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618665Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618685Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618695Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.618706Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.621359Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.621408Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.621879Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.621890Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.622244Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.622350Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.622687Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.622726Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.622757Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.622825Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.622896Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.623033Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.623055Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.623188Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.623267Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.623589Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.623930Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.623982Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.624173Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.624259Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.624275Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.624531Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.625176Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.625430Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.636566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:49.636586Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:49.640601Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:49.641021Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:49.641101Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:49.641265Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:49.641625Z node 1 :NODE_BROKER DEBUG: node_ ... 
0:00.023000Z 2025-06-30T09:20:51.002282Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:817:2331], Recipient [1:760:2282]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.002290Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:760:2282]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:51.002293Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.002297Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.7 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:51.022754Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:818:2332], Recipient [1:760:2282]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.022790Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:818:2332] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:51.022834Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:25:2072], Recipient [1:760:2282]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 4 } 2025-06-30T09:20:51.022838Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.022843Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #4 2025-06-30T09:20:51.033322Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:822:2334], Recipient [1:760:2282]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.033354Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:821:2333], Recipient [1:760:2282]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.033393Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:821:2333] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:51.033408Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:822:2334] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:51.033453Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:821:2333] 2025-06-30T09:20:51.033458Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.033467Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #4 2025-06-30T09:20:51.033474Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:822:2334] 2025-06-30T09:20:51.033477Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.033479Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #4 2025-06-30T09:20:51.255012Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435072, Sender 
[1:760:2282], Recipient [1:760:2282]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvUpdateEpoch 2025-06-30T09:20:51.255044Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:263: StateWork, processing event TEvPrivate::TEvUpdateEpoch 2025-06-30T09:20:51.255064Z node 1 :NODE_BROKER DEBUG: node_broker__update_epoch.cpp:20: TTxUpdateEpoch Execute 2025-06-30T09:20:51.255076Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:548: [Dirty] Move to new epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z, approximate epoch start #4.8 2025-06-30T09:20:51.255082Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.255110Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #4.8 2025-06-30T09:20:51.419415Z node 1 :NODE_BROKER DEBUG: node_broker__update_epoch.cpp:31: TTxUpdateEpoch Complete 2025-06-30T09:20:51.419451Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:548: [Committed] Move to new epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z, approximate epoch start #4.8 2025-06-30T09:20:51.419471Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:51.419481Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z, approximate epoch start #4.8 nodes=4 expired=0 2025-06-30T09:20:51.419538Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z nodes=4 expired=0 removed=0 2025-06-30T09:20:51.419545Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v4 to update nodes log 2025-06-30T09:20:51.419555Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v5 to update nodes log 2025-06-30T09:20:51.419562Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1026.v6 to update nodes log 2025-06-30T09:20:51.419569Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1027.v7 to update nodes log 2025-06-30T09:20:51.419585Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.419596Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.419605Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.419614Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.419622Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.419631Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.419640Z node 1 :NODE_BROKER TRACE: 
node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.419679Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.443242Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:845:2345], Recipient [1:760:2282]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.443316Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:760:2282]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:51.443326Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.443347Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.443457Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:847:2347], Recipient [1:760:2282]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.443480Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:760:2282]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:51.443488Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.443495Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.443564Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:849:2349], Recipient [1:760:2282]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.443584Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:760:2282]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:51.443590Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.443596Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.443661Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:851:2351], Recipient [1:760:2282]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.443693Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:760:2282]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 8 } 2025-06-30T09:20:51.443699Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.443705Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.443770Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:853:2353], Recipient [1:760:2282]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.443791Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:760:2282]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:51.443797Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.443803Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:51.443898Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:855:2355], Recipient [1:760:2282]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:51.443920Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:641:2213], Recipient [1:760:2282]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 6 } 2025-06-30T09:20:51.443927Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:51.443934Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.8 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SingleDomainModeBannedIds [GOOD] Test command err: 2025-06-30T09:20:51.930711Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.933620Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.934054Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.934301Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.947169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:51.947193Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:51.951107Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:51.951525Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:51.951580Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:51.951762Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:51.952792Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:51.952834Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:51.952873Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:51.952886Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:51.952891Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:51.952904Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:51.952924Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:51.952928Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:51.952931Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:51.952936Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:51.952950Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:51.952955Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:51.974245Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:51.974290Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.025000Z 2025-06-30T09:20:51.974299Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-30T09:20:51.974308Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z nodes=0 expired=0 removed=0 2025-06-30T09:20:52.015273Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:204:2199], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:52.015331Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:204:2199] Leader: 1 Dead: 0 Generation: 2 VersionInfo:  } ... 
waiting for nameservers are connected (done) 2025-06-30T09:20:52.017921Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:18:2065], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 1 } 2025-06-30T09:20:52.017960Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:52.017980Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:52.018095Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:208:2203], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:52.018141Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039946, Sender [1:206:2201], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSetConfigRequest { Config { BannedNodeIds { From: 1025 To: 1032 } } } 2025-06-30T09:20:52.018149Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:260: StateWork, processing event TEvNodeBroker::TEvSetConfigRequest 2025-06-30T09:20:52.018179Z node 1 :NODE_BROKER DEBUG: node_broker__update_config.cpp:53: TTxUpdateConfig Execute Config { BannedNodeIds { From: 1025 To: 1032 } } 2025-06-30T09:20:52.018210Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1301: [DB] Update config in database config=BannedNodeIds { From: 1025 To: 1032 } 2025-06-30T09:20:52.029308Z node 1 :NODE_BROKER DEBUG: node_broker__update_config.cpp:85: TTxUpdateConfig Complete 2025-06-30T09:20:52.029371Z node 1 :NODE_BROKER TRACE: node_broker__update_config.cpp:92: TTxUpdateConfig reply with: NKikimr::NNodeBroker::TEvNodeBroker::TEvSetConfigResponse { Status { Code: OK } } 2025-06-30T09:20:52.029511Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:212:2207], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:52.029542Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:206:2201], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:52.029549Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:52.029565Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:52.029614Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:214:2209], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:52.029654Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:206:2201], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:52.029661Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:52.029673Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: 
"1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:52.029743Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:52.029761Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:16:2063], path# /dc-1, domainOwnerId# 72057594046678944 2025-06-30T09:20:52.038208Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 } 2025-06-30T09:20:52.038351Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } 
DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathEl ... de_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:52.878040Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: ERROR_TEMP Reason: "No free node IDs" } 2025-06-30T09:20:52.878186Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 268829696, Sender [1:167:2174], Recipient [1:173:2178]: NKikimr::TEvTablet::TEvTabletDead 2025-06-30T09:20:52.878217Z node 1 :NODE_BROKER INFO: node_broker.cpp:126: OnTabletDead: 72057594037936129 2025-06-30T09:20:52.878221Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:215: TNodeBroker::Cleanup 2025-06-30T09:20:52.878462Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:672: Handle NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594037936129 ClientId: [1:34:2065] ServerId: [1:204:2199] } 2025-06-30T09:20:52.880205Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:52.881054Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:52.881115Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:52.881589Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:52.881655Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:52.881731Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:52.881818Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:970: [DB] Loaded config: BannedNodeIds { From: 1024 To: 1029 } BannedNodeIds { From: 1031 To: 1032 } 2025-06-30T09:20:52.881850Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1000: [DB] Loaded current epoch: #3.8 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:52.881856Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1029: [DB] Loaded approximate epoch start: #3.8 2025-06-30T09:20:52.881860Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1046: [DB] Loaded main nodes table: Nodes 2025-06-30T09:20:52.881894Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:294: [Dirty] Added expired node #1024.v0 host1:1001 2025-06-30T09:20:52.881926Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1128: [DB] Loaded node #1024.v0 { NodeId: 1024, State: Expired, Version: 0, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:52.881937Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:284: [Dirty] Added node #1030.v0 host3:1001 
2025-06-30T09:20:52.881946Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1128: [DB] Loaded node #1030.v0 { NodeId: 1030, State: Active, Version: 0, Host: host3, Port: 1001, ResolveHost: host3.yandex.net, Address: 1.2.3.6, Lease: 2, Expire: Thu, 01 Jan 1970 03:00:00 UTC, Location: DC=1/M=2/R=3/U=6/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 2, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:52.881954Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:284: [Dirty] Added node #1033.v0 host2:1001 2025-06-30T09:20:52.881960Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1128: [DB] Loaded node #1033.v0 { NodeId: 1033, State: Active, Version: 0, Host: host2, Port: 1001, ResolveHost: host2.yandex.net, Address: 1.2.3.5, Lease: 2, Expire: Thu, 01 Jan 1970 03:00:00 UTC, Location: DC=1/M=2/R=3/U=5/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 1, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:52.881987Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1024.v8 { NodeId: 1024, State: Expired, Version: 8, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:52.881993Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1254: [DB] Node #1024.v8 is already migrated 2025-06-30T09:20:52.882005Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1030.v6 { NodeId: 1030, State: Active, Version: 6, Host: host3, Port: 1001, ResolveHost: host3.yandex.net, Address: 1.2.3.6, Lease: 2, Expire: Thu, 01 Jan 1970 03:00:00 UTC, Location: DC=1/M=2/R=3/U=6/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 2, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:52.882009Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1254: [DB] Node #1030.v6 is already migrated 2025-06-30T09:20:52.882019Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1033.v7 { NodeId: 1033, State: Active, Version: 7, Host: host2, Port: 1001, ResolveHost: host2.yandex.net, Address: 1.2.3.5, Lease: 2, Expire: Thu, 01 Jan 1970 03:00:00 UTC, Location: DC=1/M=2/R=3/U=5/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 1, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:52.882023Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1254: [DB] Node #1033.v7 is already migrated 2025-06-30T09:20:52.882042Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:52.882073Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:52.882078Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:52.882083Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:52.882088Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:52.882111Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T03:00:00.025000Z 2025-06-30T09:20:52.882119Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #3.8 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z, approximate epoch start #3.8 nodes=2 expired=1 2025-06-30T09:20:52.882141Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: 
Preparing update nodes log for epoch ##3.8 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z nodes=2 expired=1 removed=0 2025-06-30T09:20:52.882147Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1030.v6 to update nodes log 2025-06-30T09:20:52.882156Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1033.v7 to update nodes log 2025-06-30T09:20:52.882162Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v8 to update nodes log 2025-06-30T09:20:52.883302Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:361:2318], Recipient [1:327:2291]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:52.883345Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:206:2201], Recipient [1:327:2291]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host4" Port: 1001 ResolveHost: "host4.yandex.net" Address: "1.2.3.7" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "7" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:52.883352Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:52.883363Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host4" Port: 1001 ResolveHost: "host4.yandex.net" Address: "1.2.3.7" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "7" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:52.883417Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:52.883443Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:16:2063], cacheItem# { Subscriber: { Subscriber: [1:311:2287] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:52.883491Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:363:2319], recipient# [1:362:2291], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:52.883506Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle 
TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:52.883522Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host4" Port: 1001 ResolveHost: "host4.yandex.net" Address: "1.2.3.7" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "7" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:52.883534Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:362:2291], Recipient [1:327:2291]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:52.883538Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:52.883549Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:52.883554Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host4:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:52.883563Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host4:1001: ERROR_TEMP: No free node IDs 2025-06-30T09:20:52.883574Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:52.883584Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: ERROR_TEMP Reason: "No free node IDs" } >> TNodeBrokerTest::NodesMigrationReuseExpiredID [GOOD] >> TConsoleTests::TestRemoveTenantWithBorrowedStorageUnits [GOOD] >> TConsoleTests::TestListTenants >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_TurnOff [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_Generations ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionSwitchDatabases [GOOD] Test command err: 2025-06-30T09:18:36.804932Z :WriteSessionNoAvailableDatabase INFO: Random seed for debugging is 1751275116804925 2025-06-30T09:18:36.922176Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669351767627432:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:36.922199Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:36.931815Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669351292306178:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:36.931837Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/002d30/r3tmp/tmpyR93Lq/pdisk_1.dat 2025-06-30T09:18:36.967386Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:36.972953Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:36.993996Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:37.006991Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 30456, node 1 2025-06-30T09:18:37.020315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:37.020353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:37.022067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:37.022627Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002d30/r3tmp/yandexa9atuU.tmp 2025-06-30T09:18:37.022642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002d30/r3tmp/yandexa9atuU.tmp 2025-06-30T09:18:37.022730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002d30/r3tmp/yandexa9atuU.tmp 2025-06-30T09:18:37.022822Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:37.029175Z INFO: TTestServer started on Port 64075 GrpcPort 30456 TClient is connected to server localhost:64075 PQClient connected to localhost:30456 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:18:37.067588Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:37.067635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:18:37.068644Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:37.069357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:37.070106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... waiting... waiting... waiting... 2025-06-30T09:18:37.435810Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669356062595403:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.435850Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.436012Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669356062595439:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.437167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:37.441157Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669356062595473:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.441228Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:37.449318Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669356062595441:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:18:37.507891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.507430Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669355587273757:2273], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.508274Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=NmMwM2Y4YzItOTYzMzU5ZGMtZGFiMWNhMmItYzU0N2Y3OGI=, ActorId: [2:7521669355587273717:2267], ActorState: ExecuteState, TraceId: 01jz0236xg3g49557b08xz5snh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.508882Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.512716Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669356062595566:2673] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:37.517262Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669356062595582:2312], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:37.517815Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=OTI3ODQ4YmUtOTU1ZmEyMDQtNDMyOGJhN2YtNzhkZjUyM2Q=, ActorId: [1:7521669356062595400:2294], ActorState: ExecuteState, TraceId: 01jz0236vndq3r0j4gs0wysz9t, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:37.517974Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:37.581465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:37.671681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first Get ... 51458f3-d0ff7c1c-65178d9f-248bc95_0 2025-06-30T09:20:32.337881Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-30T09:20:32.337900Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1751275232337 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:20:32.337943Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session established. Init response: last_seq_no: 2 session_id: "src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0" supported_codecs { codecs: 1 codecs: 2 codecs: 3 } 2025-06-30T09:20:33.334699Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7521669851634312286:3954] (SourceId=src_id, PreferedPartition=(NULL)) Update the table 2025-06-30T09:20:33.340889Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7521669851634312286:3954] (SourceId=src_id, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=1 Status=SUCCESS 2025-06-30T09:20:33.340910Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7521669851634312286:3954] (SourceId=src_id, PreferedPartition=(NULL)) Start idle 2025-06-30T09:20:36.111863Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=320, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:20:41.112017Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=320, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:20:46.112194Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=320, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:20:51.083651Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186224037893][rt3.dc1--test-topic] TPersQueueReadBalancer::HandleWakeup 2025-06-30T09:20:51.083692Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][rt3.dc1--test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 4 2025-06-30T09:20:51.083964Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 TotalPartitions: 1 SourceIdMaxCounts: 6000000 } 2025-06-30T09:20:51.084221Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][rt3.dc1--test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 4 DataSize: 0 UsedReserveSize: 0 2025-06-30T09:20:51.084261Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][rt3.dc1--test-topic] ProcessPendingStats. PendingUpdates size 1 2025-06-30T09:20:51.112439Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=320, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 >>> Ready to answer: ok 2025-06-30T09:20:52.458423Z :DEBUG: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Write 1 messages with Id from 1 to 1 >>> Got event: ReadyToAcceptEvent 2025-06-30T09:20:52.458831Z :DEBUG: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Write session: try to update token 2025-06-30T09:20:52.458850Z :DEBUG: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Send 1 message(s) (0 left), first sequence number is 3 2025-06-30T09:20:52.459161Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-30T09:20:52.459281Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-30T09:20:52.459327Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:20:52.459340Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:20:52.459389Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 1 2025-06-30T09:20:52.459442Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-30T09:20:52.459533Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-30T09:20:52.459540Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-30T09:20:52.459560Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72075186224037892] got client message topic: rt3.dc1--test-topic partition: 0 SourceId: '\0src_id' SeqNo: 3 partNo : 0 messageNo: 1 size 98 offset: -1 2025-06-30T09:20:52.459622Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob processing sourceId '\0src_id' seqNo 3 partNo 0 2025-06-30T09:20:52.459682Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob complete sourceId '\0src_id' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 172 count 1 nextOffset 3 batches 1 2025-06-30T09:20:52.459743Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Add new write blob: topic 'rt3.dc1--test-topic' partition 0 compactOffset 2,1 HeadOffset 2 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000? size 160 WTime 1751275252459 2025-06-30T09:20:52.459780Z node 3 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:20:52.459800Z node 3 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 160 2025-06-30T09:20:52.460707Z node 3 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. 
Partition 0 offset 2 count 1 size 160 actorID [3:7521669417842607616:2443] 2025-06-30T09:20:52.460779Z node 3 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' size 160 2025-06-30T09:20:52.460780Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1382: [PQ: 72075186224037892] Topic 'rt3.dc1--test-topic' counters. CacheSize 480 CachedBlobs 3 2025-06-30T09:20:52.460798Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 105 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:20:52.460818Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:20:52.460834Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0src_id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-30T09:20:52.460841Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-30T09:20:52.460863Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-30T09:20:52.460864Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=480, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:20:52.461128Z :DEBUG: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-30T09:20:52.461196Z :DEBUG: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 3 written { offset: 2 } } write_statistics { persisting_time { nanos: 1000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-30T09:20:52.461204Z :DEBUG: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] OnAck: seqNo=1, txId=? 
2025-06-30T09:20:52.461208Z :DEBUG: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Write session: acknoledged message 1 2025-06-30T09:20:52.461578Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0 grpc read done: success: 0 data: 2025-06-30T09:20:52.461587Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0 grpc read failed 2025-06-30T09:20:52.461593Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0 grpc closed 2025-06-30T09:20:52.461598Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0 is DEAD 2025-06-30T09:20:52.461737Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:20:52.461783Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7521669851634312318:3954] destroyed 2025-06-30T09:20:52.461804Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:20:52.462045Z :DEBUG: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 1, Msg: Cancelled on the server side, Details: , InternalError: 0 2025-06-30T09:20:52.462078Z :ERROR: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Got error. Status: CLIENT_CANCELLED, Description:
: Error: GRpc error: (1): Cancelled on the server side 2025-06-30T09:20:52.462081Z :ERROR: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Write session will not restart after a fatal error 2025-06-30T09:20:52.462086Z :INFO: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Write session will now close 2025-06-30T09:20:52.462103Z :DEBUG: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Write session: aborting 2025-06-30T09:20:52.466716Z :DEBUG: [/Root] TraceId [] SessionId [src_id|51458f3-d0ff7c1c-65178d9f-248bc95_0] MessageGroupId [src_id] Write session: destroy ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/console/ut/unittest >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithDb [GOOD] Test command err: 2025-06-30T09:20:40.679402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:40.679433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:40.679440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:40.679446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:40.679459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:40.679465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:40.679476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:40.679501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:40.679625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:40.679712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:40.684166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:40.684193Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:40.687324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:40.687410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:40.687452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046578944 
2025-06-30T09:20:40.689006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:40.689127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:40.689228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:40.689338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: dc-1, pathId: [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:40.690018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:40.690062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:40.690258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:40.690269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:40.690368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:40.690382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046578944, domainId: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:40.690389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:40.690405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.731036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "hdd" } StoragePools { Name: "" Kind: "hdd-3" } StoragePools { Name: "" Kind: "hdd-1" } StoragePools { Name: "" Kind: "hdd-2" } } } TxId: 1 TabletId: 72057594046578944 , at schemeshard: 72057594046578944 2025-06-30T09:20:40.731116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //dc-1, opId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.731188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 0 2025-06-30T09:20:40.731196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046578944, LocalPathId: 1] source path: 2025-06-30T09:20:40.731236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046578944 2025-06-30T09:20:40.731251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 
72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:40.732148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046578944 PathId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:40.732207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //dc-1 2025-06-30T09:20:40.732258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.732270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046578944 2025-06-30T09:20:40.732276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:40.732282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:40.732732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.732745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046578944 2025-06-30T09:20:40.732750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:40.733161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.733173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.733179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:40.733187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:40.733872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046578944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:40.734320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046578944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:40.734374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:40.734593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:40.734601Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:20:40.734606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:40.959914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:40.959981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046578944, at schemeshard: 72057594046578944 2025-06-30T09:20:40.959995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:40.960081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:40.960093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:40.960132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 1 2025-06-30T09:20:40.960146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:40.960866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:40.960883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046578944, txId: 1, path id: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:40.96093 ... 
TxTrimUnusedSlots 2025-06-30T09:20:52.590149Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:52.590166Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:52.613121Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:52.613167Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:52.624080Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:52.624137Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:52.624153Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:52.624162Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:52.624189Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:52.624196Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:52.624201Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:52.624207Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:52.635060Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:52.635108Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:52.645983Z node 24 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:52.646036Z node 24 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:52.646268Z node 24 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:52.646277Z node 24 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:20:52.646370Z node 24 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:52.646376Z node 24 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue 
processed 2025-06-30T09:20:52.646664Z node 24 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 24 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 24 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:52.646777Z node 24 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/0043a8/r3tmp/tmp3ouFBZ/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } } } } 2025-06-30T09:20:52.646838Z node 24 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 24:1 Path# /home/runner/.ya/build/build_root/cl3w/0043a8/r3tmp/tmp3ouFBZ/pdisk_1.dat 2025-06-30T09:20:52.647094Z node 24 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 24 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:52.647127Z node 24 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 24 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:52.647144Z node 24 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 24 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:52.647188Z node 24 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 24 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:52.647210Z node 24 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 24 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:52.647600Z node 24 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 24 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:52.647656Z node 24 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 24 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:52.658524Z node 24 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 24 Devices# [] 2025-06-30T09:20:52.658677Z node 24 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:52.658784Z node 24 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:52.658800Z node 24 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:52.658836Z node 24 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1/users/tenant-2 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:52.658846Z node 24 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1/users/tenant-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:52.658868Z node 24 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 
2025-06-30T09:20:52.659576Z node 24 :LOCAL DEBUG: local.cpp:1117: TDomainLocal(dc-1): Send resolve request for /dc-1/users/tenant-1 to schemeshard 72057594046578944 2025-06-30T09:20:52.659676Z node 24 :LOCAL DEBUG: local.cpp:1258: TDomainLocal(dc-1): TDomainLocal::TEvClientConnected for dc-1 shard 72057594046578944 2025-06-30T09:20:52.659679Z node 24 :LOCAL DEBUG: local.cpp:1117: TDomainLocal(dc-1): Send resolve request for /dc-1/users/tenant-1 to schemeshard 72057594046578944 2025-06-30T09:20:52.659682Z node 24 :LOCAL DEBUG: local.cpp:1117: TDomainLocal(dc-1): Send resolve request for /dc-1/users/tenant-2 to schemeshard 72057594046578944 2025-06-30T09:20:52.659769Z node 24 :LOCAL DEBUG: local.cpp:1285: TDomainLocal(dc-1): HandleResolve from schemeshard 72057594046578944: Status: StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "/dc-1/users/tenant-1" PathId: 100 SchemeshardId: 72057594046578944 PathType: EPathTypeSubDomain } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 100 DomainKey { SchemeShard: 72057594046578944 PathId: 100 } } } 2025-06-30T09:20:52.659786Z node 24 :LOCAL DEBUG: local.cpp:1223: TDomainLocal(dc-1): Binding tenant /dc-1/users/tenant-1 to hive 72057594046578946 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:52.659856Z node 24 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:52.659860Z node 24 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:52.659870Z node 24 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[24:342:2308] 2025-06-30T09:20:52.659890Z node 24 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1/users/tenant-1 2025-06-30T09:20:52.659894Z node 24 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [24:332:2303] 2025-06-30T09:20:52.659918Z node 24 :LOCAL DEBUG: local.cpp:1285: TDomainLocal(dc-1): HandleResolve from schemeshard 72057594046578944: Status: StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "/dc-1/users/tenant-1" PathId: 100 SchemeshardId: 72057594046578944 PathType: EPathTypeSubDomain } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 100 DomainKey { SchemeShard: 72057594046578944 PathId: 100 } } } 2025-06-30T09:20:52.659922Z node 24 :LOCAL DEBUG: local.cpp:1289: TDomainLocal(dc-1): Missing task for /dc-1/users/tenant-1 2025-06-30T09:20:52.659942Z node 24 :LOCAL DEBUG: local.cpp:1285: TDomainLocal(dc-1): HandleResolve from schemeshard 72057594046578944: Status: StatusSuccess Path: "/dc-1/users/tenant-2" PathDescription { Self { Name: "/dc-1/users/tenant-2" PathId: 101 SchemeshardId: 72057594046578944 PathType: EPathTypeSubDomain } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 101 DomainKey { SchemeShard: 72057594046578944 PathId: 101 } } } 2025-06-30T09:20:52.659950Z node 24 :LOCAL DEBUG: local.cpp:1223: TDomainLocal(dc-1): Binding tenant /dc-1/users/tenant-2 to hive 72057594046578946 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:52.659985Z node 24 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:52.659991Z node 24 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:52.659996Z node 24 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[24:348:2310] 2025-06-30T09:20:52.660011Z node 24 :TENANT_POOL 
NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1/users/tenant-2 2025-06-30T09:20:52.660014Z node 24 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [24:332:2303] 2025-06-30T09:20:52.660206Z node 24 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[24:342:2308]} 2025-06-30T09:20:52.660211Z node 24 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[24:348:2310]} 2025-06-30T09:20:52.660226Z node 24 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:52.660232Z node 24 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:52.660234Z node 24 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:52.660246Z node 24 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:52.660248Z node 24 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:52.660250Z node 24 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:52.688268Z node 24 :BS_CONTROLLER DEBUG: {BSC19@console_interaction.cpp:74} Console proposed config response Response# {Status: ReverseCommit ConsoleConfigVersion: 0 YAML: "" } >> TConsoleTests::TestTenantConfigConsistency [GOOD] >> TNodeBrokerTest::NodeNameReuseRestartWithHostChanges >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_Generations [GOOD] >> TFlatTableExecutor_CachePressure::TestNotEnoughLocalCache [GOOD] >> TFlatTableExecutor_Cold::ColdBorrowScan [GOOD] >> TNodeBrokerTest::NodesV2BackMigration >> TNodeBrokerTest::TestListNodes >> TNodeBrokerTest::NoEffectBeforeCommit [GOOD] |80.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut >> TChargeBTreeIndex::OneNode_History [GOOD] >> TConsoleTests::TestSetConfig >> TFlatTableExecutor_ColumnGroups::TestManyRows >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-true >> TNodeBrokerTest::DoNotReuseDynnodeIdsBelowMinDynamicNodeId [GOOD] >> TNodeBrokerTest::RegistrationPipeliningNodeName >> TNodeBrokerTest::ShiftIdRangeRemoveExpired [GOOD] >> TNodeBrokerTest::SeveralNodesSubscribersPerPipe [GOOD] >> TNodeBrokerTest::ExtendLeaseRestartRace >> GroupReconfiguration::ReassignsDoNotCauseErrorMessagesBlock4Plus2 [GOOD] >> TChargeBTreeIndex::OneNode_Groups_History >> TNodeBrokerTest::FixedNodeId >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TNodeBrokerTest::NodesMigration1000Nodes >> TPartBtreeIndexIteration::FewNodes_History [GOOD] >> TFlatTableExecutor_ColumnGroups::TestManyRows [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TNodeBrokerTest::ConfigPipelining >> TKeyValueTest::TestInlineCopyRangeWorks >> GroupSizeInUnits::SimplestErasureNone [GOOD] >> TPartBtreeIndexIteration::FewNodes_Sticky >> TFlatTableExecutor_CompactionScan::TestCompactionScan [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-false >> IncorrectQueries::InvalidPartID >> TFlatTableExecutor_CompressedSelectRows::TestCompressedSelectRows [GOOD] >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionDirect >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionDirect [GOOD] >> TFlatTableExecutorGC::TestGCVectorDeduplicaton [GOOD] >> 
TxUsage::Sinks_Oltp_WriteToTopic_4_Table [GOOD] |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |80.9%| [LD] {RESULT} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |80.9%| [TA] $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/test-results/unittest/{meta.json ... results_accumulator.log} |80.9%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NoEffectBeforeCommit [GOOD] Test command err: 2025-06-30T09:20:53.376853Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.380060Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.380094Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.380114Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.380141Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.380167Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.380183Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.383298Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.383346Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.383380Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.383412Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.383450Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.383472Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.383491Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.383562Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.383592Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.383607Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.384414Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.384437Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.384454Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle 
NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.384470Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.384484Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.384527Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.385397Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.385478Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.385520Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.385549Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.385579Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.385593Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.385607Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.385618Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.386122Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386199Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386221Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386242Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386257Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386289Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386316Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.386414Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386438Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386820Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386856Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386868Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386887Z node 7 :NAMESERVICE 
DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386947Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.386993Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.389632Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.389650Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.390161Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.390246Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.390291Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.390401Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.390413Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.390664Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.390826Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.390948Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.390976Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.391125Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.392865Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.393079Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.393148Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.393233Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.393351Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.393424Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.393789Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.394187Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.406205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:53.406226Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:53.409790Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:53.410116Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:53.410187Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:53.410337Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:53.410671Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:53.410694Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:53.410725Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:53.410751Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:53.410757Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:53.410771Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Comp ... 
: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 } 2025-06-30T09:20:53.665766Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:23:2070], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [1:647:2222] DomainOwnerId: 
72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:20:53.665803Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:23:2070], cacheItem# { Subscriber: { Subscriber: [1:647:2222] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:53.665895Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:654:2223], recipient# [1:646:2185], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:53.665925Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:53.665948Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:53.665965Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:646:2185], Recipient [1:571:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:53.665970Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:53.665988Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:53.665991Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host1:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:53.666074Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v2 host1:1001 to database state=Active resolvehost=host1.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC 
servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:53.666116Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:53.666122Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 1 to 2 2025-06-30T09:20:53.666126Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=2 ... waiting for commit ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... waiting for commit (done) 2025-06-30T09:20:53.676500Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:656:2225], Recipient [1:571:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.676530Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:571:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:53.676535Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.676546Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:53.676596Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:658:2227], Recipient [1:571:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.676616Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:637:2213], Recipient [1:571:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:53.676620Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:53.676632Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } ... 
unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR 2025-06-30T09:20:53.677219Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:53.677230Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:53.677237Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-30T09:20:53.677241Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v2 host1:1001 to epoch cache 2025-06-30T09:20:53.677258Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v2 to update nodes log 2025-06-30T09:20:53.677283Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-0" } 2025-06-30T09:20:53.677369Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:662:2231], Recipient [1:571:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.677387Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:571:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:53.677390Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.677397Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.2 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:53.677456Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:664:2233], Recipient [1:571:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.677472Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:637:2213], Recipient [1:571:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:53.677476Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:53.677489Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-0" } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SeveralNodesSubscribersPerPipe [GOOD] Test command err: 2025-06-30T09:20:53.390780Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.394231Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.394268Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.394299Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:53.394336Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.394362Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.394380Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.398071Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.398142Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.398187Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.398220Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.398258Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.398284Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.398304Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.398368Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.398408Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.398430Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.399363Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.399396Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.399425Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.399445Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.399463Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.399512Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.400439Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.400534Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.400573Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.400606Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.400639Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.400654Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.400669Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle 
NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.400681Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.401179Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.401257Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.401283Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.401299Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.401321Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.401354Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.401383Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.401500Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.401528Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.401946Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.402011Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.402036Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.402047Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.402087Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.404990Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.405012Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.405022Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.405554Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.405583Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.405611Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.406075Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.406235Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.406292Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.406335Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.406423Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.418630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:53.418651Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:53.422483Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:53.422835Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:53.422904Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:53.423064Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:53.423745Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:53.423897Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:53.423940Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:53.423954Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:53.423960Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:53.423974Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:53.423987Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:53.423991Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:53.423995Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:53.423998Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:53.424010Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:53.424014Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:53.455540Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:53.455577Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.025000Z 2025-06-30T09:20:53.455587Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-30T09:20:53.455595Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z nodes=0 expired=0 removed=0 2025-06-30T09:20:53.640489Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:615:2206], Rec ... 
istNodes 2025-06-30T09:20:53.716230Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:53.716321Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039952, Sender [1:629:2215], Recipient [1:561:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest { SeqNo: 1 } 2025-06-30T09:20:53.716327Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:262: StateWork, processing event TEvNodeBroker::TEvSyncNodesRequest 2025-06-30T09:20:53.716395Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:663:2241], Recipient [1:561:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.716409Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2215], Recipient [1:561:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:53.716415Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.716421Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:53.716479Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039952, Sender [1:647:2226], Recipient [1:561:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest { SeqNo: 1 } 2025-06-30T09:20:53.716484Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:262: StateWork, processing event TEvNodeBroker::TEvSyncNodesRequest 2025-06-30T09:20:53.716540Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:665:2243], Recipient [1:561:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.716560Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2215], Recipient [1:561:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:53.716565Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.716572Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:53.716634Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:667:2245], Recipient [1:561:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.716653Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2215], Recipient [1:561:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:53.716658Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.716665Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:53.716722Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:645:2224], Recipient [1:561:2185]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:53.716730Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:647:2226], seqNo: 1, server pipe id: 
[1:645:2224] 2025-06-30T09:20:53.716735Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:629:2215], seqNo: 1, server pipe id: [1:645:2224] 2025-06-30T09:20:53.716766Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:669:2247], Recipient [1:561:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.716806Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:629:2215], Recipient [1:561:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host3" Port: 1001 ResolveHost: "host3.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:53.716812Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:53.716823Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host3" Port: 1001 ResolveHost: "host3.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:53.716871Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:23:2070], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:53.716900Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:23:2070], cacheItem# { Subscriber: { Subscriber: [1:633:2218] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:53.716952Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:671:2248], recipient# [1:670:2185], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:53.716971Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params 
{ Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:53.716986Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host3" Port: 1001 ResolveHost: "host3.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:53.717003Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:670:2185], Recipient [1:561:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:53.717008Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:53.717027Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:53.717033Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host3:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:53.717064Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1026.v4 host3:1001 to database state=Active resolvehost=host3.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=2 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:53.717149Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1026.v4 host3:1001 2025-06-30T09:20:53.717157Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 3 to 4 2025-06-30T09:20:53.717162Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=4 2025-06-30T09:20:53.728618Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:53.728648Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1026.v4 host3:1001 2025-06-30T09:20:53.728662Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 3 to 4 2025-06-30T09:20:53.728668Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1026.v4 host3:1001 to epoch cache 2025-06-30T09:20:53.728701Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1026.v4 to update nodes log 2025-06-30T09:20:53.728755Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1026 Host: "host3" Port: 1001 ResolveHost: "host3.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200025000 Name: "slot-2" } 2025-06-30T09:20:53.739035Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:561:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:53.739067Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:54.255647Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:710:2258], Recipient [1:561:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.255698Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 
272039950, Sender [1:629:2215], Recipient [1:561:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 3 SeqNo: 2 } 2025-06-30T09:20:54.255704Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:54.255711Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:629:2215], seqNo: 2, version: 3, server pipe id: [1:710:2258] 2025-06-30T09:20:54.255719Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v3 -> v4 to [1:629:2215] 2025-06-30T09:20:54.255729Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:711:2259], Recipient [1:561:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.255740Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2215], Recipient [1:561:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:54.255743Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:54.255752Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.4 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ShiftIdRangeRemoveExpired [GOOD] Test command err: 2025-06-30T09:20:52.325268Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.329138Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.329190Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.329220Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.329258Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.329292Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.329314Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.334111Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.334191Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.334226Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.334259Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.334319Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.334348Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.334375Z node 1 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.334442Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.334477Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.334495Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.335464Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.335503Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.335541Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.335560Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.335580Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.335636Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.336551Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.336642Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.336685Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.336716Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.336750Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.336766Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.336780Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.336793Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.337365Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.337451Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.337476Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.337496Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.337512Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.337542Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.337571Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.337606Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.337629Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.338167Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.338197Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.338246Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.338266Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.338277Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.338287Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.338376Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.341073Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.341544Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.341568Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.341577Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.341704Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.341740Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.341983Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.342245Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.342359Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.342378Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.342489Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.342550Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:52.342650Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.343372Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.343440Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.343631Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.343658Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.358978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:52.359002Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:52.364556Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:52.365147Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:52.365257Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:52.365507Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:52.366105Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:52.366144Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:52.366197Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:52.366216Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:52.366222Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:52.366239Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:52.366284Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:52.366290Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:52.366295Z node 1 :NODE_BROKER DEB ... 
erverConnected 2025-06-30T09:20:53.805406Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:53.805409Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.805412Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:53.805449Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:803:2323], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.805459Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 10 } 2025-06-30T09:20:53.805462Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.805465Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:53.805500Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:805:2325], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.805509Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:53.805511Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.805514Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:53.805561Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:807:2327], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.805572Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 9 } 2025-06-30T09:20:53.805575Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.805578Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:53.805625Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:809:2329], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.805637Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:53.805640Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.805643Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z 
- 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:53.805680Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:811:2331], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.805696Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 8 } 2025-06-30T09:20:53.805702Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.805708Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:53.805765Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:813:2333], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.805780Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 11 SeqNo: 2 } 2025-06-30T09:20:53.805783Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:53.805788Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:635:2213], seqNo: 2, version: 11, server pipe id: [1:813:2333] 2025-06-30T09:20:53.805792Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v11 -> v11 to [1:635:2213] 2025-06-30T09:20:53.805848Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:813:2333], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:53.805854Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:635:2213], seqNo: 2, server pipe id: [1:813:2333] 2025-06-30T09:20:53.805880Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:815:2335], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.805892Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 10 SeqNo: 3 } 2025-06-30T09:20:53.805894Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:53.805897Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:635:2213], seqNo: 3, version: 10, server pipe id: [1:815:2335] 2025-06-30T09:20:53.805900Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v10 -> v11 to [1:635:2213] 2025-06-30T09:20:53.805944Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:815:2335], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:53.805947Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:635:2213], seqNo: 3, server pipe id: [1:815:2335] 2025-06-30T09:20:53.805961Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:817:2337], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.805972Z node 1 
:NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 9 SeqNo: 4 } 2025-06-30T09:20:53.805975Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:53.805977Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:635:2213], seqNo: 4, version: 9, server pipe id: [1:817:2337] 2025-06-30T09:20:53.805980Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v9 -> v11 to [1:635:2213] 2025-06-30T09:20:53.806011Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:817:2337], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:53.806014Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:635:2213], seqNo: 4, server pipe id: [1:817:2337] 2025-06-30T09:20:53.806028Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:819:2339], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.806040Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 8 SeqNo: 5 } 2025-06-30T09:20:53.806045Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:53.806049Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:635:2213], seqNo: 5, version: 8, server pipe id: [1:819:2339] 2025-06-30T09:20:53.806062Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v8 -> v11 to [1:635:2213] 2025-06-30T09:20:53.806098Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:819:2339], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:53.806102Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:635:2213], seqNo: 5, server pipe id: [1:819:2339] 2025-06-30T09:20:53.806130Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:821:2341], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.806145Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:53.806149Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:53.806172Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 14400023000 Name: "slot-0" } } 2025-06-30T09:20:53.806235Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:823:2343], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.806251Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:635:2213], Recipient [1:753:2281]: 
NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-30T09:20:53.806254Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:53.806264Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 14400023000 Name: "slot-1" } } 2025-06-30T09:20:53.806302Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:825:2345], Recipient [1:753:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.806312Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:635:2213], Recipient [1:753:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1026 } 2025-06-30T09:20:53.806315Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:53.806319Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TVersions::Wreck0Reverse [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:34.781703Z 00000.005 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.005 II| FAKE_ENV: Starting storage for BS group 0 00000.005 II| FAKE_ENV: Starting storage for BS group 1 00000.005 II| FAKE_ENV: Starting storage for BS group 2 00000.005 II| FAKE_ENV: Starting storage for BS group 3 00000.006 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.006 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} release 4194304b of static, Memory{0 dyn 0} 00000.006 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.007 NN| TABLET_SAUSAGECACHE: Update config MemoryLimit: 8388608 ReplacementPolicy: ThreeLeveledLRU 00000.007 NN| TABLET_SAUSAGECACHE: Switch replacement policy from S3FIFO to ThreeLeveledLRU 00000.007 NN| TABLET_SAUSAGECACHE: Switch replacement policy done from S3FIFO to ThreeLeveledLRU 00000.007 II| TABLET_SAUSAGECACHE: Limit memory consumer with 16777216TiB 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{2, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{3, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 1 for step 4 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{4, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 1 for step 5 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{5, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 1 for step 6 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{6, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} 
release 4194304b of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 1 for step 7 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{7, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:9} commited cookie 1 for step 8 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{8, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:10} commited cookie 1 for step 9 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{9, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:11} commited cookie 1 for step 10 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{10, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:12} commited cookie 1 for step 11 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{11, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:13} commited cookie 1 for step 12 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{12, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:14} commited cookie 1 for step 13 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{13, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:15} commited cookie 1 for step 14 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{14, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{ ... ual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 3 ] Cookie: 4 2025-06-30T09:20:36.113996Z node 35 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1265: Bootstrap with config MemoryLimit: 456 AsyncQueueInFlyLimit: 19 ... 
waiting for NKikimr::NSharedCache::TEvRequest 2025-06-30T09:20:36.114219Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:1] 2025-06-30T09:20:36.114228Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [35:5:2052] 2025-06-30T09:20:36.114248Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:5:2052] cookie 1 class AsyncLoad from cache [ ] already requested [ ] to request [ 1 2 3 4 5 ] 2025-06-30T09:20:36.114256Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 1 2 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 2025-06-30T09:20:36.114300Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:5:2052] cookie 2 class AsyncLoad from cache [ ] already requested [ ] to request [ 6 7 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-30T09:20:36.114314Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:2] 2025-06-30T09:20:36.114318Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:2] owner [35:5:2052] 2025-06-30T09:20:36.114325Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [35:5:2052] cookie 3 class AsyncLoad from cache [ ] already requested [ ] to request [ 10 11 12 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-30T09:20:36.114341Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [35:6:2053] 2025-06-30T09:20:36.114351Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:6:2053] cookie 4 class AsyncLoad from cache [ ] already requested [ 1 5 ] to request [ 9 10 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 2 ] Cookie: 20 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 2 ] Cookie: 20 ... 
waiting for NKikimr::NSharedCache::TEvUnregister 2025-06-30T09:20:36.114395Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:803: Unregister owner [35:5:2052] 2025-06-30T09:20:36.114404Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [35:5:2052] class AsyncLoad error RACE cookie 3 2025-06-30T09:20:36.114410Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:2] owner [35:5:2052] 2025-06-30T09:20:36.114416Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:1] owner [35:5:2052] class AsyncLoad error RACE cookie 1 2025-06-30T09:20:36.114421Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:1] owner [35:5:2052] class AsyncLoad error RACE cookie 2 2025-06-30T09:20:36.114427Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:1] owner [35:5:2052] 2025-06-30T09:20:36.114431Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:823: Remove owner [35:5:2052] ... waiting for NKikimr::NSharedCache::TEvUnregister (done) ... waiting for results #4 ... waiting for results #4 (done) Checking results#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1 PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1 PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 ... waiting for fetches #4 2025-06-30T09:20:36.114495Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 1 2 ] 2025-06-30T09:20:36.114504Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:1] class AsyncLoad cookie 1 2025-06-30T09:20:36.114511Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 5 9 ] ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 ... waiting for fetches #4 (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 5 9 ] Cookie: 20 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 5 9 ] Cookie: 20 ... waiting for fetches #4 2025-06-30T09:20:36.114548Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 5 9 ] 2025-06-30T09:20:36.114554Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:1] class AsyncLoad cookie 2 2025-06-30T09:20:36.114560Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 10 ] 2025-06-30T09:20:36.114567Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 3 2025-06-30T09:20:36.114582Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1012: Drop page collection [1:0:256:0:0:0:1] pages [ 2 ] owner [35:6:2053] ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 ... 
waiting for fetches #4 (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 10 ] Cookie: 10 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 10 ] Cookie: 10 ... waiting for results #4 2025-06-30T09:20:36.114619Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 10 ] 2025-06-30T09:20:36.114624Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:1050: Send page collection result [1:0:256:0:0:0:1] owner [35:6:2053] class AsyncLoad pages [ 1 5 9 10 ] cookie 4 ... waiting for results #4 (done) Checking results#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 5 9 10 ] Cookie: 4 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 5 9 10 ] Cookie: 4 2025-06-30T09:20:36.126998Z node 36 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1265: Bootstrap with config MemoryLimit: 456 AsyncQueueInFlyLimit: 19 ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-30T09:20:36.127092Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:1] 2025-06-30T09:20:36.127100Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [36:5:2052] 2025-06-30T09:20:36.127115Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [36:5:2052] cookie 1 class AsyncLoad from cache [ ] already requested [ ] to request [ 1 ] 2025-06-30T09:20:36.127124Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 1 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-30T09:20:36.127151Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:2] 2025-06-30T09:20:36.127156Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:2] owner [36:6:2053] 2025-06-30T09:20:36.127164Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [36:6:2053] cookie 2 class AsyncLoad from cache [ ] already requested [ ] to request [ 10 11 ] 2025-06-30T09:20:36.127170Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:2] async queue pages [ 10 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 2025-06-30T09:20:36.127203Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [36:6:2053] cookie 3 class AsyncLoad from cache [ ] already requested [ ] to request [ 12 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for fetches #3 ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 ... waiting for fetches #3 (done) Checking fetches#3 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 10 PageCollection: [1:0:256:0:0:0:2] Pages: [ 10 ] Cookie: 10 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 10 PageCollection: [1:0:256:0:0:0:2] Pages: [ 10 ] Cookie: 10 ... 
waiting for NKikimr::NSharedCache::TEvUnregister 2025-06-30T09:20:36.127270Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:803: Unregister owner [36:6:2053] 2025-06-30T09:20:36.127280Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [36:6:2053] class AsyncLoad error RACE cookie 2 2025-06-30T09:20:36.127285Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [36:6:2053] class AsyncLoad error RACE cookie 3 2025-06-30T09:20:36.127291Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:2] owner [36:6:2053] 2025-06-30T09:20:36.127296Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:823: Remove owner [36:6:2053] ... waiting for NKikimr::NSharedCache::TEvUnregister (done) ... waiting for results #3 ... waiting for results #3 (done) Checking results#3 Expected: PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 Actual: PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 ... waiting for results #3 2025-06-30T09:20:36.127346Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 1 ] 2025-06-30T09:20:36.127354Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:1050: Send page collection result [1:0:256:0:0:0:1] owner [36:5:2052] class AsyncLoad pages [ 1 ] cookie 1 2025-06-30T09:20:36.127364Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 2 2025-06-30T09:20:36.127369Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 3 ... 
waiting for results #3 (done) Checking results#3 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 1 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 1 Checking fetches#3 Expected: Actual: 2025-06-30T09:20:36.137603Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:2] status OK pages [ 10 ] Checking results#3 Expected: Actual: Checking fetches#3 Expected: Actual: >> TNodeBrokerTest::NodesMigration999Nodes [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TFlatTableExecutorGC::TestGCVectorDeduplicaton [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:35.976742Z 00000.004 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.004 II| FAKE_ENV: TNanny initiates TDummy tablet 72057594037927937 birth 00000.004 II| FAKE_ENV: Starting storage for BS group 0 00000.004 II| FAKE_ENV: Starting storage for BS group 1 00000.004 II| FAKE_ENV: Starting storage for BS group 2 00000.004 II| FAKE_ENV: Starting storage for BS group 3 00000.005 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.006 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.027 II| TABLET_EXECUTOR: LSnap{1:2, on 2:301, 5757b, wait} done, Waste{2:0, 678532b +(0, 0b), 300 trc} 00000.031 II| TABLET_EXECUTOR: Leader{1:2:343} starting compaction 00000.031 II| TABLET_EXECUTOR: Leader{1:2:344} starting Scan{1 on 3, Compact{1.2.343, eph 1}} 00000.031 II| TABLET_EXECUTOR: Leader{1:2:344} started compaction 1 00000.031 II| TABLET_OPS_HOST: Scan{1 on 3, Compact{1.2.343, eph 1}} begin on TSubset{head 2, 1m 0p 0c} 00000.032 II| TABLET_OPS_HOST: Scan{1 on 3, Compact{1.2.343, eph 1}} end=Done, 113r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 17 of 18 ~1p 00000.032 II| OPS_COMPACT: Compact{1.2.343, eph 1} end=Done, 13 blobs 86r (max 113), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 17 +9, (157344 6705 229124)b }, ecr=1.000 00000.032 II| TABLET_EXECUTOR: Leader{1:2:345} Compact 1 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 343, product {1 parts epoch 2} done 00000.038 II| TABLET_EXECUTOR: Leader{1:2:382} starting compaction 00000.038 II| TABLET_EXECUTOR: Leader{1:2:383} starting Scan{3 on 2, Compact{1.2.382, eph 1}} 00000.038 II| TABLET_EXECUTOR: Leader{1:2:383} started compaction 3 00000.038 II| TABLET_OPS_HOST: Scan{3 on 2, Compact{1.2.382, eph 1}} begin on TSubset{head 2, 1m 0p 0c} 00000.039 II| TABLET_OPS_HOST: Scan{3 on 2, Compact{1.2.382, eph 1}} end=Done, 105r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 17 of 23 ~1p 00000.039 II| OPS_COMPACT: Compact{1.2.382, eph 1} end=Done, 9 blobs 75r (max 105), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 17 +5, (160529 40248 201748)b }, ecr=1.000 00000.039 II| TABLET_EXECUTOR: Leader{1:2:384} Compact 3 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 382, product {1 parts epoch 2} done 00000.067 II| TABLET_EXECUTOR: LSnap{1:2, on 2:601, 8677b, wait} done, Waste{2:0, 1708123b +(149, 81693b), 300 trc} 00000.076 II| TABLET_EXECUTOR: Leader{1:2:686} starting compaction 00000.076 II| TABLET_EXECUTOR: Leader{1:2:687} starting Scan{5 on 3, Compact{1.2.686, eph 2}} 00000.076 II| TABLET_EXECUTOR: Leader{1:2:687} started compaction 5 
00000.076 II| TABLET_OPS_HOST: Scan{5 on 3, Compact{1.2.686, eph 2}} begin on TSubset{head 3, 1m 0p 0c} 00000.077 II| TABLET_OPS_HOST: Scan{5 on 3, Compact{1.2.686, eph 2}} end=Done, 108r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 16 of 19 ~1p 00000.078 II| OPS_COMPACT: Compact{1.2.686, eph 2} end=Done, 8 blobs 108r (max 108), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 16 +6, (160597 0 197476)b }, ecr=1.000 00000.078 II| TABLET_EXECUTOR: Leader{1:2:688} Compact 5 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 686, product {1 parts epoch 3} done 00000.078 II| TABLET_EXECUTOR: Leader{1:2:689} starting compaction 00000.078 II| TABLET_EXECUTOR: Leader{1:2:690} starting Scan{7 on 3, Compact{1.2.689, eph 2}} 00000.078 II| TABLET_EXECUTOR: Leader{1:2:690} started compaction 7 00000.078 II| TABLET_OPS_HOST: Scan{7 on 3, Compact{1.2.689, eph 2}} begin on TSubset{head 0, 0m 2p 0c} 00000.080 II| TABLET_OPS_HOST: Scan{7 on 3, Compact{1.2.689, eph 2}} end=Done, 164r seen, TFwd{fetch=309KiB,saved=309KiB,usage=309KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=6}, trace 37 of 48 ~3p 00000.080 II| OPS_COMPACT: Compact{1.2.689, eph 2} end=Done, 4 blobs 132r (max 194), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 37 +0, (272170 6705 329118)b }, ecr=1.000 00000.080 II| TABLET_EXECUTOR: Leader{1:2:692} Compact 7 on TGenCompactionParams{3: gen 1 epoch 0, 2 parts} step 689, product {1 parts epoch 0} done 00000.082 II| TABLET_EXECUTOR: Leader{1:2:707} starting compaction 00000.082 II| TABLET_EXECUTOR: Leader{1:2:708} starting Scan{9 on 2, Compact{1.2.707, eph 2}} 00000.082 II| TABLET_EXECUTOR: Leader{1:2:708} started compaction 9 00000.082 II| TABLET_OPS_HOST: Scan{9 on 2, Compact{1.2.707, eph 2}} begin on TSubset{head 3, 1m 0p 0c} 00000.082 II| TABLET_OPS_HOST: Scan{9 on 2, Compact{1.2.707, eph 2}} end=Done, 107r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 18 of 20 ~1p 00000.083 II| OPS_COMPACT: Compact{1.2.707, eph 2} end=Done, 9 blobs 107r (max 107), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 18 +5, (152759 20255 212331)b }, ecr=1.000 00000.083 II| TABLET_EXECUTOR: Leader{1:2:709} Compact 9 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 707, product {1 parts epoch 3} done 00000.083 II| TABLET_EXECUTOR: Leader{1:2:711} starting compaction 00000.083 II| TABLET_EXECUTOR: Leader{1:2:712} starting Scan{11 on 2, Compact{1.2.711, eph 2}} 00000.083 II| TABLET_EXECUTOR: Leader{1:2:712} started compaction 11 00000.083 II| TABLET_OPS_HOST: Scan{11 on 2, Compact{1.2.711, eph 2}} begin on TSubset{head 0, 0m 2p 0c} 00000.085 II| TABLET_OPS_HOST: Scan{11 on 2, Compact{1.2.711, eph 2}} end=Done, 152r seen, TFwd{fetch=304KiB,saved=304KiB,usage=285KiB,after=19.6KiB,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 37 of 45 ~3p 00000.085 II| OPS_COMPACT: Compact{1.2.711, eph 2} end=Done, 4 blobs 126r (max 182), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 37 +0, (239405 40400 346393)b }, ecr=1.000 00000.085 II| TABLET_EXECUTOR: Leader{1:2:714} Compact 11 on TGenCompactionParams{2: gen 1 epoch 0, 2 parts} step 711, product {1 parts epoch 0} done 00000.116 II| TABLET_EXECUTOR: LSnap{1:2, on 2:901, 10391b, wait} done, Waste{2:0, 2485186b +(171, 869067b), 300 trc} 00000.132 II| TABLET_EXECUTOR: Leader{1:2:1036} starting compaction 00000.132 II| 
TABLET_EXECUTOR: Leader{1:2:1037} starting Scan{13 on 3, Compact{1.2.1036, eph 3}} 00000.132 II| TABLET_EXECUTOR: Leader{1:2:1037} started compaction 13 00000.132 II| TABLET_OPS_HOST: Scan{13 on 3, Compact{1.2.1036, eph 3}} begin on TSubset{head 4, 1m 0p 0c} 00000.133 II| TABLET_OPS_HOST: Scan{13 on 3, Compact{1.2.1036, eph 3}} end=Done, 108r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 16 of 21 ~1p 00000.134 II| OPS_COMPACT: Compact{1.2.1036, eph 3} end=Done, 10 blobs 108r (max 108), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 16 +6, (168752 26956 205627)b }, ecr=1.000 00000.134 II| TABLET_EXECUTOR: Leader{1:2:1038} Compact 13 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 1036, product {1 parts epoch 4} done 00000.141 II| TABLET_EXECUTOR: Leader{1:2:1063} starting compaction 00000.141 II| TABLET_EXECUTOR: Leader{1:2:1064} starting Scan{15 on 2, Compact{1.2.1063, eph 3}} 00000.141 II| TABLET_EXECUTOR: Leader{1:2:1064} started compaction 15 00000.141 II| TABLET_OPS_HOST: Scan{15 on 2, Compact{1.2.1063, eph 3}} begin on TSubset{head 4, 1m 0p 0c} 00000.142 II| TABLET_OPS_HOST: Scan{15 on 2, Compact{1.2.1063, eph 3}} end=Done, 106r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 17 of 24 ~1p 00000.142 II| OPS_COMPACT: Compact{1.2.1063, eph 3} end=Done, 11 blobs 106r (max 106), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 17 +7, (157692 13526 213551)b }, ecr=1.000 00000.142 II| TABLET_EXECUTOR: Leader{1:2:1065} Compact 15 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 1063, product {1 parts epoch 4} done 00000.164 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1201, 11637b, wait} done, Waste{2:0, 3476267b +(158, 131604b), 300 trc} 00000.190 II| TABLET_EXECUTOR: Leader{1:2:1357} starting compaction 00000.190 II| TABLET_EXECUTOR: Leader{1:2:1358} starting Scan{17 on 2, Compact{1.2.1357, eph 4}} 00000.190 II| TABLET_EXECUTOR: Leader{1:2:1358} started compaction 17 00000.190 II| TABLET_OPS_HOST: Scan{17 on 2, Compact{1.2.1357, eph 4}} begin on TSubset{head 5, 1m 0p 0c} 00000.190 II| TABLET_OPS_HOST: Scan{17 on 2, Compact{1.2.1357, eph 4}} end=Done, 99r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 16 of 22 ~1p 00000.191 II| OPS_COMPACT: Compact{1.2.1357, eph 4} end=Done, 6 blobs 99r (max 99), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 16 +2, (140098 40198 164997)b }, ecr=1.000 00000.191 II| TABLET_EXECUTOR: Leader{1:2:1358} Compact 17 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 1357, product {1 parts epoch 5} done 00000.191 II| TABLET_EXECUTOR: Leader{1:2:1360} starting compaction 00000.191 II| TABLET_EXECUTOR: Leader{1:2:1361} starting Scan{19 on 2, Compact{1.2.1360, eph 4}} 00000.191 II| TABLET_EXECUTOR: Leader{1:2:1361} started compaction 19 00000.191 II| TABLET_OPS_HOST: Scan{19 on 2, Compact{1.2.1360, eph 4}} begin on TSubset{head 0, 0m 2p 0c} 00000.193 II| TABLET_OPS_HOST: Scan{19 on 2, Compact{1.2.1360, eph 4}} end=Done, 164r seen, TFwd{fetch=289KiB,saved=289KiB,usage=283KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=6}, trace 34 of 42 ~3p 00000.193 II| OPS_COMPACT: Compact{1.2.1360, eph 4} end=Done, 4 blobs 164r (max 205), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 34 +0, (241028 47131 309096)b }, ecr=1.000 00000.193 II| TABLET_EXECUTOR: Leader{1:2:1363} 
Compact 19 on TGenCompactionParams{2: gen 1 epoch 0, 2 parts} step 1360, product {1 parts epoch 0} done 00000.204 II| TABLET_EXECUTOR: Leader{1:2:1394} starting compaction 00000.204 II| TABLET_EXECUTOR: Leader{1:2:1395} starting Scan{21 on 3, Compact{1.2.1394, eph 4}} 00000.204 II| TABLET_EXECUTOR: Leader{1:2:1395} started compaction 21 00000.204 II| TABLET_OPS_HOST: Scan{21 on 3, Compact{1.2.1394, eph 4}} begin on TSubset{head 5, 1m 0p 0c} 00000.205 II| TABLET_OPS_HOST: Scan{21 on 3, Compact{1.2.1394, eph 4}} end=Done, 103r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 10 of 13 ~1p 00000.205 II| OPS_COMPACT: Compact{1.2.1394, eph 4} end=Done, 13 blobs 103r (max 103), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 10 +9, (163727 33604 160754)b }, ecr=1.000 00000.205 II| TABLET_EXECUTOR: Leader{1:2:1396} Compact 21 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 1394, product {1 parts epoch 5} done 00000.206 II| TABLET_EXECUTOR: Leader{1:2:1397} starting compaction 00000.206 II| TABLET_EXECUTOR: Leader{1:2:1398} starting Scan{23 on 3, Compact{1.2.1397, eph 4}} 00000.206 II| TABLET_EXECUTOR: Leader{1:2:1398} started compaction 23 00000.206 II| TABLET_OPS_HOST: Scan{23 on 3, Compact{1.2.1397, eph 4}} begin on TSubset{head 0, 0m 2p 0c} 00000.207 II| TABLET_OPS_HOST: Scan{23 on 3, Compact{1.2.1397, eph 4}} end=Done, 173r seen, TFwd{fetch=323KiB,saved=323KiB,usage=317KiB,after=6.82KiB,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 33 of 41 ~3p 00000.208 II| OPS_COMPACT: Compact{1.2.1397, eph 4} end=Done, 4 blobs 173r (max 211), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 33 +0, (267219 53576 293669)b }, ecr=1.000 00000.208 II| TABLET_EXECUTOR: Leader{1:2:1400} Compact 23 on TGenCompactionParams{3: gen 1 epoch 0, 2 parts} step 1397, product {1 parts epoch 0} done 00000.231 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1501, 13061b, wait} done, Waste{2: ... 
] 00000.023 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:250:1:12288:161:0] owner [37:418:2424] class Scan pages [ 0 ] cookie 0 00000.023 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:313:1:12288:161:0] owner [37:418:2424] 00000.023 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:313:1:12288:161:0] owner [37:418:2424] cookie 0 class Scan from cache [ 0 ] 00000.023 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:313:1:12288:161:0] owner [37:418:2424] class Scan pages [ 0 ] cookie 0 00000.023 DD| TABLET_SAUSAGECACHE: Save page collection [1:2:315:1:12288:163:0] owner [37:419:2424] compacted pages [ 2 ] 00000.023 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:315:1:12288:163:0] 00000.023 DD| TABLET_SAUSAGECACHE: Unregister owner [37:418:2424] 00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:418:2424] 00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:418:2424] 00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:418:2424] 00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:313:1:12288:161:0] owner [37:418:2424] 00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:418:2424] 00000.023 DD| TABLET_SAUSAGECACHE: Remove owner [37:418:2424] 00000.023 II| TABLET_EXECUTOR: Leader{1:2:316} Compact 63 on TGenCompactionParams{101: gen 2 epoch 0, 5 parts} step 315, product {1 parts epoch 0} done 00000.023 DD| TABLET_EXECUTOR: TGenCompactionStrategy CompactionFinished for 1: compaction 63, generation 2 00000.023 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 2, state Free, final id 0, final level 2 00000.023 DD| RESOURCE_BROKER: Finish task gen2-table-101-tablet-1 (32 by [37:30:2062]) (release resources {1, 0}) 00000.023 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_compaction_gen2 from 4.687500 to 0.000000 (remove task gen2-table-101-tablet-1 (32 by [37:30:2062])) 00000.023 DD| TABLET_SAUSAGECACHE: Attach page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.023 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.023 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:313:1:12288:161:0] owner [37:30:2062] 00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:313:1:12288:161:0] owner [37:30:2062] 00000.023 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:250:1:12288:161:0] owner [37:30:2062] 00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:30:2062] 00000.023 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:188:1:12288:161:0] owner [37:30:2062] 00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:30:2062] 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:317} commited cookie 3 for step 316 00000.023 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:126:1:12288:161:0] owner [37:30:2062] 00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:30:2062] 00000.023 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:64:1:12288:161:0] owner [37:30:2062] 00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:30:2062] 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:317} switch applied on followers, step 316 00000.023 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:315:1:12288:163:0] owner [37:30:2062] pages [ 2 ] 00000.024 DD| 
TABLET_SAUSAGECACHE: Add page collection [1:2:64:1:12288:161:0] owner [37:405:2414] 00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:64:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:64:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:126:1:12288:161:0] owner [37:405:2414] 00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:126:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:126:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:188:1:12288:161:0] owner [37:405:2414] 00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:188:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:188:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:250:1:12288:161:0] owner [37:405:2414] 00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:250:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:250:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:261:1:12288:161:0] owner [37:405:2414] 00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:261:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:261:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:273:1:12288:161:0] owner [37:405:2414] 00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:273:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:273:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:285:1:12288:161:0] owner [37:405:2414] 00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:285:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:285:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:297:1:12288:161:0] owner [37:405:2414] 00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:297:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:297:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.024 DD| RESOURCE_BROKER: Finish task Scan{58 on 101}::1 (29 by [37:30:2062]) (release resources {1, 0}) 00000.024 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_scan from 11.718750 to 0.000000 (remove task Scan{58 on 101}::1 (29 by [37:30:2062])) 00000.024 DD| TABLET_SAUSAGECACHE: Unregister owner [37:405:2414] 00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:261:1:12288:161:0] owner [37:405:2414] 00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:405:2414] 
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:285:1:12288:161:0] owner [37:405:2414] 00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:405:2414] 00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:297:1:12288:161:0] owner [37:405:2414] 00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:405:2414] 00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:405:2414] 00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:273:1:12288:161:0] owner [37:405:2414] 00000.024 DD| TABLET_SAUSAGECACHE: Remove owner [37:405:2414] 00000.024 II| TABLET_EXECUTOR: Leader{1:2:317} suiciding, Waste{2:0, 7661b +(30, 11928b), 16 trc, -42337b acc} 00000.024 DD| TABLET_SAUSAGECACHE: Unregister owner [37:30:2062] 00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.024 DD| TABLET_SAUSAGECACHE: Remove owner [37:30:2062] 00000.024 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.024 NN| TABLET_SAUSAGECACHE: Poison cache serviced 38 reqs hit {38 21480b} miss {0 0b} 00000.024 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.024 II| FAKE_ENV: DS.0 gone, left {2353b, 18}, put {31665b, 317} 00000.024 II| FAKE_ENV: DS.1 gone, left {23846b, 37}, put {57236b, 346} 00000.024 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.024 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.024 II| FAKE_ENV: All BS storage groups are stopped 00000.024 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.024 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 2287}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:55.213426Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.001 II| FAKE_ENV: Starting storage for BS group 1 00000.001 II| FAKE_ENV: Starting storage for BS group 2 00000.001 II| FAKE_ENV: Starting storage for BS group 3 00000.010 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.010 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.010 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.010 II| FAKE_ENV: DS.0 gone, left {536b, 6}, put {556b, 7} 00000.010 II| FAKE_ENV: DS.1 gone, left {30495b, 8}, put {30495b, 8} 00000.010 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.010 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.010 II| FAKE_ENV: All BS storage groups are stopped 00000.010 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.010 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-30T09:20:55.229597Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.001 II| FAKE_ENV: Starting storage for BS group 1 00000.001 II| FAKE_ENV: Starting storage for BS group 2 00000.001 II| FAKE_ENV: Starting storage for BS group 3 00000.066 CC| TABLET_EXECUTOR: Tablet 1 unhandled exception std::runtime_error: test ??+0 (0xCF0CD52) ??+0 (0xCF0CCF7) NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Exceptions::TTxExecuteThrowException::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext 
const&)+57 (0xCA48039) NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*)+1312 (0xF520E00) NKikimr::NTabletFlatExecutor::TExecutor::DoExecute(TAutoPtr, NKikimr::NTabletFlatExecutor::TExecutor::ETxMode)+3356 (0xF51F2AC) non-virtual thunk to NKikimr::NTabletFlatExecutor::TExecutor::Execute(TAutoPtr, NActors::TActorContext const&)+35 (0xF522323) ??+0 (0xCA47F50) NKikimr::NFake::TDummy::Inbox(TAutoPtr&)+480 (0xC9F9B50) NActors::IActor::Receive(TAutoPtr&)+85 (0xD7C15E5) 00000.066 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.066 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.066 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.066 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {62b, 2} 00000.066 II| FAKE_ENV: DS.1 gone, left {35b, 1}, put {35b, 1} 00000.066 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.066 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.066 II| FAKE_ENV: All BS storage groups are stopped 00000.066 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.066 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 1 Error 0 Left 15}, stopped >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-false [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationReuseExpiredID [GOOD] Test command err: 2025-06-30T09:20:51.571974Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.576487Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.576540Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.576579Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.576610Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.576636Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.576654Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.580550Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.580612Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.580655Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.580690Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.580743Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.580787Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.580819Z node 1 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.580906Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.580955Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.581002Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.581992Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.582031Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.582060Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.582083Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.582102Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.582157Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.583350Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.583502Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.583560Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.583617Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.583669Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.583693Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.583717Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.583736Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.584509Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.584627Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.584661Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.584687Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.584716Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.584788Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.584834Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.584917Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.584957Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.585730Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.585817Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.585873Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.585937Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.589525Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.589544Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.589753Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.590310Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.590471Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.590504Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.590546Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.590570Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.590635Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.590686Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.591261Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.591514Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.591600Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.591872Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.592083Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:51.592688Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.593016Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.593231Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.606673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:51.606692Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:51.610947Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:51.611346Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:51.611427Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:51.611622Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:51.612030Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:51.612054Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:51.612086Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:51.612098Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:51.612101Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:51.612112Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:51.612136Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:51.612140Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:51.612143Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:51.612147Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:51.612162Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:134 ... 
8 to 9 2025-06-30T09:20:53.077921Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:53.077950Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:53.077954Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 1 2025-06-30T09:20:53.077959Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:53.077972Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #6.8 2025-06-30T09:20:53.077980Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v9 host2:1001 to database state=Active resolvehost=host2.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 07:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:53.078002Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:53.089779Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:53.089832Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T06:00:00.023000Z 2025-06-30T09:20:53.089842Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z, approximate epoch start #6.8 nodes=1 expired=0 2025-06-30T09:20:53.089875Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z nodes=1 expired=0 removed=0 2025-06-30T09:20:53.089879Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v9 to update nodes log 2025-06-30T09:20:53.089995Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:758:2287], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.090066Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:761:2290] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:53.090081Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:758:2287] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:53.090103Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:760:2289], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.090121Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:762:2291] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:53.090131Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:764:2293] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:53.090138Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received 
event# 269877761, Sender [1:761:2290], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.090143Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:760:2289] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:53.090158Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:762:2291], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.090170Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:764:2293], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.090205Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:758:2287] 2025-06-30T09:20:53.090210Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.090217Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:53.090241Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:760:2289] 2025-06-30T09:20:53.090245Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.090251Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:53.090274Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [3:83:2072], Recipient [1:761:2290] 2025-06-30T09:20:53.090279Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.090285Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:53.090334Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:762:2291] 2025-06-30T09:20:53.090337Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.090341Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:53.090352Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:764:2293] 2025-06-30T09:20:53.090354Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.090358Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:53.090491Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:791:2315], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.090514Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender 
[1:633:2213], Recipient [1:750:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:53.090519Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.090523Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:53.090570Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:793:2317], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.090581Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:750:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:53.090584Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.090587Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:53.090637Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:795:2319], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.090646Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:750:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:53.090649Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.090652Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:53.090694Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:797:2321], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.090710Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:750:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 8 } 2025-06-30T09:20:53.090713Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:53.090716Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #6.9 1970-01-01T05:00:00.023000Z - 1970-01-01T06:00:00.023000Z - 1970-01-01T07:00:00.023000Z 2025-06-30T09:20:53.090780Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:799:2323], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.090805Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:633:2213], Recipient [1:750:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 8 SeqNo: 2 } 2025-06-30T09:20:53.090810Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:53.090816Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:633:2213], seqNo: 2, version: 8, server pipe id: [1:799:2323] 2025-06-30T09:20:53.090821Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send 
TEvUpdateNodes v8 -> v9 to [1:633:2213] 2025-06-30T09:20:53.090872Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:799:2323], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:53.090876Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:633:2213], seqNo: 2, server pipe id: [1:799:2323] 2025-06-30T09:20:53.090894Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:801:2325], Recipient [1:750:2281]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:53.090909Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:633:2213], Recipient [1:750:2281]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:53.090913Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:53.090938Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 25200023000 Name: "slot-0" } } >> TxUsage::Sinks_Oltp_WriteToTopic_4_Query >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedClient [GOOD] >> TNodeBrokerTest::NodesMigrationExtendLease >> TPartBtreeIndexIteration::FewNodes_Sticky [GOOD] >> TPartBtreeIndexIteration::FewNodes_Slices >> TConsoleConfigSubscriptionTests::TestNotificationForTimeoutedNotificationResponse >> IncorrectQueries::InvalidPartID [GOOD] >> IncorrectQueries::Incompatible >> IncorrectQueries::Incompatible [GOOD] >> IncorrectQueries::Proto >> TConsoleTests::TestCreateServerlessTenant [GOOD] >> TConsoleTests::TestCreateServerlessTenantWrongSharedDb |80.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigration999Nodes [GOOD] Test command err: 2025-06-30T09:20:52.918106Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.921508Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.921562Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.921588Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.921625Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.921650Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.921668Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.925392Z node 3 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.925451Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.925486Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.925519Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.925557Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.925582Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.925603Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.925668Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.925702Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.925724Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.926646Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.926679Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.926700Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.926720Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.926755Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.926808Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.927804Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.927916Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.927965Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.928000Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.928034Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.928050Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.928066Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.928080Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.928602Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.928682Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.928705Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.928726Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.928743Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.928774Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.928804Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:52.928910Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.928935Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.929296Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.929539Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.929573Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.929639Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.929660Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.929715Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.933116Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.933199Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.933745Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.933785Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.933796Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.933806Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.934196Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.934475Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.934621Z node 1 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.934678Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.934848Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.934953Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.936332Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.936583Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.937606Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.937893Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.938678Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.939028Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.951485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:52.951520Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:52.956317Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:52.956818Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:52.956906Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:52.957072Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:52.957715Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:52.957866Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:52.957906Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:52.957917Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:52.957921Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:52.957932Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:52.957943Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:52.957947Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:52.957950Z node 1 :NODE_BROKER DEB ... -30T09:20:54.620743Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1565.v504 to update nodes log 2025-06-30T09:20:54.620747Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1564.v504 to update nodes log 2025-06-30T09:20:54.620751Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1563.v504 to update nodes log 2025-06-30T09:20:54.620754Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1562.v504 to update nodes log 2025-06-30T09:20:54.620758Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1561.v504 to update nodes log 2025-06-30T09:20:54.620761Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1560.v504 to update nodes log 2025-06-30T09:20:54.620765Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1559.v504 to update nodes log 2025-06-30T09:20:54.620769Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1558.v504 to update nodes log 2025-06-30T09:20:54.620774Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1557.v504 to update nodes log 2025-06-30T09:20:54.620777Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1556.v504 to update nodes log 2025-06-30T09:20:54.620781Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1555.v504 to update nodes log 2025-06-30T09:20:54.620784Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1554.v504 to update nodes log 2025-06-30T09:20:54.620788Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1553.v504 to update nodes log 2025-06-30T09:20:54.620791Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1552.v504 to update nodes log 2025-06-30T09:20:54.620795Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1551.v504 to update nodes log 2025-06-30T09:20:54.620798Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1550.v504 to update nodes log 2025-06-30T09:20:54.620802Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1549.v504 to update nodes log 2025-06-30T09:20:54.620805Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1548.v504 to update nodes log 2025-06-30T09:20:54.620809Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1547.v504 to update nodes log 2025-06-30T09:20:54.620812Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1546.v504 to update nodes log 2025-06-30T09:20:54.620816Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1545.v504 to update nodes log 2025-06-30T09:20:54.620819Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1544.v504 to update nodes log 2025-06-30T09:20:54.620822Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1543.v504 to update nodes log 2025-06-30T09:20:54.620826Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1542.v504 to update 
nodes log 2025-06-30T09:20:54.620830Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1541.v504 to update nodes log 2025-06-30T09:20:54.620833Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1540.v504 to update nodes log 2025-06-30T09:20:54.620837Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1536.v504 to update nodes log 2025-06-30T09:20:54.620841Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1787.v504 to update nodes log 2025-06-30T09:20:54.621098Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2776:3796], Recipient [1:2766:3790]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.621116Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2777:3797], Recipient [1:2766:3790]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.621222Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2776:3796] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:54.621239Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2777:3797] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:54.621248Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2779:3799] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:54.621266Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2779:3799], Recipient [1:2766:3790]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.621287Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2780:3800] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:54.621295Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2780:3800], Recipient [1:2766:3790]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.621301Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2782:3802] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:54.621310Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2783:3803] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:54.621337Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2782:3802], Recipient [1:2766:3790]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.621361Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2783:3803], Recipient [1:2766:3790]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.621426Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:2780:3800] 2025-06-30T09:20:54.621432Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:54.621443Z node 1 :NODE_BROKER TRACE: 
node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:54.621463Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [3:83:2072], Recipient [1:2776:3796] 2025-06-30T09:20:54.621465Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:54.621470Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:54.621478Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:2777:3797] 2025-06-30T09:20:54.621481Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:54.621484Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:54.624488Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:2779:3799] 2025-06-30T09:20:54.624502Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:54.624515Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:54.626065Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:2782:3802] 2025-06-30T09:20:54.626077Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:54.626087Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:54.627413Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:2783:3803] 2025-06-30T09:20:54.627421Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:54.627427Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:54.630572Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2825:3840], Recipient [1:2766:3790]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.630628Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2215], Recipient [1:2766:3790]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:54.630634Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:54.630643Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:54.632125Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2827:3842], Recipient [1:2766:3790]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.632165Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2215], Recipient [1:2766:3790]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:54.632169Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:54.632176Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:54.635016Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2829:3844], Recipient [1:2766:3790]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.635075Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2215], Recipient [1:2766:3790]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:54.635083Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:54.635096Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z 2025-06-30T09:20:54.636295Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2831:3846], Recipient [1:2766:3790]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:54.636333Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2215], Recipient [1:2766:3790]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 503 } 2025-06-30T09:20:54.636337Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:54.636343Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.025000Z - 1970-01-01T03:00:00.025000Z - 1970-01-01T04:00:00.025000Z |80.9%| [LD] {RESULT} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:18:39.173705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, 
InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:18:39.173739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:39.173746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:18:39.173753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:18:39.173761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:18:39.173766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:18:39.173778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:18:39.173798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:18:39.173935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:18:39.174041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:18:39.192925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:18:39.192959Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:39.193090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:18:39.197403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:18:39.198814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:18:39.198883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:18:39.202580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:18:39.202660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:18:39.202836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:39.202935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:18:39.204692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 
2025-06-30T09:18:39.204763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:18:39.205106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:18:39.205118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:18:39.205129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:18:39.205138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:18:39.205145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:18:39.205181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:18:39.207675Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:18:39.226515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:18:39.226617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:39.226695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:18:39.226704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:18:39.226781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:18:39.226800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:18:39.227694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 
SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:39.227753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:18:39.227826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:39.227839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:18:39.227845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:18:39.227850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:18:39.228289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:39.228303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:18:39.228310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:18:39.228658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:39.228668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:18:39.228674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:18:39.228681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:18:39.229379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:18:39.229743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:18:39.229786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:18:39.230019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:18:39.230049Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... SizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: 
"background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:53.992631Z node 104 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:20:53.992753Z node 104 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" took 151us result status StatusSuccess 2025-06-30T09:20:53.992964Z node 104 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 
LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-false [GOOD] Test command err: 2025-06-30T09:20:55.304813Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.307546Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.308101Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.308338Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.321806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:55.321846Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:55.325299Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:55.325660Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:55.325716Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:55.325883Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:55.326492Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:55.326519Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:55.326551Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:55.326562Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:55.326565Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:55.326577Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:55.326591Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:55.326596Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:55.326600Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:55.326604Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:55.326618Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:55.326623Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:55.347667Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:55.347698Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.025000Z 2025-06-30T09:20:55.347704Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-30T09:20:55.347713Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z nodes=0 expired=0 removed=0 2025-06-30T09:20:55.388435Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:204:2199], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.388467Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:204:2199] Leader: 1 Dead: 0 Generation: 2 VersionInfo:  } ... waiting for nameservers are connected (done) 2025-06-30T09:20:55.389609Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:18:2065], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 0 SeqNo: 0 } 2025-06-30T09:20:55.389621Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:55.389628Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:18:2065], seqNo: 0, version: 0, server pipe id: [1:204:2199] 2025-06-30T09:20:55.389635Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v0 -> v1 to [1:18:2065] ... 
blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 2025-06-30T09:20:55.389687Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:208:2203], Recipient [1:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.389714Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:206:2201], Recipient [1:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:55.389718Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:55.389725Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:55.389767Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:55.389781Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:16:2063], path# /dc-1, domainOwnerId# 72057594046678944 2025-06-30T09:20:55.394045Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 
MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 } 2025-06-30T09:20:55.394120Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [1:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:20:55.394165Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:16:2063], cacheItem# { Subscriber: { Subscriber: [1:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:55.394238Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:217:2205], recipient# [1:209:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 7205759404 ... 
Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:55.825257Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:217:2205], recipient# [2:209:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:55.825267Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:55.825276Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:55.825285Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [2:209:2178], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.825288Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.825308Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:55.825312Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host1:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:55.825335Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v2 host1:1001 to database state=Active resolvehost=host1.host1.host1 address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:55.825375Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:55.825379Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 1 to 2 2025-06-30T09:20:55.825382Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=2 2025-06-30T09:20:55.836225Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:55.836245Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:55.836253Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-30T09:20:55.836260Z node 2 :NODE_BROKER DEBUG: 
node_broker.cpp:636: Add node #1024.v2 host1:1001 to epoch cache 2025-06-30T09:20:55.836283Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v2 to update nodes log 2025-06-30T09:20:55.836321Z node 2 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200024000 Name: "slot-0" } 2025-06-30T09:20:55.836405Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [2:221:2209], Recipient [2:173:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.836429Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [2:206:2201], Recipient [2:173:2178]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:55.836433Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:55.836440Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:55.836474Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:55.836496Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:16:2063], cacheItem# { Subscriber: { Subscriber: [2:210:2204] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:55.836536Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:223:2210], recipient# [2:222:2178], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:55.836568Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle 
TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:55.836579Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:55.836589Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [2:222:2178], Recipient [2:173:2178]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.836592Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.836600Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:55.836603Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host2:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:55.836624Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v3 host2:1001 to database state=Active resolvehost=host2.host2.host2 address=1.2.3.5 dc=1 location=DC=1/M=2/R=3/U=5/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:55.836663Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:55.836666Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 2 to 3 2025-06-30T09:20:55.836669Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=3 2025-06-30T09:20:55.847495Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:55.847517Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:55.847526Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 2 to 3 2025-06-30T09:20:55.847532Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v3 host2:1001 to epoch cache 2025-06-30T09:20:55.847559Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v3 to update nodes log 2025-06-30T09:20:55.847604Z node 2 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 7200024000 Name: "slot-1" } ... 
waiting for cache miss 2025-06-30T09:20:55.847668Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1024 Deadline: 2.107024s } 2025-06-30T09:20:55.847687Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1024, deadline# 2.107024s 2025-06-30T09:20:55.847692Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:653: Schedule wakeup for new earliest deadline 2.107024s 2025-06-30T09:20:55.847701Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:548: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1025 Deadline: 1.107024s } 2025-06-30T09:20:55.847707Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:649: New cache miss: nodeId# 1025, deadline# 1.107024s 2025-06-30T09:20:55.847711Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:653: Schedule wakeup for new earliest deadline 1.107024s ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) 2025-06-30T09:20:55.919134Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:896: HandleWakeup at 1.108024s 2025-06-30T09:20:55.919165Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1025, error=Deadline exceeded 2025-06-30T09:20:55.959890Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:896: HandleWakeup at 2.108024s 2025-06-30T09:20:55.959919Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1024, error=Deadline exceeded >> TNodeBrokerTest::NodesSubscriberDisconnect >> TEnumerationTest::TestPublish [GOOD] >> TLocalTests::TestAddTenant >> TNodeBrokerTest::ConfigPipelining [GOOD] |80.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |80.9%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker >> THealthCheckTest::OrangeGroupIssueWhenDegradedGroupStatus >> TLocalTests::TestAddTenant [GOOD] >> TNodeBrokerTest::FixedNodeId [GOOD] >> THealthCheckTest::Basic ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ConfigPipelining [GOOD] Test command err: 2025-06-30T09:20:51.113134Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.116623Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.116678Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.116727Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.116770Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.116806Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:51.116832Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.120732Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.120825Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.120874Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.120924Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.120982Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.121027Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.121063Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.121159Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.121212Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.121240Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.122235Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.122276Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.122298Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.122318Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.122337Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.122394Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.123444Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.123562Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.123616Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.123655Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.123696Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.123715Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.123732Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.123747Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.124342Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.124430Z node 3 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.124459Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.124479Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.124497Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.124532Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.124562Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:51.124680Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.124708Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.125034Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.125202Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.125224Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.125277Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.125297Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.125348Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.128561Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.129099Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.129119Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.129148Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.129191Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.129291Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.129587Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:51.129965Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.130106Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.130185Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.130409Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.130507Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.132627Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.132766Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.132792Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.133487Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.133609Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.133654Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.133687Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.134391Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.134478Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.134655Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:51.147369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:51.147389Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... 
waiting for nameservers are connected 2025-06-30T09:20:51.151459Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:51.151898Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:51.151963Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:51.152145Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:51.152613Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:51.152644Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:51.152680Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:51.152693Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Star ... Mode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:55.622337Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:55.622348Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:55.622359Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [9:644:2185], Recipient [9:565:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.622363Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.622371Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:55.622373Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host1:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:55.622382Z node 9 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host1:1001: ERROR_TEMP: No free node IDs 2025-06-30T09:20:55.622444Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [9:655:2230], Recipient [9:565:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.622463Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039946, Sender [9:633:2215], Recipient [9:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSetConfigRequest { Config { BannedNodeIds { From: 1024 To: 1024 } BannedNodeIds { From: 1026 To: 1027 } } } 2025-06-30T09:20:55.622467Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:260: StateWork, processing event TEvNodeBroker::TEvSetConfigRequest 2025-06-30T09:20:55.622472Z node 9 
:NODE_BROKER DEBUG: node_broker__update_config.cpp:53: TTxUpdateConfig Execute Config { BannedNodeIds { From: 1024 To: 1024 } BannedNodeIds { From: 1026 To: 1027 } } 2025-06-30T09:20:55.622481Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:1301: [DB] Update config in database config=BannedNodeIds { From: 1024 To: 1024 } BannedNodeIds { From: 1026 To: 1027 } 2025-06-30T09:20:55.622512Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [9:656:2231], Recipient [9:565:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.622528Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [9:633:2215], Recipient [9:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:55.622532Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:55.622541Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:55.622563Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [9:23:2070], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:55.622573Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [9:23:2070], cacheItem# { Subscriber: { Subscriber: [9:645:2226] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:55.622604Z node 9 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [9:658:2232], recipient# [9:657:2185], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:55.622613Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false 
ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:55.622620Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:55.622627Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [9:657:2185], Recipient [9:565:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.622630Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.622634Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:55.622636Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host1:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:55.622655Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v2 host1:1001 to database state=Active resolvehost=host1.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:55.622682Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v2 host1:1001 2025-06-30T09:20:55.622687Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 1 to 2 2025-06-30T09:20:55.622691Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=2 ... waiting for commit ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... waiting for commit (done) ... 
unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR 2025-06-30T09:20:55.633507Z node 9 :NODE_BROKER DEBUG: node_broker__update_config.cpp:85: TTxUpdateConfig Complete 2025-06-30T09:20:55.633540Z node 9 :NODE_BROKER TRACE: node_broker__update_config.cpp:92: TTxUpdateConfig reply with: NKikimr::NNodeBroker::TEvNodeBroker::TEvSetConfigResponse { Status { Code: OK } } 2025-06-30T09:20:55.633549Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:55.633557Z node 9 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: ERROR_TEMP Reason: "No free node IDs" } 2025-06-30T09:20:55.633565Z node 9 :NODE_BROKER DEBUG: node_broker__update_config.cpp:85: TTxUpdateConfig Complete 2025-06-30T09:20:55.633570Z node 9 :NODE_BROKER TRACE: node_broker__update_config.cpp:92: TTxUpdateConfig reply with: NKikimr::NNodeBroker::TEvNodeBroker::TEvSetConfigResponse { Status { Code: OK } } 2025-06-30T09:20:55.633574Z node 9 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:55.633579Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v2 host1:1001 2025-06-30T09:20:55.633587Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-30T09:20:55.633591Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v2 host1:1001 to epoch cache 2025-06-30T09:20:55.633605Z node 9 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v2 to update nodes log 2025-06-30T09:20:55.633622Z node 9 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200025000 Name: "slot-0" } 2025-06-30T09:20:55.633713Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [9:662:2236], Recipient [9:565:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.633732Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [9:633:2215], Recipient [9:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:55.633736Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:55.633743Z node 9 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.2 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:55.633788Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [9:664:2238], Recipient [9:565:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.633801Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [9:633:2215], Recipient [9:565:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-30T09:20:55.633804Z node 9 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:55.633815Z node 9 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1025 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { 
DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200025000 Name: "slot-0" } } >> TNodeBrokerTest::RegistrationPipeliningNodeName [GOOD] >> IncorrectQueries::Proto [GOOD] >> IncorrectQueries::BaseReadingTest >> IncorrectQueries::BaseReadingTest [GOOD] >> IncorrectQueries::EmptyGetTest [GOOD] >> IncorrectQueries::ProtoBlobGet >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPendingExtSubdomain [GOOD] |80.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore >> THealthCheckTest::RedGroupIssueWhenDisintegratedGroupStatus >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Table [GOOD] >> THealthCheckTest::Basic [GOOD] >> THealthCheckTest::Issues100GroupsListing >> IncorrectQueries::ProtoBlobGet [GOOD] >> THealthCheckTest::BasicNodeCheckRequest >> IncorrectQueries::BasePutTest [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunning >> IncorrectQueries::MultiPutBaseTest >> THealthCheckTest::StorageLimit95 >> THealthCheckTest::OneIssueListing >> THealthCheckTest::StaticGroupIssue |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |81.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore >> IncorrectQueries::MultiPutBaseTest [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDropIndex [GOOD] >> TSchemeshardBackgroundCleaningTest::TempInTemp >> THealthCheckTest::YellowGroupIssueWhenPartialGroupStatus >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query >> THealthCheckTest::BasicNodeCheckRequest [GOOD] >> TConsoleTests::TestListTenants [GOOD] >> IncorrectQueries::MultiPutCrcTest >> TConsoleTests::TestListTenantsExtSubdomain >> TNodeBrokerTest::NodeNameReuseRestartWithHostChanges [GOOD] >> TConsoleTests::TestSetConfig [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForTimeoutedNotificationResponse [GOOD] >> TNodeBrokerTest::NodesMigration1000Nodes [GOOD] >> TNodeBrokerTest::NodesMigrationExtendLease [GOOD] >> TNodeBrokerTest::NodesV2BackMigration [GOOD] >> TPartBtreeIndexIteration::FewNodes_Slices [GOOD] >> THealthCheckTest::OrangeGroupIssueWhenDegradedGroupStatus [GOOD] >> THealthCheckTest::Issues100Groups100VCardListing >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query [GOOD] >> THealthCheckTest::RedGroupIssueWhenDisintegratedGroupStatus [GOOD] >> TNodeBrokerTest::NodesSubscriberDisconnect [GOOD] >> THealthCheckTest::OneIssueListing [GOOD] >> THealthCheckTest::StaticGroupIssue [GOOD] >> TNodeBrokerTest::ExtendLeaseRestartRace [GOOD] >> THealthCheckTest::StorageLimit95 [GOOD] >> THealthCheckTest::BlueGroupIssueWhenPartialGroupStatusAndReplicationDisks >> TConsoleTests::TestCreateServerlessTenantWrongSharedDb [GOOD] >> THealthCheckTest::Issues100GroupsListing [GOOD] >> IncorrectQueries::MultiPutCrcTest [GOOD] >> LocalPartition::DirectWriteWithoutDescribeResourcesPermission [GOOD] >> TNodeBrokerTest::TestListNodes [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsAndTags [GOOD] >> THealthCheckTest::YellowGroupIssueWhenPartialGroupStatus [GOOD] >> TConsoleTests::TestCreateTenantWrongName >> TConsoleTests::TestListTenantsExtSubdomain [GOOD] >> THealthCheckTest::TestTabletIsDead >> TConsoleTests::TestTenantGeneration >> THealthCheckTest::BlueGroupIssueWhenPartialGroupStatusAndReplicationDisks [GOOD] >> THealthCheckTest::OnlyDiskIssueOnSpaceIssues >> 
TConsoleConfigSubscriptionTests::TestNotificationForRestartedServer >> THealthCheckTest::OnlyDiskIssueOnFaultyPDisks >> THealthCheckTest::StorageLimit50 >> THealthCheckTest::StorageLimit87 >> TPartBtreeIndexIteration::FewNodes_Groups_Slices >> THealthCheckTest::ProtobufUnderLimitFor70LargeVdisksIssues >> THealthCheckTest::Issues100VCardListing >> THealthCheckTest::Issues100VCardListing [GOOD] >> THealthCheckTest::Issues100GroupsMerging >> IncorrectQueries::MultiPutWithoutBlobs [GOOD] >> IncorrectQueries::EmptyProtoMultiPut >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsOnly >> THealthCheckTest::TestTabletIsDead [GOOD] >> TConsoleTests::TestModifyUsedZoneKind >> THealthCheckTest::TestReBootingTabletIsDead >> THealthCheckTest::Issues100Groups100VCardListing [GOOD] >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster >> THealthCheckTest::GreenStatusWhenCreatingGroup >> THealthCheckTest::OnlyDiskIssueOnFaultyPDisks [GOOD] >> THealthCheckTest::OnlyDiskIssueOnSpaceIssues [GOOD] >> THealthCheckTest::StorageLimit50 [GOOD] >> THealthCheckTest::SpecificServerless >> THealthCheckTest::StorageLimit87 [GOOD] >> THealthCheckTest::ProtobufUnderLimitFor70LargeVdisksIssues [GOOD] >> IncorrectQueries::EmptyProtoMultiPut [GOOD] >> THealthCheckTest::Issues100GroupsMerging [GOOD] >> THealthCheckTest::Issues100VCardMerging >> TPartBtreeIndexIteration::FewNodes_Groups_Slices [GOOD] >> THealthCheckTest::Issues100Groups100VCardMerging >> THealthCheckTest::GreenStatusWhenCreatingGroup [GOOD] >> THealthCheckTest::DontIgnoreServerlessWithExclusiveNodesWhenNotSpecific >> THealthCheckTest::Issues100Groups100VCardMerging [GOOD] >> THealthCheckTest::OnlyDiskIssueOnInitialPDisks >> THealthCheckTest::SpecificServerless [GOOD] >> THealthCheckTest::SpecificServerlessWithExclusiveNodes >> THealthCheckTest::StorageLimit80 >> THealthCheckTest::ServerlessBadTablets >> IncorrectQueries::ManyQueriesThroughOneBSQueue [GOOD] >> THealthCheckTest::NoStoragePools >> TPartBtreeIndexIteration::FewNodes_History_Slices ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesSubscriberDisconnect [GOOD] Test command err: 2025-06-30T09:20:57.014803Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.019032Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.019085Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.019119Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.019162Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.019200Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.019224Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.022798Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.022883Z node 4 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.022931Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.022983Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.023045Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.023075Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.023105Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.023179Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.023217Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.023237Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.024155Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.024190Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.024214Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.024235Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.024253Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.024328Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.025280Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.025382Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.025425Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.025462Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.025500Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.025517Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.025534Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.025549Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.026163Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.026270Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.026301Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.026325Z node 5 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.026345Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.026382Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.026413Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:57.026426Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.026453Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.026668Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.027379Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.027420Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.027537Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.030873Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.031491Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.031509Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.031600Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.032218Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.032304Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.032343Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.032389Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.032487Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.032504Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.032634Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:57.032700Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.034531Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.034674Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.034968Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.035071Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:57.047696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:57.047717Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:57.051781Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:57.052325Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:57.052408Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:57.052614Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:57.053708Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:57.053747Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:57.053785Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:57.053797Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.024000Z - 1970-01-01T01:00:00.024000Z - 1970-01-01T02:00:00.024000Z 2025-06-30T09:20:57.053801Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:57.053813Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:57.053855Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:57.053859Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:57.053862Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:57.053866Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.024000Z - 1970-01-01T01:00:00.024000Z - 1970-01-01T02:00:00.024000Z 2025-06-30T09:20:57.053879Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:57.053884Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:57.085999Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:57.086046Z node 1 :NODE_BROKER TRACE: node_brok ... 
one> 2025-06-30T09:20:57.334034Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1026.v4 host3:1001 2025-06-30T09:20:57.334043Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 3 to 4 2025-06-30T09:20:57.334049Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=4 2025-06-30T09:20:57.345013Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:57.345038Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1026.v4 host3:1001 2025-06-30T09:20:57.345049Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 3 to 4 2025-06-30T09:20:57.345056Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1026.v4 host3:1001 to epoch cache 2025-06-30T09:20:57.345078Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1026.v4 to update nodes log 2025-06-30T09:20:57.345124Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1026 Host: "host3" Port: 1001 ResolveHost: "host3.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200024000 Name: "slot-2" } 2025-06-30T09:20:57.355367Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:564:2184]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:57.355393Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:57.878105Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:704:2248], Recipient [1:564:2184]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.878180Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:632:2214], Recipient [1:564:2184]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 3 SeqNo: 2 } 2025-06-30T09:20:57.878189Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:57.878198Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:632:2214], seqNo: 2, version: 3, server pipe id: [1:704:2248] 2025-06-30T09:20:57.878209Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v3 -> v4 to [1:632:2214] 2025-06-30T09:20:57.878221Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:705:2249], Recipient [1:564:2184]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.878235Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:632:2214], Recipient [1:564:2184]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:57.878240Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.878251Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.4 1970-01-01T00:00:00.024000Z - 1970-01-01T01:00:00.024000Z - 1970-01-01T02:00:00.024000Z 2025-06-30T09:20:57.878359Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:707:2251], Recipient [1:564:2184]: NKikimr::TEvTabletPipe::TEvServerConnected 
2025-06-30T09:20:57.878396Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:632:2214], Recipient [1:564:2184]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host4" Port: 1001 ResolveHost: "host4.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:57.878403Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:57.878414Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host4" Port: 1001 ResolveHost: "host4.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:57.878462Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:23:2070], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:57.878488Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:23:2070], cacheItem# { Subscriber: { Subscriber: [1:636:2217] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:57.878542Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:709:2252], recipient# [1:708:2184], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:57.878565Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:57.878578Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host4" Port: 1001 ResolveHost: "host4.yandex.net" Address: "1.2.3.4" 
Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:57.878593Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:708:2184], Recipient [1:564:2184]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:57.878598Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:57.878619Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:57.878624Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host4:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:57.878653Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1027.v5 host4:1001 to database state=Active resolvehost=host4.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=3 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:57.878708Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1027.v5 host4:1001 2025-06-30T09:20:57.878716Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 4 to 5 2025-06-30T09:20:57.878720Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=5 2025-06-30T09:20:57.889634Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:57.889659Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1027.v5 host4:1001 2025-06-30T09:20:57.889666Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 4 to 5 2025-06-30T09:20:57.889671Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1027.v5 host4:1001 to epoch cache 2025-06-30T09:20:57.889690Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1027.v5 to update nodes log 2025-06-30T09:20:57.889744Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1027 Host: "host4" Port: 1001 ResolveHost: "host4.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200024000 Name: "slot-3" } 2025-06-30T09:20:57.889863Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:713:2256], Recipient [1:564:2184]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.889877Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:632:2214], Recipient [1:564:2184]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:57.889881Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.889889Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.5 1970-01-01T00:00:00.024000Z - 1970-01-01T01:00:00.024000Z - 1970-01-01T02:00:00.024000Z 2025-06-30T09:20:57.900092Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:564:2184]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:57.900122Z node 1 :NODE_BROKER TRACE: 
node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:20:57.900135Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v4 -> v5 to [1:632:2214] 2025-06-30T09:20:57.900304Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:715:2258], Recipient [1:564:2184]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.900341Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:632:2214], Recipient [1:564:2184]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:57.900350Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.900362Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.5 1970-01-01T00:00:00.024000Z - 1970-01-01T01:00:00.024000Z - 1970-01-01T02:00:00.024000Z 2025-06-30T09:20:57.900436Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039952, Sender [1:632:2214], Recipient [1:564:2184]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest { SeqNo: 2 } 2025-06-30T09:20:57.900442Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:262: StateWork, processing event TEvNodeBroker::TEvSyncNodesRequest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigration1000Nodes [GOOD] Test command err: 2025-06-30T09:20:55.507552Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.510940Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.510981Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.511008Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.511042Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.511071Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.511093Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.514792Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.514853Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.514897Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.514932Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.514971Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.515005Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.515030Z 
node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.515095Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.515131Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.515151Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.516041Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.516066Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.516085Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.516103Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.516118Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.516164Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.517154Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.517256Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.517304Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.517338Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.517373Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.517389Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.517405Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.517420Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.517996Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518079Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518104Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518125Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518143Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518177Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518203Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.518242Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: 
Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518271Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518868Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518903Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518956Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518976Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518987Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.518998Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.519095Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.521887Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.521964Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.521979Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.522009Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.522027Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.522577Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.522834Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.523056Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.523297Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.523405Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.523507Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.523616Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:55.523690Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.524295Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.524343Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.524530Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.524553Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.524713Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.524831Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.537672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:55.537693Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:55.541187Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:55.541529Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:55.541593Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:55.541740Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:55.542101Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:55.542124Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:55.542151Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:55.542163Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:55.542167Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:55.542178Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Comp ... 
dd node #1582.v504 to update nodes log 2025-06-30T09:20:57.192739Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1581.v504 to update nodes log 2025-06-30T09:20:57.192746Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1580.v504 to update nodes log 2025-06-30T09:20:57.192752Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1579.v504 to update nodes log 2025-06-30T09:20:57.192760Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1578.v504 to update nodes log 2025-06-30T09:20:57.192767Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1577.v504 to update nodes log 2025-06-30T09:20:57.192773Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1576.v504 to update nodes log 2025-06-30T09:20:57.192781Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1575.v504 to update nodes log 2025-06-30T09:20:57.192788Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1574.v504 to update nodes log 2025-06-30T09:20:57.192795Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1573.v504 to update nodes log 2025-06-30T09:20:57.192801Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1572.v504 to update nodes log 2025-06-30T09:20:57.192809Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1571.v504 to update nodes log 2025-06-30T09:20:57.192816Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1570.v504 to update nodes log 2025-06-30T09:20:57.192822Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1569.v504 to update nodes log 2025-06-30T09:20:57.192830Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1568.v504 to update nodes log 2025-06-30T09:20:57.192836Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1567.v504 to update nodes log 2025-06-30T09:20:57.192843Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1566.v504 to update nodes log 2025-06-30T09:20:57.192850Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1565.v504 to update nodes log 2025-06-30T09:20:57.192857Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1564.v504 to update nodes log 2025-06-30T09:20:57.192865Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1563.v504 to update nodes log 2025-06-30T09:20:57.192872Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1562.v504 to update nodes log 2025-06-30T09:20:57.192878Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1561.v504 to update nodes log 2025-06-30T09:20:57.192885Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1560.v504 to update nodes log 2025-06-30T09:20:57.192892Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1559.v504 to update nodes log 2025-06-30T09:20:57.192899Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1558.v504 to update nodes log 2025-06-30T09:20:57.192905Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1557.v504 to update nodes log 2025-06-30T09:20:57.192912Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1556.v504 to update nodes log 2025-06-30T09:20:57.192918Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1555.v504 to update nodes log 2025-06-30T09:20:57.192925Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1554.v504 to update nodes log 2025-06-30T09:20:57.192932Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1553.v504 to update nodes log 2025-06-30T09:20:57.192938Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1552.v504 to update nodes log 2025-06-30T09:20:57.192945Z node 1 
:NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1551.v504 to update nodes log 2025-06-30T09:20:57.192952Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1550.v504 to update nodes log 2025-06-30T09:20:57.192959Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1549.v504 to update nodes log 2025-06-30T09:20:57.192966Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1548.v504 to update nodes log 2025-06-30T09:20:57.192974Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1547.v504 to update nodes log 2025-06-30T09:20:57.192981Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1546.v504 to update nodes log 2025-06-30T09:20:57.192987Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1545.v504 to update nodes log 2025-06-30T09:20:57.192994Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1544.v504 to update nodes log 2025-06-30T09:20:57.193000Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1543.v504 to update nodes log 2025-06-30T09:20:57.193007Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1542.v504 to update nodes log 2025-06-30T09:20:57.193014Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1541.v504 to update nodes log 2025-06-30T09:20:57.193021Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1540.v504 to update nodes log 2025-06-30T09:20:57.193028Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1537.v504 to update nodes log 2025-06-30T09:20:57.193035Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1787.v504 to update nodes log 2025-06-30T09:20:57.193310Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2789:3805], Recipient [1:2781:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.193429Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2791:3807], Recipient [1:2781:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.193464Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2792:3808] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:57.193484Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2794:3810] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:57.193498Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2792:3808], Recipient [1:2781:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.193510Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2789:3805] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:57.193525Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:2791:3807] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:57.193549Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2794:3810], Recipient [1:2781:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.193658Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient 
[1:2794:3810] 2025-06-30T09:20:57.193666Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.193680Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.193711Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:2789:3805] 2025-06-30T09:20:57.193716Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.193724Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.193740Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [2:54:2072], Recipient [1:2791:3807] 2025-06-30T09:20:57.193745Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.193752Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.196933Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:2792:3808] 2025-06-30T09:20:57.196960Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.196979Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.205704Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2846:3857], Recipient [1:2781:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.205764Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:2781:3800]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:57.205771Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.205784Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.208198Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2848:3859], Recipient [1:2781:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.208232Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:2781:3800]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:57.208238Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.208252Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.211609Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2850:3861], Recipient [1:2781:3800]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.211645Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:2781:3800]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:57.211650Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.211658Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.212626Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:2852:3863], Recipient [1:2781:3800]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.212654Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:2781:3800]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 503 } 2025-06-30T09:20:57.212658Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.212662Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.504 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ExtendLeaseRestartRace [GOOD] Test command err: 2025-06-30T09:20:55.424430Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.428292Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.428369Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.428406Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.428454Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.428494Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.428523Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.434153Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.434240Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.434285Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.434323Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.434372Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.434401Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:55.434439Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.434518Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.434554Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.434582Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.436218Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.436293Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.436335Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.436372Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.436410Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.436515Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.437875Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.438010Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.438080Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.438126Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.438168Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.438190Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.438242Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.438263Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.438954Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.439060Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.439093Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.439121Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.439149Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.439194Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.439232Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.439276Z node 8 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.439306Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.439652Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.440330Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.440410Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.440432Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.440449Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.440465Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.444421Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.444999Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.445072Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.445091Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.445162Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.445460Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.445844Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.445915Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.446073Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.446096Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.446232Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.446292Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.446382Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { 
NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.446416Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.447288Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.447492Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.450788Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.450995Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.464500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:55.464523Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:55.468453Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:55.468865Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:55.468927Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:55.469108Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:55.469492Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:55.469515Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:55.469545Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:55.469555Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:55.469559Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:55.469569Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:55.469591Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:55.469596Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:55.469599Z node 1 :NODE_BROKER DEB ... 
rConnected 2025-06-30T09:20:57.525000Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039939, Sender [1:635:2213], Recipient [1:766:2272]: NKikimr::NNodeBroker::TEvNodeBroker::TEvExtendLeaseRequest { NodeId: 1024 } 2025-06-30T09:20:57.525006Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:257: StateWork, processing event TEvNodeBroker::TEvExtendLeaseRequest 2025-06-30T09:20:57.525017Z node 1 :NODE_BROKER DEBUG: node_broker__extend_lease.cpp:44: TTxExtendLease Execute node #1024 2025-06-30T09:20:57.525029Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:317: [Dirty] Extended lease of #1024.v4 host1:1001 up to Thu, 01 Jan 1970 03:00:00 UTC (lease 2) 2025-06-30T09:20:57.525042Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v4 host1:1001 to database state=Active resolvehost=host1.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=2 expire=Thu, 01 Jan 1970 03:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:57.525088Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 3 to 4 2025-06-30T09:20:57.525093Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=4 2025-06-30T09:20:57.525170Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:819:2316], Recipient [1:766:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.525189Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:820:2317], Recipient [1:766:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.525213Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:25:2072], Recipient [1:766:2272]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 3 } 2025-06-30T09:20:57.525218Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.525222Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #3 2025-06-30T09:20:57.525231Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:776:2280] 2025-06-30T09:20:57.525235Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.525240Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #3 2025-06-30T09:20:57.525247Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:778:2282] 2025-06-30T09:20:57.525251Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.525255Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #3 2025-06-30T09:20:57.525264Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [3:83:2072], Recipient [1:779:2283] 2025-06-30T09:20:57.525268Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.525272Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #3 2025-06-30T09:20:57.525279Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], 
Recipient [1:819:2316] 2025-06-30T09:20:57.525283Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.525287Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #3 2025-06-30T09:20:57.525294Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:820:2317] 2025-06-30T09:20:57.525298Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.525302Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #3 2025-06-30T09:20:57.525309Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [2:54:2072], Recipient [1:775:2279] 2025-06-30T09:20:57.525313Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.525317Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #3 2025-06-30T09:20:57.525324Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:774:2278] 2025-06-30T09:20:57.525328Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.525332Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #3 2025-06-30T09:20:57.536259Z node 1 :NODE_BROKER DEBUG: node_broker__extend_lease.cpp:78: TTxExtendLease Complete 2025-06-30T09:20:57.536316Z node 1 :NODE_BROKER TRACE: node_broker__extend_lease.cpp:82: TTxExtendLease reply with: NKikimr::NNodeBroker::TEvNodeBroker::TEvExtendLeaseResponse { Status { Code: OK } NodeId: 1024 Expire: 10800023000 Epoch { Id: 2 Version: 4 Start: 3600023000 End: 7200023000 NextEnd: 10800023000 } } 2025-06-30T09:20:57.536336Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:317: [Committed] Extended lease of #1024.v4 host1:1001 up to Thu, 01 Jan 1970 03:00:00 UTC (lease 2) 2025-06-30T09:20:57.536346Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 3 to 4 2025-06-30T09:20:57.536351Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v4 host1:1001 to epoch cache 2025-06-30T09:20:57.536373Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v4 to update nodes log ... 
waiting for epoch update 2025-06-30T09:20:57.536506Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:842:2335], Recipient [1:766:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.536535Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:766:2272]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:57.536541Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.536553Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #2.4 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z 2025-06-30T09:20:57.768319Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435072, Sender [1:766:2272], Recipient [1:766:2272]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvUpdateEpoch 2025-06-30T09:20:57.768339Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:263: StateWork, processing event TEvPrivate::TEvUpdateEpoch 2025-06-30T09:20:57.768359Z node 1 :NODE_BROKER DEBUG: node_broker__update_epoch.cpp:20: TTxUpdateEpoch Execute 2025-06-30T09:20:57.768369Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:548: [Dirty] Move to new epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z, approximate epoch start #3.5 2025-06-30T09:20:57.768374Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.768398Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #3.5 2025-06-30T09:20:57.941761Z node 1 :NODE_BROKER DEBUG: node_broker__update_epoch.cpp:31: TTxUpdateEpoch Complete 2025-06-30T09:20:57.941791Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:548: [Committed] Move to new epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z, approximate epoch start #3.5 2025-06-30T09:20:57.941806Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T03:00:00.023000Z 2025-06-30T09:20:57.941825Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z, approximate epoch start #3.5 nodes=1 expired=0 2025-06-30T09:20:57.941852Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z nodes=1 expired=0 removed=0 2025-06-30T09:20:57.941871Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v4 to update nodes log 2025-06-30T09:20:57.941889Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.941898Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.941906Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.941914Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send 
TEvNodesInfo for epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.941921Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.941929Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.941937Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.941945Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.962689Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:865:2346], Recipient [1:766:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.962730Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:766:2272]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:57.962751Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.962761Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-30T09:20:57.962814Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:867:2348], Recipient [1:766:2272]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:57.962826Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:635:2213], Recipient [1:766:2272]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:57.962829Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:57.962833Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.5 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z >> THealthCheckTest::GreenStatusWhenInitPending >> THealthCheckTest::OnlyDiskIssueOnInitialPDisks [GOOD] >> THealthCheckTest::ProtobufBelowLimitFor10VdisksIssues >> THealthCheckTest::StorageLimit80 [GOOD] >> THealthCheckTest::ServerlessBadTablets [GOOD] >> TConsoleTests::TestTenantGeneration [GOOD] >> TConsoleTests::TestTenantGenerationExtSubdomain >> THealthCheckTest::NoStoragePools [GOOD] >> TPartBtreeIndexIteration::FewNodes_History_Slices [GOOD] >> THealthCheckTest::DontIgnoreServerlessWithExclusiveNodesWhenNotSpecific [GOOD] >> THealthCheckTest::StorageNoQuota >> THealthCheckTest::ServerlessWhenTroublesWithSharedNodes >> THealthCheckTest::NoBscResponse >> THealthCheckTest::CLusterNotBootstrapped >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices >> THealthCheckTest::ServerlessWhenTroublesWithSharedNodes [GOOD] >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes >> THealthCheckTest::ProtobufBelowLimitFor10VdisksIssues [GOOD] >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> 
TLocalTests::TestAddTenant [GOOD] Test command err: 2025-06-30T09:20:57.084367Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:120:2153] Bootstrap 2025-06-30T09:20:57.104324Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:120:2153] Become StateWork (SchemeCache [1:126:2159]) 2025-06-30T09:20:57.111602Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:57.112931Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:57.112968Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:20:57.113366Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:57.113464Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:20:57.113554Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:57.113559Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:20:57.113580Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:20:57.115843Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:20:57.115878Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:20:57.115887Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:20:57.115898Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:57.115907Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:20:57.115940Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:57.137540Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:20:57.137584Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:57.148483Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:20:57.148539Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:57.148560Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:20:57.148574Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:57.148605Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} 
Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:20:57.148615Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:57.148621Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:20:57.148630Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:57.159397Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:20:57.159439Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:57.170159Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:20:57.170205Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:20:57.170348Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:20:57.170352Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:20:57.171712Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:20:57.171725Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:20:57.171995Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-30T09:20:57.172199Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/0045ee/r3tmp/tmpfKco4I/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } } } } 2025-06-30T09:20:57.172257Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/cl3w/0045ee/r3tmp/tmpfKco4I/pdisk_1.dat 2025-06-30T09:20:57.172407Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-30T09:20:57.172435Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:57.172447Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-30T09:20:57.172479Z node 1 :BS_CONTROLLER 
DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-30T09:20:57.172490Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-30T09:20:57.172856Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-30T09:20:57.172893Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-30T09:20:57.183711Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-30T09:20:57.183844Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:20:57.183930Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:20:57.183943Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:20:57.183972Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:20:57.183987Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:20:57.184066Z node 1 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:57.184073Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:57.184076Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:57.184100Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:338:2307] 2025-06-30T09:20:57.184419Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:20:57.184423Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:332:2303] 2025-06-30T09:20:57.184535Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:338:2307]} 2025-06-30T09:20:57.184543Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:57.184550Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:57.184552Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:57.198536Z node 1 :LOCAL DEBUG: local.cpp:1258: TDomainLocal(dc-1): TDomainLocal::TEvClientConnected for dc-1 shard 72057594046578944 2025-06-30T09:20:57.198558Z node 1 :LOCAL DEBUG: local.cpp:1117: TDomainLocal(dc-1): Send resolve request for /dc-1/users/tenant-1 to schemeshard 72057594046578944 2025-06-30T09:20:57.204518Z node 1 :LOCAL DEBUG: local.cpp:1285: TDomainLocal(dc-1): HandleResolve from schemeshard 72057594046578944: Status: StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "/dc-1/users/tenant-1" PathId: 100 SchemeshardId: 72057594046578944 PathType: EPathTypeSubDomain } DomainDescription { SchemeShardId_Depricated: 72057594046578944 
PathId_Depricated: 100 DomainKey { SchemeShard: 72057594046578944 PathId: 100 } } } 2025-06-30T09:20:57.204563Z node 1 :LOCAL DEBUG: local.cpp:1223: TDomainLocal(dc-1): Binding tenant /dc-1/users/tenant-1 to hive 72057594046578946 (allocated resources: CPU: 5 Memory: 1 Network: 1) 2025-06-30T09:20:57.204680Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:57.204685Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:57.204698Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:394:2343] 2025-06-30T09:20:57.205083Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:394:2343]} 2025-06-30T09:20:57.205134Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:57.205141Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:57.205143Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:57.206858Z node 1 :LOCAL DEBUG: local.cpp:1117: TDomainLocal(dc-1): Send resolve request for /dc-1/users/tenant-2 to schemeshard 72057594046578944 2025-06-30T09:20:57.206919Z node 1 :LOCAL DEBUG: local.cpp:1285: TDomainLocal(dc-1): HandleResolve from schemeshard 72057594046578944: Status: StatusSuccess Path: "/dc-1/users/tenant-2" PathDescription { Self { Name: "/dc-1/users/tenant-2" PathId: 101 SchemeshardId: 72057594046578944 PathType: EPathTypeSubDomain } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 101 DomainKey { SchemeShard: 72057594046578944 PathId: 101 } } } 2025-06-30T09:20:57.206931Z node 1 :LOCAL DEBUG: local.cpp:1223: TDomainLocal(dc-1): Binding tenant /dc-1/users/tenant-2 to hive 72057594046578946 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:20:57.206980Z node 1 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:20:57.206984Z node 1 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:20:57.207007Z node 1 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:427:2363] 2025-06-30T09:20:57.207123Z node 1 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:427:2363]} 2025-06-30T09:20:57.207135Z node 1 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:20:57.207140Z node 1 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:20:57.207142Z node 1 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:20:57.207197Z node 1 :LOCAL DEBUG: local.cpp:1117: TDomainLocal(dc-1): Send resolve request for /dc-1/users/tenant-unknown to schemeshard 72057594046578944 2025-06-30T09:20:57.207210Z node 1 :LOCAL DEBUG: local.cpp:1285: TDomainLocal(dc-1): HandleResolve from schemeshard 72057594046578944: Status: StatusPathDoesNotExist Path: "/dc-1/users/tenant-unknown" 2025-06-30T09:20:57.207214Z node 1 :LOCAL ERROR: local.cpp:1301: TDomainLocal(dc-1): Receive TEvDescribeSchemeResult with bad status StatusPathDoesNotExist reason is <> while resolving subdomain dc-1 2025-06-30T09:20:57.207224Z node 1 :LOCAL ERROR: local.cpp:1551: Unknown domain dc-3 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query [GOOD] Test command err: 
2025-06-30T09:19:14.224953Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669514411767075:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:14.225118Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:19:14.227525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a45/r3tmp/tmpKvG0b7/pdisk_1.dat 2025-06-30T09:19:14.266459Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669514411766867:2080] 1751275154152961 != 1751275154152964 2025-06-30T09:19:14.272793Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21798, node 1 2025-06-30T09:19:14.303080Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004a45/r3tmp/yandexnVzHvV.tmp 2025-06-30T09:19:14.303097Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004a45/r3tmp/yandexnVzHvV.tmp 2025-06-30T09:19:14.306892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004a45/r3tmp/yandexnVzHvV.tmp 2025-06-30T09:19:14.306983Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:14.310722Z INFO: TTestServer started on Port 6069 GrpcPort 21798 TClient is connected to server localhost:6069 2025-06-30T09:19:14.332543Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:14.332580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:14.335569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected PQClient connected to localhost:21798 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:19:14.399939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:14.407311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:14.420532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:19:14.428230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:19:14.534887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:14.544732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:19:14.840061Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669514411767633:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:14.840089Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669514411767652:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:14.840105Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:14.841165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:14.844044Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669514411767656:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:19:14.919192Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669514411767720:2435] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:14.934178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:14.964828Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669514411767728:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:19:14.965974Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=Mjg5ODRlMWUtYWRmNWU0NGMtMzkwZGM4NmEtNDFjNzM3MTY=, ActorId: [1:7521669514411767612:2293], ActorState: ExecuteState, TraceId: 01jz024bcmfm1nphtcxbn0y003, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:19:14.966628Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:19:14.967771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:15.024266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669518706735308:2610] 2025-06-30T09:19:15.151419Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:19.163177Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669514411767075:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:19.163212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:19:20.262225Z :WithoutPartitionWithSplit INFO: TTopicSdkTestSetup started 2025-06-30T09:19:20.267666Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:19:20.274277Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521669540181572004:2699] connected; active server actors: 1 2025-06-30T09:19:20.274340Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-30T09:19:20.275220Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:19:20.275295Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-30T09:19:20.275467Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 720751862240 ... ctor.cpp:125: session cookie 3 consumer test-consumer session test-consumer_11_3_9417756028265192986_v1 grpc read failed 2025-06-30T09:20:58.939704Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer test-consumer session test-consumer_11_3_9417756028265192986_v1 grpc closed 2025-06-30T09:20:58.939719Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer test-consumer session test-consumer_11_3_9417756028265192986_v1 is DEAD 2025-06-30T09:20:58.939813Z :DEBUG: [/Root] 0x00001013393DF290 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_3310318403227595413_v1 Close 2025-06-30T09:20:58.939790Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [11:7521669951932504810:3174]: session cookie 4 consumer test-consumer session test-consumer_11_3_9417756028265192986_v1 grpc read done: success# 0, data# { } 2025-06-30T09:20:58.939800Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [11:7521669951932504810:3174]: session cookie 4 consumer test-consumer session test-consumer_11_3_9417756028265192986_v1grpc read failed 2025-06-30T09:20:58.939804Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [11:7521669951932504810:3174]: session cookie 4 consumer test-consumer session test-consumer_11_3_9417756028265192986_v1 grpc closed 2025-06-30T09:20:58.939807Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [11:7521669951932504810:3174]: session cookie 4 consumer test-consumer session test-consumer_11_3_9417756028265192986_v1 proxy is DEAD 2025-06-30T09:20:58.939836Z :DEBUG: [/Root] 0x00001013393DF290 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_3310318403227595413_v1 Close 2025-06-30T09:20:58.939847Z :NOTICE: [/Root] [/Root] [6acba7fc-3ced4f90-4a069320-ce2d2eae] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:20:58.939843Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037896] Destroy direct read session test-consumer_11_3_9417756028265192986_v1 2025-06-30T09:20:58.939856Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521669951932504804:3171] destroyed 2025-06-30T09:20:58.939868Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_3_9417756028265192986_v1 2025-06-30T09:20:58.939870Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037897][topic_B] pipe [11:7521669951932504801:3168] disconnected; active server actors: 1 2025-06-30T09:20:58.939873Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037897][topic_B] pipe [11:7521669951932504801:3168] client test-consumer disconnected session test-consumer_11_3_9417756028265192986_v1 2025-06-30T09:20:58.939893Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_3310318403227595413_v1 grpc read done: success# 0, data# { } 2025-06-30T09:20:58.939895Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_3310318403227595413_v1 grpc read failed 2025-06-30T09:20:58.939900Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 1 consumer test-consumer session test-consumer_11_1_3310318403227595413_v1 closed 2025-06-30T09:20:58.939929Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_3310318403227595413_v1 is DEAD 2025-06-30T09:20:58.939977Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_3310318403227595413_v1 2025-06-30T09:20:58.939986Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669943342570111:3127] destroyed 2025-06-30T09:20:58.939994Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:398: Direct read cache: close session for proxy [11:7521669943342570117:3130] 2025-06-30T09:20:58.940002Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521669943342570108:3124] disconnected; active server actors: 1 2025-06-30T09:20:58.940021Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|b715c0d4-c677fdb5-f36a97b9-5c50c735_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-30T09:20:58.940031Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|b715c0d4-c677fdb5-f36a97b9-5c50c735_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:20:58.940005Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521669943342570108:3124] client test-consumer disconnected session test-consumer_11_1_3310318403227595413_v1 2025-06-30T09:20:58.940039Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|b715c0d4-c677fdb5-f36a97b9-5c50c735_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:20:58.940006Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_3310318403227595413_v1 2025-06-30T09:20:58.940120Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|b715c0d4-c677fdb5-f36a97b9-5c50c735_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:20:58.940104Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [11:7521669943342570117:3130]: session cookie 2 consumer test-consumer session test-consumer_11_1_3310318403227595413_v1 grpc read done: success# 0, data# { } 2025-06-30T09:20:58.940125Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|b715c0d4-c677fdb5-f36a97b9-5c50c735_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:20:58.940113Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [11:7521669943342570117:3130]: session cookie 2 consumer test-consumer session test-consumer_11_1_3310318403227595413_v1grpc read failed 2025-06-30T09:20:58.940117Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [11:7521669943342570117:3130]: session cookie 2 consumer test-consumer session test-consumer_11_1_3310318403227595413_v1 grpc closed 2025-06-30T09:20:58.940120Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [11:7521669943342570117:3130]: session cookie 2 consumer test-consumer session test-consumer_11_1_3310318403227595413_v1 proxy is DEAD 2025-06-30T09:20:58.940286Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|4df98fdf-981e0c5e-ce4636cd-29b64c29_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-30T09:20:58.940290Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|4df98fdf-981e0c5e-ce4636cd-29b64c29_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:20:58.940293Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|4df98fdf-981e0c5e-ce4636cd-29b64c29_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:20:58.940282Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message_group_id|b715c0d4-c677fdb5-f36a97b9-5c50c735_0 grpc read done: success: 0 data: 2025-06-30T09:20:58.940290Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message_group_id|b715c0d4-c677fdb5-f36a97b9-5c50c735_0 grpc read failed 2025-06-30T09:20:58.940295Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message_group_id|b715c0d4-c677fdb5-f36a97b9-5c50c735_0 grpc closed 2025-06-30T09:20:58.940297Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message_group_id|b715c0d4-c677fdb5-f36a97b9-5c50c735_0 is DEAD 2025-06-30T09:20:58.940443Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|4df98fdf-981e0c5e-ce4636cd-29b64c29_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:20:58.940452Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|4df98fdf-981e0c5e-ce4636cd-29b64c29_0] PartitionId [0] Generation [1] Write session: OnReadDone gRpcStatusCode: 1, Msg: CANCELLED, Details: , InternalError: 0 2025-06-30T09:20:58.940464Z :TRACE: [/Root] TRACE_EVENT Error status=CLIENT_CANCELLED 2025-06-30T09:20:58.940468Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|4df98fdf-981e0c5e-ce4636cd-29b64c29_0] PartitionId [0] Generation [1] Write session is aborting and will not restart 2025-06-30T09:20:58.940485Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|4df98fdf-981e0c5e-ce4636cd-29b64c29_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:20:58.940474Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:20:58.940488Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:20:58.940518Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|4df98fdf-981e0c5e-ce4636cd-29b64c29_0 grpc read done: success: 0 data: 2025-06-30T09:20:58.940523Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|4df98fdf-981e0c5e-ce4636cd-29b64c29_0 grpc read failed 2025-06-30T09:20:58.940527Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|4df98fdf-981e0c5e-ce4636cd-29b64c29_0 grpc closed 2025-06-30T09:20:58.940528Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|4df98fdf-981e0c5e-ce4636cd-29b64c29_0 is DEAD 2025-06-30T09:20:58.940530Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521669943342569892:3093] destroyed 2025-06-30T09:20:58.940537Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521669943342569895:3093] destroyed 
2025-06-30T09:20:58.940546Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:20:58.940675Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:20:58.940690Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:20:58.940724Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669943342569843:3082] destroyed 2025-06-30T09:20:58.940734Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669943342569840:3082] destroyed 2025-06-30T09:20:58.940748Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:20:58.941488Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=293, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:20:59.028038Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037896, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=1076, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunning [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunningExtSubdomain ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> IncorrectQueries::ManyQueriesThroughOneBSQueue [GOOD] Test command err: RandomSeed# 11982547709986403291 GroupId# 2181038084 Disks from different fail realms occupy the same Realm, first VDisk Id# {FailRealm# 0 FailDomain# 0 VDisk# 0} second VDisk Id# {FailRealm# 1 FailDomain# 1 VDisk# 0} 2025-06-30T09:19:12.402003Z 2 00h01m40.001024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:0:1:0]: (2181038083) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:12.402865Z 2 00h01m40.001024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:0:1:0]: (2181038083) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 926561524686765660] 2025-06-30T09:19:12.405484Z 2 00h01m40.001024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:0:1:0]: (2181038083) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:12.419221Z 4 00h01m40.001536s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:0:1:0]: (2181038084) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:12.419994Z 4 00h01m40.001536s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:0:1:0]: (2181038084) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 9840350210464458181] 2025-06-30T09:19:12.421084Z 4 00h01m40.001536s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:0:1:0]: (2181038084) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:12.445255Z 9 00h01m40.002048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:12.446036Z 9 00h01m40.002048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 
6053028680757605951] 2025-06-30T09:19:12.455540Z 9 00h01m40.002048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:12.469644Z 2 00h01m40.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000001:_:0:2:0]: (2181038081) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:12.470447Z 2 00h01m40.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000001:_:0:2:0]: (2181038081) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13930222746216432199] 2025-06-30T09:19:12.471496Z 2 00h01m40.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000001:_:0:2:0]: (2181038081) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:12.483656Z 5 00h01m40.003072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000002:_:0:1:0]: (2181038082) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:12.484464Z 5 00h01m40.003072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000002:_:0:1:0]: (2181038082) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 9546250383017774584] 2025-06-30T09:19:12.485518Z 5 00h01m40.003072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000002:_:0:1:0]: (2181038082) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:12.782221Z 7 00h01m55.001536s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:1:0:0]: (2181038083) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:12.795493Z 7 00h01m55.001536s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:1:0:0]: (2181038083) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4620131288160369400] 2025-06-30T09:19:12.796721Z 7 00h01m55.001536s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:1:0:0]: (2181038083) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:12.822519Z 6 00h01m55.002048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:1:1:0]: (2181038084) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:12.823351Z 6 00h01m55.002048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:1:1:0]: (2181038084) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17806963414975908863] 2025-06-30T09:19:12.824365Z 6 00h01m55.002048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:1:1:0]: (2181038084) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:12.842842Z 10 00h01m55.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:12.843038Z 10 00h01m55.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 12509313273611341479] 2025-06-30T09:19:12.844220Z 10 00h01m55.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:12.860606Z 12 00h01m55.003072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000001:_:1:0:0]: (2181038081) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:12.861417Z 12 00h01m55.003072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000001:_:1:0:0]: (2181038081) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 1311896302408945854] 2025-06-30T09:19:12.862496Z 12 00h01m55.003072s :BS_SYNCER ERROR: PDiskId# 1000 
VDISK[82000001:_:1:0:0]: (2181038081) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:12.875890Z 1 00h01m55.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000002:_:1:0:0]: (2181038082) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:12.876665Z 1 00h01m55.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000002:_:1:0:0]: (2181038082) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13620244192058846954] 2025-06-30T09:19:12.877684Z 1 00h01m55.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000002:_:1:0:0]: (2181038082) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:13.119455Z 13 00h02m10.002048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:1:2:0]: (2181038083) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:13.120227Z 13 00h02m10.002048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:1:2:0]: (2181038083) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 9916145473597892299] 2025-06-30T09:19:13.121355Z 13 00h02m10.002048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:1:2:0]: (2181038083) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:13.136557Z 14 00h02m10.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:1:2:0]: (2181038084) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:13.137289Z 14 00h02m10.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:1:2:0]: (2181038084) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 188656373806576481] 2025-06-30T09:19:13.138241Z 14 00h02m10.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:1:2:0]: (2181038084) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:13.151387Z 14 00h02m10.003072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:13.151843Z 14 00h02m10.003072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13171191470781092953] 2025-06-30T09:19:13.152451Z 14 00h02m10.003072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:13.163956Z 7 00h02m10.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000001:_:1:1:0]: (2181038081) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:13.164643Z 7 00h02m10.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000001:_:1:1:0]: (2181038081) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 11294777699640407784] 2025-06-30T09:19:13.165588Z 7 00h02m10.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000001:_:1:1:0]: (2181038081) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:13.177775Z 3 00h02m10.004096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000002:_:1:1:0]: (2181038082) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:13.178526Z 3 00h02m10.004096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000002:_:1:1:0]: (2181038082) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5328174393303626250] 2025-06-30T09:19:13.179513Z 3 00h02m10.004096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000002:_:1:1:0]: (2181038082) THullOsirisActor: FINISH: BlobsResurrected# 0 
PartsResurrected# 0 2025-06-30T09:19:13.409947Z 1 00h02m25.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:2:2:0]: (2181038083) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:13.410073Z 1 00h02m25.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:2:2:0]: (2181038083) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16379691082075103267] 2025-06-30T09:19:13.410835Z 1 00h02m25.002560s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000003:_:2:2:0]: (2181038083) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:13.422845Z 3 00h02m25.003072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:2:0:0]: (2181038084) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:13.422945Z 3 00h02m25.003072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:2:0:0]: (2181038084) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4332106628979554936] 2025-06-30T09:19:13.423614Z 3 00h02m25.003072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:2:0:0]: (2181038084) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:13.435459Z 8 00h02m25.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:13.435597Z 8 00h02m25.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 3039841584458219969] 2025-06-30T09:19:13.436557Z 8 00h02m25.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:13.445948Z 9 00h02m25.004096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000001:_:2:0:0]: (2181038081) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:13.446053Z 9 00h02m25.004096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000001:_:2:0:0]: (2181038081) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 12027307428912433065] 2025-06-30T09:19:13.446705Z 9 00h02m25.004096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000001:_:2:0:0]: (2181038081) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:13.667015Z 11 00h02m40.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:2:1:0]: (2181038084) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:13.667196Z 11 00h02m40.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:2:1:0]: (2181038084) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 14315951244127794287] 2025-06-30T09:19:13.668280Z 11 00h02m40.003584s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000004:_:2:1:0]: (2181038084) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-30T09:19:13.681950Z 2 00h02m40.004096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:13.682114Z 2 00h02m40.004096s :BS_SYNCER ERROR: PDiskId# 1000 VDIS ... 
# status# DEADLINE from# [82000000:1:0:2:0] Marker# BSVSP01 2025-06-30T09:20:55.710225Z 4 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:22650:4194304:0] PatchedBlobId# [1:1:11:10:2170:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-30T09:20:55.710239Z 8 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:2302:4194304:0] PatchedBlobId# [1:1:11:10:2302:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:55.710250Z 2 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:96444:4194304:0] PatchedBlobId# [1:1:11:10:2236:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:55.710259Z 6 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:2368:4194304:0] PatchedBlobId# [1:1:11:10:100672:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:55.710269Z 1 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:145706:4194304:0] PatchedBlobId# [1:1:11:10:2346:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:55.710278Z 7 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:2412:4194304:0] PatchedBlobId# [1:1:11:10:51564:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:55.710301Z 7 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:2258:4194304:0] PatchedBlobId# [1:1:11:10:75986:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:55.710315Z 2 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:72022:4194304:0] PatchedBlobId# [1:1:11:10:2390:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:55.710326Z 3 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:47336:4194304:0] PatchedBlobId# 
[1:1:11:10:2280:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:55.710337Z 4 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:22804:4194304:0] PatchedBlobId# [1:1:11:10:2324:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-30T09:20:55.710348Z 1 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:2192:4194304:0] PatchedBlobId# [1:1:11:10:2192:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-30T09:20:55.710357Z 5 00h00m30.020000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:22694:4194304:0] PatchedBlobId# [1:1:11:10:2214:4194304:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:2:0] Marker# BSVSP01 2025-06-30T09:20:56.043291Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: ingress mismatch; id# [1:1:0:0:0:100:13] Marker# BSVS11 2025-06-30T09:20:56.196134Z 7 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 100 PartSize# 32 id# [1:1:0:0:0:100:13] Marker# BSVS01 2025-06-30T09:20:56.298677Z 3 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: ingress mismatch; id# [1:1:0:0:0:100:13] Marker# BSVS11 2025-06-30T09:20:56.450684Z 7 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: ingress mismatch; id# [1:1:0:0:0:100:13] Marker# BSVS11 2025-06-30T09:20:56.642393Z 4 00h00m46.310512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVPut: ingress mismatch; id# [1:1:0:0:0:100:13] Marker# BSVS11 2025-06-30T09:20:56.727815Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 58 PartSize# 100 id# [1:1:0:0:0:100:1] Marker# BSVS01 2025-06-30T09:20:56.727992Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 142 PartSize# 100 id# [1:1:0:0:0:100:1] Marker# BSVS01 2025-06-30T09:20:56.728119Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 0 PartSize# 100 id# [1:1:0:0:0:100:1] Marker# BSVS01 2025-06-30T09:20:56.810467Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:20:56.810644Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 0 id# [0:0:0:0:0:0:0] Marker# BSVS01 
2025-06-30T09:20:56.810818Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 15 id# [2748:0:4294967040:0:0:15:15] Marker# BSVS01 2025-06-30T09:20:56.810948Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 15 id# [0:0:4294967040:0:0:15:15] Marker# BSVS01 2025-06-30T09:20:56.967454Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:20:56.967694Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 0 id# [0:0:0:0:0:0:0] Marker# BSVS01 2025-06-30T09:20:56.967874Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 32 id# [2748:0:4294967040:0:0:15:15] Marker# BSVS01 2025-06-30T09:20:56.968049Z 4 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 32 id# [0:0:4294967040:0:0:15:15] Marker# BSVS01 2025-06-30T09:20:57.069807Z 2 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:20:57.070044Z 2 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 0 id# [0:0:0:0:0:0:0] Marker# BSVS01 2025-06-30T09:20:57.070208Z 2 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 15 id# [2748:0:4294967040:0:0:15:15] Marker# BSVS01 2025-06-30T09:20:57.070369Z 4 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 15 id# [0:0:4294967040:0:0:15:15] Marker# BSVS01 2025-06-30T09:20:57.225731Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:20:57.225983Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 0 id# [0:0:0:0:0:0:0] Marker# BSVS01 2025-06-30T09:20:57.226163Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 15 id# [2748:0:4294967040:0:0:15:15] Marker# BSVS01 2025-06-30T09:20:57.226336Z 4 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 15 id# [0:0:4294967040:0:0:15:15] Marker# BSVS01 2025-06-30T09:20:57.412187Z 1 00h00m46.310512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:20:57.412386Z 1 00h00m46.310512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: 
(2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 0 id# [0:0:0:0:0:0:0] Marker# BSVS01 2025-06-30T09:20:57.412565Z 7 00h00m46.310512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 15 id# [2748:0:4294967040:0:0:15:15] Marker# BSVS01 2025-06-30T09:20:57.412757Z 8 00h00m46.310512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 42 PartSize# 15 id# [0:0:4294967040:0:0:15:15] Marker# BSVS01 2025-06-30T09:20:57.847406Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:20:57.847610Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:20:57.847800Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 3 Marker# BSVS39 2025-06-30T09:20:57.847834Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 2 Marker# BSVS39 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExtendLease [GOOD] Test command err: 2025-06-30T09:20:56.401978Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.405691Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.405734Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.405762Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.405806Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.405853Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.405873Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.409652Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.409716Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.409760Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.409802Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.409872Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.409914Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:56.409942Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.410016Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.410054Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.410076Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.411020Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.411051Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.411076Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.411097Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.411116Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.411171Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.412133Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.412236Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.412283Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.412316Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.412353Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.412371Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.412387Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.412400Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.412998Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.413090Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.413114Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.413138Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.413156Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.413190Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.413221Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:56.413269Z node 8 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.413295Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.414019Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.414075Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.414108Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.414252Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.414412Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.419353Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.419514Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.420275Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.420599Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.420907Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.421273Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.421510Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.421565Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.421678Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.421735Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.421830Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.423285Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.423925Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.424616Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.425175Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.426174Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.426619Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.428132Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.428390Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:56.444009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:56.444038Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:56.449309Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:56.449852Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:56.449974Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:56.450226Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:56.451062Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:56.451281Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:56.451347Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:56.451367Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:56.451374Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:56.451396Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:56.451417Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:56.451423Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:56.451429Z node 1 :NODE_BROKER DEB ... 
G: node_broker.cpp:1205: [DB] Loaded nodeV2 #1024.v2 { NodeId: 1024, State: Active, Version: 2, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:56.752063Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1229: [DB] Migrating changed node #1024.v4 { NodeId: 1024, State: Active, Version: 4, Host: host1, Port: 1001, ResolveHost: host1.yandex.net, Address: 1.2.3.4, Lease: 2, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 0, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:56.752070Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1205: [DB] Loaded nodeV2 #1025.v3 { NodeId: 1025, State: Active, Version: 3, Host: host2, Port: 1001, ResolveHost: host2.yandex.net, Address: 1.2.3.4, Lease: 1, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 1, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:56.752074Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1229: [DB] Migrating changed node #1025.v4 { NodeId: 1025, State: Active, Version: 4, Host: host2, Port: 1001, ResolveHost: host2.yandex.net, Address: 1.2.3.4, Lease: 2, Expire: Thu, 01 Jan 1970 02:00:00 UTC, Location: DC=1/M=2/R=3/U=4/, AuthorizedByCertificate: 0, BridgePileId: , SlotIndex: 1, ServicedSubDomain: 72057594046678944:1 } 2025-06-30T09:20:56.752081Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 3 to 4 2025-06-30T09:20:56.752089Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:56.752115Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:56.752119Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 2 2025-06-30T09:20:56.752124Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.4 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:56.752141Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v4 host2:1001 to database state=Active resolvehost=host2.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=2 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:56.752186Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v4 host1:1001 to database state=Active resolvehost=host1.yandex.net address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=2 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:56.752199Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:56.774344Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:56.774387Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.025000Z 2025-06-30T09:20:56.774397Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.4 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 
1970-01-01T02:00:00.025000Z, approximate epoch start #1.1 nodes=2 expired=0 2025-06-30T09:20:56.774427Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.4 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z nodes=2 expired=0 removed=0 2025-06-30T09:20:56.774432Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v4 to update nodes log 2025-06-30T09:20:56.774438Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v4 to update nodes log 2025-06-30T09:20:56.774520Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:699:2247], Recipient [1:690:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.774569Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:700:2248], Recipient [1:690:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.774589Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:699:2247] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:56.774623Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:680: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:700:2248] Leader: 1 Dead: 0 Generation: 3 VersionInfo:  } 2025-06-30T09:20:56.774668Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:699:2247] 2025-06-30T09:20:56.774673Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.774678Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:56.774686Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:700:2248] 2025-06-30T09:20:56.774689Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.774691Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #2 2025-06-30T09:20:56.774824Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:730:2272], Recipient [1:690:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.774844Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2215], Recipient [1:690:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:56.774847Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.774852Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.4 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:56.774900Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:732:2274], Recipient [1:690:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.774914Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2215], Recipient [1:690:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:56.774916Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event 
TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.774920Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.4 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:56.774967Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:734:2276], Recipient [1:690:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.774995Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2215], Recipient [1:690:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:56.774998Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.775001Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.4 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:56.775039Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:736:2278], Recipient [1:690:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.775054Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2215], Recipient [1:690:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 3 } 2025-06-30T09:20:56.775057Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.775060Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.4 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:56.775101Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:738:2280], Recipient [1:690:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.775124Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:637:2215], Recipient [1:690:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 3 SeqNo: 2 } 2025-06-30T09:20:56.775129Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:56.775134Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:637:2215], seqNo: 2, version: 3, server pipe id: [1:738:2280] 2025-06-30T09:20:56.775139Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v3 -> v4 to [1:637:2215] 2025-06-30T09:20:56.775183Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:738:2280], Recipient [1:690:2241]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:56.775187Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:637:2215], seqNo: 2, server pipe id: [1:738:2280] 2025-06-30T09:20:56.775209Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:740:2282], Recipient [1:690:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.775224Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:637:2215], Recipient [1:690:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:56.775229Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 
2025-06-30T09:20:56.775259Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200026000 Name: "slot-0" } } 2025-06-30T09:20:56.775308Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:742:2284], Recipient [1:690:2241]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.775322Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:637:2215], Recipient [1:690:2241]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-30T09:20:56.775324Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:56.775333Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200026000 Name: "slot-1" } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::RegistrationPipeliningNodeName [GOOD] Test command err: 2025-06-30T09:20:55.649635Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.652981Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.653018Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.653039Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.653066Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.653089Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.653105Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.656405Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.656455Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.656488Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.656514Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.656549Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.656572Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.656591Z node 1 :NAMESERVICE 
DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.656652Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.656685Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.656706Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.657514Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.657535Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.657550Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.657564Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.657577Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.657615Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.658563Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.658652Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.658692Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.658722Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.658780Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.658801Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.658815Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.658828Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.659315Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.659392Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.659414Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.659435Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.659450Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.659481Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.659511Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.659608Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.659631Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.659926Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.660077Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.660102Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.660147Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.660165Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.660211Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.663237Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.664062Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.664115Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.664148Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.664374Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.664414Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.664643Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.664827Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.665034Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.665115Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.665264Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.665427Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.665558Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:55.665642Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.665936Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.666152Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.666527Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.666683Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.666803Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.666844Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.666951Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.667108Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.667128Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.667344Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.678511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:55.678530Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:55.681806Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:55.682116Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:55.682174Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:55.682304Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:55.682619Z node 1 :NODE_BROKER DEBUG: node_ ... 
roker_impl.h:249: StateWork, received event# 269877761, Sender [1:750:2302], Recipient [1:575:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.956135Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:697:2267], Recipient [1:575:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:55.956139Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:55.956151Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } 2025-06-30T09:20:55.956191Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:752:2304], Recipient [1:575:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.956202Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:697:2267], Recipient [1:575:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-30T09:20:55.956205Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:55.956209Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } 2025-06-30T09:20:55.956249Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:754:2306], Recipient [1:575:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.956265Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:697:2267], Recipient [1:575:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1026 } 2025-06-30T09:20:55.956267Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:55.956272Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } 2025-06-30T09:20:55.956309Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:756:2308], Recipient [1:575:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.956320Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:697:2267], Recipient [1:575:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1027 } 2025-06-30T09:20:55.956323Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:55.956327Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } ... 
unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR 2025-06-30T09:20:55.956871Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:55.956885Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1024.v2 host1:1001 2025-06-30T09:20:55.956894Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-30T09:20:55.956897Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v2 host1:1001 to epoch cache 2025-06-30T09:20:55.956913Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v2 to update nodes log 2025-06-30T09:20:55.956936Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-0" } 2025-06-30T09:20:55.956945Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:55.956949Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v3 host2:1001 2025-06-30T09:20:55.956952Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 2 to 3 2025-06-30T09:20:55.956955Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v3 host2:1001 to epoch cache 2025-06-30T09:20:55.956959Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v3 to update nodes log 2025-06-30T09:20:55.956969Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-1" } 2025-06-30T09:20:55.956973Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:55.956981Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-0" } 2025-06-30T09:20:55.956985Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:55.956987Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1026.v4 host3:1001 2025-06-30T09:20:55.956990Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 3 to 4 2025-06-30T09:20:55.956992Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1026.v4 host3:1001 to epoch cache 2025-06-30T09:20:55.956996Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1026.v4 to update nodes log 2025-06-30T09:20:55.957005Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1026 Host: "host3" Port: 1001 ResolveHost: "host3.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-1" } 2025-06-30T09:20:55.957010Z node 1 :NODE_BROKER DEBUG: node_broker__graceful_shutdown.cpp:50: TTxGracefulShutdown Complete 2025-06-30T09:20:55.957015Z node 1 :NODE_BROKER DEBUG: 
node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:55.957018Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1027.v5 host4:1001 2025-06-30T09:20:55.957021Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 4 to 5 2025-06-30T09:20:55.957023Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1027.v5 host4:1001 to epoch cache 2025-06-30T09:20:55.957027Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1027.v5 to update nodes log 2025-06-30T09:20:55.957035Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1027 Host: "host4" Port: 1001 ResolveHost: "host4.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-1" } 2025-06-30T09:20:55.957125Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:760:2312], Recipient [1:575:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.957136Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:697:2267], Recipient [1:575:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:55.957139Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:55.957145Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.5 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:55.957197Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:762:2314], Recipient [1:575:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.957210Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:697:2267], Recipient [1:575:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:55.957213Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:55.957223Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-0" } } 2025-06-30T09:20:55.957267Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:764:2316], Recipient [1:575:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.957274Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:697:2267], Recipient [1:575:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-30T09:20:55.957277Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:55.957285Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-0" } } 2025-06-30T09:20:55.957322Z 
node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:766:2318], Recipient [1:575:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.957334Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:697:2267], Recipient [1:575:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1026 } 2025-06-30T09:20:55.957336Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:55.957344Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1026 Host: "host3" Port: 1001 ResolveHost: "host3.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 } } 2025-06-30T09:20:55.957382Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:768:2320], Recipient [1:575:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.957395Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:697:2267], Recipient [1:575:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1027 } 2025-06-30T09:20:55.957397Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:55.957405Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1027 Host: "host4" Port: 1001 ResolveHost: "host4.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200023000 Name: "slot-1" } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::FixedNodeId [GOOD] Test command err: 2025-06-30T09:20:55.470922Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.474158Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.474207Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.474227Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.474253Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.474276Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.474292Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.477474Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.477525Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.477554Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle 
NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.477579Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.477613Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.477635Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.477652Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.477709Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.477736Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.477754Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.478565Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.478588Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.478604Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.478618Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.478631Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.478672Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.479517Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.479597Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.479636Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.479664Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.479694Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.479707Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.479720Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.479731Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.480190Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.480259Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.480279Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.480297Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.480312Z node 6 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.480338Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.480362Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.480451Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.480474Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.480892Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.481051Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.481071Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.481116Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.481133Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.481173Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.483791Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.483820Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.484222Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.484248Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.484281Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.484423Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.484576Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.484965Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.485041Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.485097Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:55.485186Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.485296Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.486696Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.486814Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.486921Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.486961Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.487014Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.487029Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.487082Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.487286Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.488434Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.488590Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.498205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:55.498221Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:55.501190Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:55.501475Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:55.501522Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:55.501649Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:55.501976Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:55.501998Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:55.502037Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:55.502047Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Star ... 
TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:55.778206Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: true Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:55.778217Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:673:2185], Recipient [1:573:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.778220Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.778229Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:55.778232Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host2:1001 (fixed) tenant: dc-1 2025-06-30T09:20:55.778241Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:327: [Dirty] Fix ID for node #1025.v4 host2:1001 2025-06-30T09:20:55.778249Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v4 host2:1001 to database state=Active resolvehost=host2.yandex.net address=1.2.3.5 dc=1 location=DC=1/M=2/R=3/U=5/ lease=2 expire=NEVER servicedsubdomain=72057594046678944:1 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:55.778300Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 3 to 4 2025-06-30T09:20:55.778303Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=4 2025-06-30T09:20:55.789011Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:55.789033Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:327: [Committed] Fix ID for node #1025.v4 host2:1001 2025-06-30T09:20:55.789037Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 3 to 4 2025-06-30T09:20:55.789044Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v4 host2:1001 to epoch cache 2025-06-30T09:20:55.789066Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v4 to update nodes log 2025-06-30T09:20:55.789100Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 18446744073709551615 Name: "slot-1" } 2025-06-30T09:20:55.789200Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:678:2242], Recipient [1:573:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.789228Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:639:2213], Recipient [1:573:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-30T09:20:55.789234Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:55.789248Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 
ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 18446744073709551615 Name: "slot-1" } } 2025-06-30T09:20:55.789315Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:680:2244], Recipient [1:573:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.789328Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039939, Sender [1:639:2213], Recipient [1:573:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvExtendLeaseRequest { NodeId: 1025 } 2025-06-30T09:20:55.789332Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:257: StateWork, processing event TEvNodeBroker::TEvExtendLeaseRequest 2025-06-30T09:20:55.789342Z node 1 :NODE_BROKER DEBUG: node_broker__extend_lease.cpp:44: TTxExtendLease Execute node #1025 2025-06-30T09:20:55.789354Z node 1 :NODE_BROKER DEBUG: node_broker__extend_lease.cpp:78: TTxExtendLease Complete 2025-06-30T09:20:55.789364Z node 1 :NODE_BROKER TRACE: node_broker__extend_lease.cpp:82: TTxExtendLease reply with: NKikimr::NNodeBroker::TEvNodeBroker::TEvExtendLeaseResponse { Status { Code: OK } NodeId: 1025 Expire: 18446744073709551615 Epoch { Id: 1 Version: 4 Start: 23000 End: 3600023000 NextEnd: 7200023000 } } 2025-06-30T09:20:55.789408Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:682:2246], Recipient [1:573:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.789418Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:639:2213], Recipient [1:573:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-30T09:20:55.789421Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:55.789429Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 18446744073709551615 Name: "slot-1" } } 2025-06-30T09:20:55.789472Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:684:2248], Recipient [1:573:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.789488Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:639:2213], Recipient [1:573:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" } 2025-06-30T09:20:55.789494Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:55.789501Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" 2025-06-30T09:20:55.789536Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:23:2070], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:55.789556Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:23:2070], cacheItem# { Subscriber: { Subscriber: [1:645:2218] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:55.789593Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:686:2249], recipient# [1:685:2185], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:55.789603Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:55.789612Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-30T09:20:55.789621Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:685:2185], Recipient [1:573:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.789624Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:55.789629Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:55.789632Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host2:1001 (not fixed) tenant: dc-1 2025-06-30T09:20:55.789642Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:55.789651Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 
1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 18446744073709551615 Name: "slot-1" } 2025-06-30T09:20:55.789692Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:688:2251], Recipient [1:573:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:55.789703Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039939, Sender [1:639:2213], Recipient [1:573:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvExtendLeaseRequest { NodeId: 1025 } 2025-06-30T09:20:55.789705Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:257: StateWork, processing event TEvNodeBroker::TEvExtendLeaseRequest 2025-06-30T09:20:55.789708Z node 1 :NODE_BROKER DEBUG: node_broker__extend_lease.cpp:44: TTxExtendLease Execute node #1025 2025-06-30T09:20:55.789712Z node 1 :NODE_BROKER DEBUG: node_broker__extend_lease.cpp:78: TTxExtendLease Complete 2025-06-30T09:20:55.789718Z node 1 :NODE_BROKER TRACE: node_broker__extend_lease.cpp:82: TTxExtendLease reply with: NKikimr::NNodeBroker::TEvNodeBroker::TEvExtendLeaseResponse { Status { Code: OK } NodeId: 1025 Expire: 18446744073709551615 Epoch { Id: 1 Version: 4 Start: 23000 End: 3600023000 NextEnd: 7200023000 } } >> TConsoleTests::TestCreateTenantWrongName [GOOD] >> TConsoleTests::TestCreateTenantWrongNameExtSubdomain ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodeNameReuseRestartWithHostChanges [GOOD] Test command err: 2025-06-30T09:20:54.997992Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.002046Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.002115Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.002155Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.002207Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.002251Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.002281Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.008417Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.008507Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.008586Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.008640Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.008706Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.008750Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.008786Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.008882Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.008935Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.008967Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.010607Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.010651Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.010698Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.010726Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.010772Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.010856Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.012635Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.012830Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.012899Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.012956Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.013014Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.013044Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.013073Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.013096Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.013923Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.014067Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.014129Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.014167Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.014200Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.014262Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.014310Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle 
NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.014490Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.014535Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.015360Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.015436Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.015451Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.015476Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.015572Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.015669Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.019850Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.019874Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.019961Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.020036Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.020053Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.020264Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.020443Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.020704Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.020987Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.021132Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.021329Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.021451Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.021554Z node 8 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.021730Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.021937Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.022491Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.022643Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.022981Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.022992Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.023278Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.038234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:55.038280Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:55.043847Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:55.044445Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:55.044571Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:55.044832Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:55.045685Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:55.045726Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:55.045793Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:55.045812Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:55.045829Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:55.045852Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Comp ... 
72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [1:801:2326] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:20:56.882519Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:23:2070], cacheItem# { Subscriber: { Subscriber: [1:801:2326] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 9 TableKind: 0 Created: 1 CreateStep: 5000001 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] DomainId: [OwnerId: 72057594046678944, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/my-database TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:56.882560Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:808:2327], recipient# [1:800:2185], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/my-database TableId: [72057594046678944:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:56.882572Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1/my-database TableId: [72057594046678944:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:56.882581Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host5" Port: 19001 ResolveHost: "host5" Address: "" Location { } FixedNodeId: false 
Path: "/dc-1/my-database": scope id# <72057594046678944:2>: serviced subdomain# 72057594046678944:2 2025-06-30T09:20:56.882592Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:800:2185], Recipient [1:571:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:56.882596Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:56.882617Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:56.882620Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host5:19001 (not fixed) tenant: /dc-1/my-database 2025-06-30T09:20:56.882638Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v13 host5:19001 to database state=Active resolvehost=host5 address= dc= location= lease=1 expire=Thu, 01 Jan 1970 05:00:00 UTC servicedsubdomain=72057594046678944:2 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-30T09:20:56.882683Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1024.v13 host5:19001 2025-06-30T09:20:56.882690Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 12 to 13 2025-06-30T09:20:56.882693Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=13 2025-06-30T09:20:56.893800Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:56.893838Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1024.v13 host5:19001 2025-06-30T09:20:56.893850Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 12 to 13 2025-06-30T09:20:56.893856Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v13 host5:19001 to epoch cache 2025-06-30T09:20:56.893874Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v13 to update nodes log 2025-06-30T09:20:56.893914Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host5" Port: 19001 ResolveHost: "host5" Address: "" Location { } Expire: 18000025000 Name: "slot-0" } 2025-06-30T09:20:56.894040Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:812:2331], Recipient [1:571:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.894067Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:671:2245], Recipient [1:571:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host6" Port: 19001 ResolveHost: "host6" Address: "" Location { } FixedNodeId: false Path: "/dc-1/my-database" } 2025-06-30T09:20:56.894074Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-30T09:20:56.894080Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host6" Port: 19001 ResolveHost: "host6" Address: "" Location { } FixedNodeId: false Path: "/dc-1/my-database" 2025-06-30T09:20:56.894120Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:23:2070], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/my-database 
TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:20:56.894143Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:23:2070], cacheItem# { Subscriber: { Subscriber: [1:801:2326] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 9 TableKind: 0 Created: 1 CreateStep: 5000001 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] DomainId: [OwnerId: 72057594046678944, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/my-database TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:20:56.894187Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:814:2332], recipient# [1:813:2185], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/my-database TableId: [72057594046678944:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:20:56.894200Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1/my-database TableId: [72057594046678944:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:20:56.894209Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host6" Port: 19001 ResolveHost: "host6" Address: "" Location { } FixedNodeId: false Path: "/dc-1/my-database": scope id# <72057594046678944:2>: serviced subdomain# 72057594046678944:2 2025-06-30T09:20:56.894220Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [1:813:2185], Recipient [1:571:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:56.894223Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-30T09:20:56.894236Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-30T09:20:56.894239Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host6:19001 (not fixed) tenant: /dc-1/my-database 2025-06-30T09:20:56.894254Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v14 host6:19001 to database state=Active resolvehost=host6 address= dc= location= lease=1 expire=Thu, 01 Jan 1970 05:00:00 UTC servicedsubdomain=72057594046678944:2 slotindex=1 authorizedbycertificate=false 
bridgePileId= 2025-06-30T09:20:56.894301Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v14 host6:19001 2025-06-30T09:20:56.894305Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 13 to 14 2025-06-30T09:20:56.894308Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=14 2025-06-30T09:20:56.905270Z node 1 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-30T09:20:56.905296Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v14 host6:19001 2025-06-30T09:20:56.905308Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 13 to 14 2025-06-30T09:20:56.905314Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v14 host6:19001 to epoch cache 2025-06-30T09:20:56.905333Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v14 to update nodes log 2025-06-30T09:20:56.905382Z node 1 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host6" Port: 19001 ResolveHost: "host6" Address: "" Location { } Expire: 18000025000 Name: "slot-1" } >> THealthCheckTest::GreenStatusWhenInitPending [GOOD] >> THealthCheckTest::SpecificServerlessWithExclusiveNodes [GOOD] >> TExportToS3Tests::ShouldRetryAtFinalStage [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query [GOOD] >> BuildStatsHistogram::Many_Mixed [GOOD] >> THealthCheckTest::StorageNoQuota [GOOD] >> CompressExecutor::TestExecutorMemUsage [GOOD] >> THealthCheckTest::CLusterNotBootstrapped [GOOD] >> THealthCheckTest::IgnoreOtherGenerations >> THealthCheckTest::Issues100VCardMerging [GOOD] >> THealthCheckTest::LayoutCorrect >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes >> TxUsage::WriteToTopic_Demo_24_Query >> BuildStatsHistogram::Many_Serial >> THealthCheckTest::TestBootingTabletIsNotDead >> DataStreams::TestControlPlaneAndMeteringData >> DataStreams::TestUpdateStorage >> DataStreams::TestNonChargeableUser >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes [GOOD] >> DataStreams::TestGetShardIterator >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues [GOOD] >> DataStreams::TestStreamStorageRetention |81.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |81.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> LocalPartition::DirectWriteWithoutDescribeResourcesPermission [GOOD] Test command err: 2025-06-30T09:19:15.216747Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669517984467389:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:15.216925Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:19:15.225745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a28/r3tmp/tmpLCLQHC/pdisk_1.dat 2025-06-30T09:19:15.285427Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:15.294843Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669517984467179:2080] 1751275155110310 != 1751275155110313 TServer::EnableGrpc on GrpcPort 23186, node 1 2025-06-30T09:19:15.312897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004a28/r3tmp/yandexv2SK8x.tmp 2025-06-30T09:19:15.312912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004a28/r3tmp/yandexv2SK8x.tmp 2025-06-30T09:19:15.326460Z INFO: TTestServer started on Port 61331 GrpcPort 23186 2025-06-30T09:19:15.329397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:15.329441Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:15.330512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61331 2025-06-30T09:19:15.427877Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004a28/r3tmp/yandexv2SK8x.tmp 2025-06-30T09:19:15.428136Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration PQClient connected to localhost:23186 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:15.484469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-30T09:19:15.503334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:19:15.591476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:15.599512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:19:15.722364Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669517984467954:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:15.722522Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:15.722632Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669517984467967:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:15.723794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:15.731206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-30T09:19:15.731494Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669517984467969:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:19:15.826382Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669517984468033:2436] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:15.831040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:15.858082Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669517984468059:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:19:15.858861Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NWZhYmI4NmItOTMzZmJhOTgtZGY5NThlZTctMTk0OGY0ODQ=, ActorId: [1:7521669517984467951:2296], ActorState: ExecuteState, TraceId: 01jz024c86d4eaxwcs3myf33gf, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:19:15.859369Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:19:15.862137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:15.897098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669517984468322:2609] 2025-06-30T09:19:16.118910Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:20.117287Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669517984467389:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:20.117330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:19:21.145255Z :ReadWithRestarts INFO: TTopicSdkTestSetup started 2025-06-30T09:19:21.163475Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:19:21.189975Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:19:21.191560Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:19:21.191693Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:19:21.191780Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-30T09:19:21.191791Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:19:21.191794Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-30T09:19:21.191800Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:19:21.191808Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037892] disable metering: reason# billin ... EBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:20:59.910645Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-30T09:20:59.910653Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-30T09:20:59.910673Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:20:59.910689Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 3 partition: 0 MaxSeqNo: 0 sessionId: test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0 2025-06-30T09:20:59.911159Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|a2433f4b-69a4d233-ffab9897-1437529f_0] PartitionId [0] Generation [1] Write session: OnReadDone gRpcStatusCode: 0 2025-06-30T09:20:59.911207Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|a2433f4b-69a4d233-ffab9897-1437529f_0] PartitionId [0] Generation [1] Write session established. Init response: session_id: "test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0" 2025-06-30T09:20:59.911325Z :TRACE: [/Root] TRACE_EVENT InitResponse partition_id=0 session_id=test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0 2025-06-30T09:20:59.911335Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Write session: set DirectWriteToPartitionId 0 2025-06-30T09:20:59.911397Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Write 1 messages with Id from 1 to 1 2025-06-30T09:20:59.911474Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 18446744073709.551615s 2025-06-30T09:20:59.911632Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Write session: try to update token 2025-06-30T09:20:59.911642Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Send 1 message(s) (0 left), first sequence number is 1 2025-06-30T09:20:59.912028Z node 12 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-30T09:20:59.912110Z node 12 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-30T09:20:59.912177Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-30T09:20:59.912195Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-30T09:20:59.912236Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 1 2025-06-30T09:20:59.912262Z node 12 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-30T09:20:59.914427Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Write session: OnReadDone gRpcStatusCode: 0 2025-06-30T09:20:59.912320Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-30T09:20:59.912339Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-30T09:20:59.912354Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72075186224037892] got client message topic: test-topic partition: 0 SourceId: '\0test-message_group_id' SeqNo: 1 partNo : 0 messageNo: 1 size 98 offset: -1 2025-06-30T09:20:59.912437Z node 12 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'test-topic' partition 0 part blob processing sourceId '\0test-message_group_id' seqNo 1 partNo 0 2025-06-30T09:20:59.913662Z node 12 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'test-topic' partition 0 part blob complete sourceId '\0test-message_group_id' seqNo 1 partNo 0 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 187 count 1 nextOffset 1 batches 1 2025-06-30T09:20:59.913729Z node 12 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Add new write blob: topic 'test-topic' partition 0 compactOffset 0,1 HeadOffset 0 endOffset 0 curOffset 1 d0000000000_00000000000000000000_00000_0000000001_00000? size 175 WTime 1751275259913 2025-06-30T09:20:59.913768Z node 12 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:20:59.913783Z node 12 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 0 partNo 0 count 1 size 175 2025-06-30T09:20:59.914196Z node 12 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. 
Partition 0 offset 0 count 1 size 175 actorID [12:7521669966390691589:2415] 2025-06-30T09:20:59.914228Z node 12 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' size 175 2025-06-30T09:20:59.914231Z node 12 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 120 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:20:59.914237Z node 12 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:20:59.914249Z node 12 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message_group_id', Topic: 'test-topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-06-30T09:20:59.914257Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-30T09:21:00.009934Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Write session got write response: acks { seq_no: 1 written { } } write_statistics { persisting_time { nanos: 1000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-30T09:20:59.914272Z node 12 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'test-topic' partition 0 user test-consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:20:59.914275Z node 12 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-30T09:20:59.914276Z node 12 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'test-topic' partition 0 user test-consumer send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-30T09:21:00.009945Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] OnAck: seqNo=1, txId=? 2025-06-30T09:21:00.009960Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Write session: acknoledged message 1 2025-06-30T09:20:59.914286Z node 12 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=175, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:20:59.914305Z node 12 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 Topic 'test-topic' partition 0 user test-consumer offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-06-30T09:20:59.914314Z node 12 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 175 count 1 last offset 0, current partition end offset: 1 2025-06-30T09:20:59.914316Z node 12 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 
2025-06-30T09:20:59.914326Z node 12 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 175 accessed 0 times before, last time 2025-06-30T09:20:59.000000Z 2025-06-30T09:20:59.914334Z node 12 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-30T09:20:59.914340Z node 12 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-30T09:20:59.914353Z node 12 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-30T09:20:59.914361Z node 12 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 155 from pos 0 cbcount 1 2025-06-30T09:20:59.914375Z node 12 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'test-topic' partition 0 user test-consumer readTimeStamp done, result 1751275259912 queuesize 0 startOffset 0 2025-06-30T09:21:00.011558Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:21:00.011576Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:21:00.011710Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:21:00.011757Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:21:00.011883Z node 12 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0 grpc read done: success: 0 data: 2025-06-30T09:21:00.011894Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 3 sessionId: test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0 grpc read failed 2025-06-30T09:21:00.011904Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 3 sessionId: test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0 grpc closed 2025-06-30T09:21:00.011907Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: test-message_group_id|6d5be6ab-c9ac53b-4a734815-5236e5c6_0 is DEAD 2025-06-30T09:21:00.012166Z node 12 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:21:00.012274Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [12:7521669966390691714:2436] destroyed 2025-06-30T09:21:00.012297Z node 12 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
|81.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |81.0%| [LD] {RESULT} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::TestListNodes [GOOD] Test command err: 2025-06-30T09:20:55.068349Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.072565Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.072618Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.072649Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.072692Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.072732Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.072758Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.077874Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.077951Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.078003Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.078048Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.078104Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.078145Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.078178Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.078266Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.078314Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.078339Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.079737Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.079789Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.079817Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.079846Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.079874Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 
2025-06-30T09:20:55.079949Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.081314Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.081445Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.081503Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.081551Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.081599Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.081622Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.081644Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.081662Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.082428Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.082553Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.082594Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.082628Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.082656Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.082708Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.082771Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.082829Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.082866Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.083584Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.083641Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.083723Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.083753Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.083771Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:55.083789Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.083933Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.088730Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.088811Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.088827Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.088850Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.089853Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.090067Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.090188Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.090274Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.090634Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.090932Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.091137Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.091379Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.091562Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.092735Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.092900Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.093145Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.093785Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.096033Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.096543Z node 8 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.109175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:55.109203Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:55.113993Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:55.114464Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:55.114543Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:55.114725Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:55.115645Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:55.115852Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:55.115907Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:55.115927Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:55.115934Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:55.115955Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Comp ... ker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.432767Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #7 2025-06-30T09:20:58.432772Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:627:2208] 2025-06-30T09:20:58.432774Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.432776Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #7 2025-06-30T09:20:58.443604Z node 1 :NODE_BROKER DEBUG: node_broker__update_epoch.cpp:31: TTxUpdateEpoch Complete 2025-06-30T09:20:58.443629Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:548: [Committed] Move to new epoch #7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z, approximate epoch start #7.8 2025-06-30T09:20:58.443642Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T07:00:00.025000Z 2025-06-30T09:20:58.443651Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z, approximate epoch start #7.8 nodes=0 expired=0 2025-06-30T09:20:58.443664Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z nodes=0 expired=0 removed=1 2025-06-30T09:20:58.443674Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v7 to update nodes log 2025-06-30T09:20:58.443687Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #7.8 1970-01-01T06:00:00.025000Z - 
1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z 2025-06-30T09:20:58.443694Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z 2025-06-30T09:20:58.443699Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z 2025-06-30T09:20:58.443704Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z 2025-06-30T09:20:58.443708Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z 2025-06-30T09:20:58.443713Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z 2025-06-30T09:20:58.443717Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z 2025-06-30T09:20:58.443723Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z 2025-06-30T09:20:58.464402Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:868:2342], Recipient [1:571:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:58.464462Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:639:2215], Recipient [1:571:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:58.464470Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.464486Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z 2025-06-30T09:20:58.464544Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:870:2344], Recipient [1:571:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:58.464555Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:639:2215], Recipient [1:571:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:58.464558Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.464562Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #7.8 1970-01-01T06:00:00.025000Z - 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z 2025-06-30T09:20:58.806689Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435072, Sender [1:571:2185], Recipient [1:571:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvUpdateEpoch 2025-06-30T09:20:58.806711Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:263: StateWork, processing event TEvPrivate::TEvUpdateEpoch 2025-06-30T09:20:58.806729Z node 1 :NODE_BROKER DEBUG: node_broker__update_epoch.cpp:20: TTxUpdateEpoch Execute 2025-06-30T09:20:58.806753Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:548: [Dirty] Move to new epoch #8.9 1970-01-01T07:00:00.025000Z - 
1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z, approximate epoch start #8.9 2025-06-30T09:20:58.806758Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z 2025-06-30T09:20:58.806781Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #8.9 2025-06-30T09:20:58.918521Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:25:2072], Recipient [1:571:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 8 } 2025-06-30T09:20:58.918535Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.918540Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #8 2025-06-30T09:20:58.918622Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [3:83:2072], Recipient [1:626:2207] 2025-06-30T09:20:58.918625Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.918629Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #8 2025-06-30T09:20:58.918636Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:627:2208] 2025-06-30T09:20:58.918638Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.918640Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #8 2025-06-30T09:20:58.918645Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:634:2210] 2025-06-30T09:20:58.918647Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.918649Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #8 2025-06-30T09:20:58.918654Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:635:2211] 2025-06-30T09:20:58.918657Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.918659Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #8 2025-06-30T09:20:58.918664Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:636:2212] 2025-06-30T09:20:58.918666Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.918668Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #8 2025-06-30T09:20:58.918672Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:637:2213] 2025-06-30T09:20:58.918674Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.918677Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #8 2025-06-30T09:20:58.918682Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [2:54:2072], Recipient [1:633:2209] 
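(Editor's aside, not harness output: the NODE_BROKER entries print each epoch as "#<id>.<version> <start> - <end> - <next end>". In this run the epochs are one hour long and offset by the simulated clock start of 25 ms — both values read off the timestamps above, not from any spec — so the three timestamps for a given epoch id can be recomputed. An illustrative sketch under those two assumptions:)

from datetime import datetime, timedelta, timezone

EPOCH_LEN = timedelta(hours=1)              # assumption: hourly epochs, as seen above
CLOCK_OFFSET = timedelta(milliseconds=25)   # assumption: 25 ms simulated clock start
BASE = datetime(1970, 1, 1, tzinfo=timezone.utc)

def epoch_window(epoch_id: int):
    # Returns (start, end, next_end) for the given epoch id.
    start = BASE + CLOCK_OFFSET + (epoch_id - 1) * EPOCH_LEN
    return start, start + EPOCH_LEN, start + 2 * EPOCH_LEN

# Matches "#8.9 1970-01-01T07:00:00.025000Z - 08:00:00.025000Z - 09:00:00.025000Z" above.
print(epoch_window(8))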
2025-06-30T09:20:58.918684Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:58.918687Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #8 2025-06-30T09:20:58.929437Z node 1 :NODE_BROKER DEBUG: node_broker__update_epoch.cpp:31: TTxUpdateEpoch Complete 2025-06-30T09:20:58.929458Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:548: [Committed] Move to new epoch #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z, approximate epoch start #8.9 2025-06-30T09:20:58.929474Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T08:00:00.025000Z 2025-06-30T09:20:58.929483Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z, approximate epoch start #8.9 nodes=0 expired=0 2025-06-30T09:20:58.929493Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z nodes=0 expired=0 removed=1 2025-06-30T09:20:58.929500Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v7 to update nodes log 2025-06-30T09:20:58.929513Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z 2025-06-30T09:20:58.929522Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z 2025-06-30T09:20:58.929529Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z 2025-06-30T09:20:58.929536Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z 2025-06-30T09:20:58.929543Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z 2025-06-30T09:20:58.929550Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z 2025-06-30T09:20:58.929557Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z 2025-06-30T09:20:58.929564Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z 2025-06-30T09:20:58.929571Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #8.9 1970-01-01T07:00:00.025000Z - 1970-01-01T08:00:00.025000Z - 1970-01-01T09:00:00.025000Z |81.0%| [TA] {RESULT} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DataStreams::TestDeleteStream >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes [GOOD] |81.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/sharding/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/health_check/ut/unittest >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes [GOOD] Test command err: 2025-06-30T09:20:58.397695Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.397791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.397853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:58.397906Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.397938Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.397959Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001266/r3tmp/tmpWRweM7/pdisk_1.dat 2025-06-30T09:20:58.476171Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27177, node 1 TClient is connected to server localhost:29334 2025-06-30T09:20:58.570339Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:58.570356Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:58.570359Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:58.570404Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:59.625968Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.626035Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.626050Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.626259Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.626270Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.626293Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001266/r3tmp/tmp7M9yaD/pdisk_1.dat 2025-06-30T09:20:59.698789Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10570, node 3 TClient is connected to server localhost:19361 2025-06-30T09:20:59.810437Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:59.810458Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:59.810463Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:59.810611Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:00.610349Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:423:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.610406Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:00.610416Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001266/r3tmp/tmpPVWkSC/pdisk_1.dat 2025-06-30T09:21:00.694274Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9061, node 5 TClient is connected to server localhost:8080 2025-06-30T09:21:00.799129Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:00.799144Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:00.799148Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:00.799225Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: MAINTENANCE_REQUIRED issue_log { id: "ORANGE-f489-1231c6b1" status: ORANGE message: "Database has compute issues" location { database { name: "/Root" } } reason: "ORANGE-6fa7-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "ORANGE-6fa7-1231c6b1" status: ORANGE message: "Compute has issues with tablets" location { database { name: "/Root" } } reason: "ORANGE-0df4-1231c6b1-Unknown" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "ORANGE-0df4-1231c6b1-Unknown" status: ORANGE message: "Tablets are restarting too often" location { compute { tablet { type: "Unknown" id: "1" count: 1 } } database { name: "/Root" } node { } } type: "TABLET" level: 4 } issue_log { id: "ORANGE-f489-2135748c" status: ORANGE message: "Database has compute issues" location { database { name: "/Root/shared" } } reason: "ORANGE-6fa7-2135748c" type: "DATABASE" level: 1 } issue_log { id: "ORANGE-6fa7-2135748c" status: ORANGE message: "Compute has issues with tablets" location { database { name: "/Root/shared" } } reason: "ORANGE-0df4-2135748c-Unknown" type: "COMPUTE" level: 2 } issue_log { id: "ORANGE-0df4-2135748c-Unknown" status: ORANGE message: "Tablets are restarting too often" location { compute { tablet { type: "Unknown" id: "1" count: 1 } } database { name: "/Root/shared" } node { } } type: "TABLET" level: 4 } database_status { name: "/Root" overall: ORANGE storage { overall: GREEN pools { id: "static" overall: GREEN groups { id: "0" overall: GREEN } } } compute { overall: ORANGE nodes { id: "5" overall: YELLOW load { overall: YELLOW load: 123.390625 cores: 64 } } tablets { overall: ORANGE type: "Unknown" state: "RESTARTS_TOO_OFTEN" count: 1 id: "1" } } } database_status { name: 
"/Root/shared" overall: ORANGE storage { overall: GREEN pools { id: "/Root:test" overall: GREEN groups { id: "2147483648" overall: GREEN vdisks { id: "5-1-55" overall: GREEN pdisk { id: "5-1" overall: GREEN } } } } } compute { overall: ORANGE nodes { id: "6" overall: GREY } tablets { overall: ORANGE type: "Unknown" state: "RESTARTS_TOO_OFTEN" count: 1 id: "1" } } } location { id: 5 host: "::1" port: 12001 } 2025-06-30T09:21:01.439100Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:288:2330], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.439152Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.439170Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001266/r3tmp/tmpMB3W0y/pdisk_1.dat 2025-06-30T09:21:01.518639Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 1292, node 7 TClient is connected to server localhost:5170 self_check_result: EMERGENCY issue_log { id: "RED-f489" status: RED message: "Database has compute issues" reason: "RED-7469" type: "DATABASE" level: 1 } issue_log { id: "RED-7469" status: RED message: "There are no compute nodes" type: "COMPUTE" level: 2 } database_status { name: "/Root/serverless" overall: RED storage { overall: GREEN pools { id: "/Root:test" overall: GREEN groups { id: "2147483648" overall: GREEN vdisks { id: "7-1-55" overall: GREEN pdisk { id: "7-1" overall: GREEN } } } } } compute { overall: RED } } location { id: 7 host: "::1" port: 12001 } 2025-06-30T09:21:01.981399Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:421:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.981504Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.981525Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001266/r3tmp/tmpACyeBm/pdisk_1.dat 2025-06-30T09:21:02.064802Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2571, node 8 TClient is connected to server localhost:25177 2025-06-30T09:21:02.185934Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:02.185949Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:02.185952Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:02.186027Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesV2BackMigration [GOOD] Test command err: 2025-06-30T09:20:55.066396Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.070504Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.070562Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.070606Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.070650Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.070690Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.070718Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.074640Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.074714Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.074790Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.074855Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.074922Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.074956Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode 
{ NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.074987Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.075081Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.075131Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.075162Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.076166Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.076208Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.076238Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.076263Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.076288Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.076357Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.077547Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.077696Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.077763Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.077813Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.077878Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.077902Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.077921Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.077940Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:55.078556Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.078664Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.078696Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.078722Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.078777Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.078824Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.078863Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 
2025-06-30T09:20:55.078905Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.078937Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.079559Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.079593Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.079650Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.079670Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.079682Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.079694Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.079788Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.082817Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.082840Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.083422Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.083436Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.083458Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.083753Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.083973Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.084175Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.084367Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.084389Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.084514Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.084603Z node 1 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.084696Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.085028Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.085381Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.085404Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.085522Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.085698Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.085725Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:55.102110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:55.102139Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:55.106663Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:55.107068Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:55.107177Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:55.107337Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:55.107740Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:55.107764Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:55.107801Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:55.107816Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:55.107820Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:55.107833Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Comp ... 
nt# 272039936, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:56.871089Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.871096Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:56.871634Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:905:2391], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.871656Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:56.871660Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.871664Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:56.871713Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:907:2393], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.871724Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:56.871726Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.871730Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:56.871769Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:909:2395], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.871786Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 11 } 2025-06-30T09:20:56.871789Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.871793Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:56.871833Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:911:2397], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.871844Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:56.871847Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.871851Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:56.871903Z node 1 :NODE_BROKER TRACE: 
node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:913:2399], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.871915Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 10 } 2025-06-30T09:20:56.871918Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.871922Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:56.871963Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:915:2401], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.871977Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:20:56.871984Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.871989Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:56.872036Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:917:2403], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.872049Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 9 } 2025-06-30T09:20:56.872052Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:20:56.872056Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-30T09:20:56.872097Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:919:2405], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.872111Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 11 SeqNo: 5 } 2025-06-30T09:20:56.872115Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:56.872120Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:637:2213], seqNo: 5, version: 11, server pipe id: [1:919:2405] 2025-06-30T09:20:56.872125Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v11 -> v11 to [1:637:2213] 2025-06-30T09:20:56.872177Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:919:2405], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:56.872181Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:637:2213], seqNo: 5, server pipe id: [1:919:2405] 2025-06-30T09:20:56.872197Z node 1 :NODE_BROKER TRACE: 
node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:921:2407], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.872209Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 10 SeqNo: 6 } 2025-06-30T09:20:56.872212Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:56.872214Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:637:2213], seqNo: 6, version: 10, server pipe id: [1:921:2407] 2025-06-30T09:20:56.872218Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v10 -> v11 to [1:637:2213] 2025-06-30T09:20:56.872255Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:921:2407], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:56.872259Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:637:2213], seqNo: 6, server pipe id: [1:921:2407] 2025-06-30T09:20:56.872274Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:923:2409], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.872285Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 9 SeqNo: 7 } 2025-06-30T09:20:56.872289Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-30T09:20:56.872292Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:637:2213], seqNo: 7, version: 9, server pipe id: [1:923:2409] 2025-06-30T09:20:56.872295Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v9 -> v11 to [1:637:2213] 2025-06-30T09:20:56.872331Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:923:2409], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:20:56.872334Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:637:2213], seqNo: 7, server pipe id: [1:923:2409] 2025-06-30T09:20:56.872353Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:925:2411], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.872370Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-30T09:20:56.872374Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:56.872397Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 18000023000 Name: "slot-0" } } 2025-06-30T09:20:56.872448Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:927:2413], Recipient [1:869:2362]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.872457Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-30T09:20:56.872460Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:56.872465Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } 2025-06-30T09:20:56.872515Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:929:2415], Recipient [1:869:2362]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:20:56.872541Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:637:2213], Recipient [1:869:2362]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1026 } 2025-06-30T09:20:56.872544Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-30T09:20:56.872549Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } |81.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/sharding/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ShouldRetryAtFinalStage [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:20:24.639294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:24.639324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.639344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:24.639350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:24.639365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:24.639370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:24.639382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:24.639408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:24.639545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:24.639626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:24.656420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:24.656448Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:24.660046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:24.660099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:24.660148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:20:24.661918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:24.661987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:24.662102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.662150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:20:24.662804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.662842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:24.663098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.663109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:20:24.663137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:24.663145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:20:24.663152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:24.663170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.664767Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:20:24.688628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:24.688738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.688828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:24.688839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:24.688891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:24.688908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:24.689894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.689950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:24.690014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.690027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:24.690033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:24.690040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:24.690591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.690606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:24.690613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:24.691056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.691069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:24.691076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.691085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:24.691908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:24.692449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:24.692528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:24.692741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:24.692774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:24.692782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.692842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:24.692850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:24.692884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:24.692899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:24.693439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:24.693448Z node 1 :FLAT_TX_SCHEMESHARD ... 
nerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0009 2025-06-30T09:20:55.401250Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-30T09:20:55.401345Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 70 row count 2 2025-06-30T09:20:55.401377Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Table, is column=0, is olap=0, RowCount 2, DataSize 70 2025-06-30T09:20:55.401445Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-30T09:20:55.401464Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-30T09:20:55.401475Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=0, is column=0, is olap=0, RowCount 0, DataSize 0, with borrowed parts 2025-06-30T09:20:55.401486Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-30T09:20:55.411753Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-30T09:20:58.862095Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 70 rowCount 2 cpuUsage 0.0008 2025-06-30T09:20:58.882826Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0007 2025-06-30T09:20:58.923657Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-30T09:20:58.923740Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 70 row count 2 2025-06-30T09:20:58.923767Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Table, is column=0, is olap=0, RowCount 2, DataSize 70 2025-06-30T09:20:58.923811Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-30T09:20:58.923826Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-30T09:20:58.923835Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: 
TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=0, is column=0, is olap=0, RowCount 0, DataSize 0, with borrowed parts 2025-06-30T09:20:58.923844Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-30T09:20:58.934032Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-30T09:21:02.227974Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [4:579:2535], attempt# 1 2025-06-30T09:21:02.232402Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:118: [Export] [scanner] Handle TEvExportScan::TEvReset: self# [4:578:2534] 2025-06-30T09:21:02.234080Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [4:579:2535], sender# [4:578:2534] 2025-06-30T09:21:02.234103Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [4:578:2534] 2025-06-30T09:21:02.234133Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [4:579:2535], sender# [4:578:2534], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } 2025-06-30T09:21:02.234201Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:526: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [4:579:2535], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: [6e3e0a41fdab8add833862f1bd2954c3,1d8dd09e584ce6a47582a31b591900e2,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:30711 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: FCF09ABF-408E-43F6-B1FD-115A1DDEF9B3 amz-sdk-request: attempt=1 content-length: 459 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv / uploadId=1 2025-06-30T09:21:02.236040Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:623: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [4:579:2535], result# 2025-06-30T09:21:02.236138Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [4:578:2534], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-30T09:21:02.240416Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 455 RawX2: 17179871606 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:21:02.240451Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0 2025-06-30T09:21:02.240488Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 455 RawX2: 17179871606 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 
Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:21:02.240508Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 455 RawX2: 17179871606 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:21:02.240527Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:02.240533Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:21:02.240543Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-30T09:21:02.240553Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710759:0 129 -> 240 2025-06-30T09:21:02.240615Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:02.241410Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:21:02.241474Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:21:02.241482Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-06-30T09:21:02.241496Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-30T09:21:02.241500Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-30T09:21:02.241504Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-30T09:21:02.241507Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-30T09:21:02.241511Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-06-30T09:21:02.241528Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:128:2152] message: TxId: 281474976710759 2025-06-30T09:21:02.241534Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-30T09:21:02.241539Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710759:0 2025-06-30T09:21:02.241556Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710759:0 2025-06-30T09:21:02.241600Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:21:02.242282Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-30T09:21:02.242305Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710759 2025-06-30T09:21:02.242896Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:21:02.242913Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:600:2552] TestWaitNotification: OK eventTxId 102 |81.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/sharding/ut/unittest |81.0%| [TA] $(B)/ydb/public/sdk/cpp/src/client/topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/health_check/ut/unittest >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues [GOOD] Test command err: 2025-06-30T09:20:57.813984Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:57.814052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:57.814085Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:57.814128Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:57.814155Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:57.814174Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00142a/r3tmp/tmplWHfUO/pdisk_1.dat 2025-06-30T09:20:57.904963Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11788, node 1 TClient is connected to server localhost:3577 2025-06-30T09:20:58.005115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:58.005134Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:58.005138Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:58.005190Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:59.023270Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.023331Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.023345Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.023532Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.023542Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.023563Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00142a/r3tmp/tmpVqyORL/pdisk_1.dat 2025-06-30T09:20:59.095973Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26948, node 3 TClient is connected to server localhost:15156 2025-06-30T09:20:59.201876Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:59.201890Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:59.201893Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:59.201981Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e463-3-3-42" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-42" path: "/home/runner/.ya/build/build_root/cl3w/00142a/r3tmp/tmpVqyORL/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "YELLOW-e463-3-3-43" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-43" path: "/home/runner/.ya/build/build_root/cl3w/00142a/r3tmp/tmpVqyORL/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "YELLOW-e463-3-3-44" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-44" path: "/home/runner/.ya/build/build_root/cl3w/00142a/r3tmp/tmpVqyORL/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } location { id: 3 host: "::1" port: 12001 } 2025-06-30T09:21:00.127428Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.127500Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:00.127527Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:00.127727Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:622:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.127757Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:00.127775Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00142a/r3tmp/tmpYTj1Au/pdisk_1.dat 2025-06-30T09:21:00.204843Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2504, node 5 TClient is connected to server localhost:17334 2025-06-30T09:21:00.328015Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:00.328034Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:00.328037Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:00.328175Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-b783-5-5-42" status: RED message: "PDisk state is DeviceIoError" location { storage { node { id: 5 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "5-42" path: "/home/runner/.ya/build/build_root/cl3w/00142a/r3tmp/tmpYTj1Au/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-b783-5-5-43" status: RED message: "PDisk state is DeviceIoError" location { storage { node { id: 5 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "5-43" path: "/home/runner/.ya/build/build_root/cl3w/00142a/r3tmp/tmpYTj1Au/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-b783-5-5-44" status: RED message: "PDisk state is DeviceIoError" location { storage { node { id: 5 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "5-44" path: "/home/runner/.ya/build/build_root/cl3w/00142a/r3tmp/tmpYTj1Au/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } location { id: 5 host: "::1" port: 12001 } 2025-06-30T09:21:01.195380Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.195435Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.195458Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:01.195645Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.195664Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.195681Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00142a/r3tmp/tmp9FQ6dS/pdisk_1.dat 2025-06-30T09:21:01.265943Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28331, node 7 TClient is connected to server localhost:23046 2025-06-30T09:21:01.370539Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:01.370552Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:01.370555Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:01.370599Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:02.297073Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:02.297141Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:02.297159Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:02.297412Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:02.297451Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:02.297463Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00142a/r3tmp/tmp2KincR/pdisk_1.dat 2025-06-30T09:21:02.377779Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20494, node 9 TClient is connected to server localhost:26510 2025-06-30T09:21:02.486322Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:02.486335Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:02.486340Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:02.486473Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/health_check/ut/unittest >> THealthCheckTest::CLusterNotBootstrapped [GOOD] Test command err: 2025-06-30T09:20:57.290058Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669957016403717:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:57.290079Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00136a/r3tmp/tmpMIHQ4i/pdisk_1.dat TServer::EnableGrpc on GrpcPort 8560, node 1 2025-06-30T09:20:57.344903Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669957016403694:2080] 1751275257289921 != 1751275257289924 2025-06-30T09:20:57.347352Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:57.347738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:57.347747Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:57.347749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:57.347788Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9745 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:57.392253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:57.392291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:57.393369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:57.422859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:57.664381Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669958171468401:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:57.664404Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00136a/r3tmp/tmphgNoln/pdisk_1.dat 2025-06-30T09:20:57.678801Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:57.679076Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521669958171468375:2080] 1751275257664249 != 1751275257664252 TServer::EnableGrpc on GrpcPort 62266, node 2 2025-06-30T09:20:57.688241Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:57.688253Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:57.688255Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:57.688287Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21135 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:20:57.768097Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:57.768132Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:57.768542Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:20:57.769008Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:58.718462Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.718534Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.718549Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:58.718766Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.718781Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.718815Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00136a/r3tmp/tmpJDrEHt/pdisk_1.dat 2025-06-30T09:20:58.796285Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8532, node 3 TClient is connected to server localhost:14191 2025-06-30T09:20:58.908109Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:58.908125Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:58.908130Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:58.908230Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:59.963260Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.963328Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.963352Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.963554Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:622:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.963585Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.963602Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00136a/r3tmp/tmpkkWgNp/pdisk_1.dat 2025-06-30T09:21:00.040874Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6172, node 5 TClient is connected to server localhost:64765 2025-06-30T09:21:00.160752Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:00.160769Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:00.160773Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:00.160920Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 5 host: "::1" port: 12001 } 2025-06-30T09:21:00.763747Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:498:2378], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.763791Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:00.763809Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00136a/r3tmp/tmpuiSp5p/pdisk_1.dat 2025-06-30T09:21:00.846416Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8946, node 7 TClient is connected to server localhost:17332 2025-06-30T09:21:00.943721Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:00.943737Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:00.943739Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:00.943833Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD database_status { name: "/Root/serverless" overall: GREEN storage { overall: GREEN pools { id: "/Root:test" overall: GREEN groups { id: "2147483648" overall: GREEN vdisks { id: "7-1-55" overall: GREEN pdisk { id: "7-1" overall: GREEN } } } } } compute { overall: GREEN nodes { id: "9" overall: GREEN load { overall: GREEN cores: 64 } } } } database_status { name: "/Root" overall: GREEN storage { overall: GREEN pools { id: "static" overall: GREEN groups { id: "0" overall: GREEN } } } compute { overall: GREEN nodes { id: "7" overall: GREEN load { overall: GREEN cores: 64 } } } } database_status { name: "/Root/shared" overall: GREEN storage { overall: GREEN pools { id: "/Root:test" overall: GREEN groups { id: "2147483648" overall: GREEN } } } compute { overall: GREEN nodes { id: "8" overall: GREEN load { overall: GREEN cores: 64 } } } } location { id: 7 host: "::1" port: 12001 } 2025-06-30T09:21:01.793569Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:420:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.793616Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:01.793631Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00136a/r3tmp/tmppOZcDt/pdisk_1.dat 2025-06-30T09:21:01.887896Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19811, node 10 TClient is connected to server localhost:20475 2025-06-30T09:21:01.993318Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:01.993337Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:01.993343Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:01.993490Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: MAINTENANCE_REQUIRED issue_log { id: "0" status: RED message: "Cluster is not bootstrapped" } location { id: 10 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> CompressExecutor::TestExecutorMemUsage [GOOD] Test command err: 2025-06-30T09:19:46.650492Z :WriteAndReadSomeMessagesWithAsyncCompression INFO: Random seed for debugging is 1751275186650485 2025-06-30T09:19:46.766554Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669652875574038:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:46.766585Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:19:46.769919Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669651997016256:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:46.769949Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:19:46.794434Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004939/r3tmp/tmpf63Xo7/pdisk_1.dat 2025-06-30T09:19:46.801377Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:19:46.820412Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:46.830976Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 30641, node 1 2025-06-30T09:19:46.837427Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: 
/home/runner/.ya/build/build_root/cl3w/004939/r3tmp/yandexOXS63R.tmp 2025-06-30T09:19:46.837441Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004939/r3tmp/yandexOXS63R.tmp 2025-06-30T09:19:46.837525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004939/r3tmp/yandexOXS63R.tmp 2025-06-30T09:19:46.837588Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:46.844672Z INFO: TTestServer started on Port 10503 GrpcPort 30641 TClient is connected to server localhost:10503 PQClient connected to localhost:30641 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:19:46.866982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:46.867015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:46.868668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:46.897657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:46.897684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:46.898852Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:19:46.899062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:46.899129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... waiting... waiting... waiting... 2025-06-30T09:19:47.159628Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669656291983811:2268], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:47.159641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669657170542207:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:47.159646Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669656291983823:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:47.159665Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:47.159678Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:47.159818Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669657170542220:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:47.161026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:47.161053Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669657170542252:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:47.161065Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:47.162267Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669656291983827:2121] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:47.166722Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669657170542222:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:19:47.166799Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669656291983826:2272], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:19:47.207914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:47.256507Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669656291983895:2154] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:47.259928Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669657170542377:2701] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:47.275395Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669657170542396:2312], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:19:47.275447Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669656291983910:2279], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:19:47.275526Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=Zjg1NGU1OTUtNTUxZTE0YWItYzI5YzdiNTYtYWQ3MTQ5OWU=, ActorId: [2:7521669656291983809:2267], ActorState: ExecuteState, TraceId: 01jz025aymadp88xmwq7mdmh8h, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:19:47.275528Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YTJjZjZiZTctNDBiYjIyNTEtNmI4YWI5ODQtZDllNjBmYTQ=, Actor ... server" ip=ipv6:[::1]:54622 proto=v1 topic=test-topic durationSec=0 2025-06-30T09:21:00.833642Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:21:00.834131Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 3 sessionId: describe result for acl check 2025-06-30T09:21:00.834176Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-30T09:21:00.834183Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-30T09:21:00.834185Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-30T09:21:00.834192Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [15:7521669972257662972:2532] (SourceId=test-message-group-id, PreferedPartition=(NULL)) StartKqpSession 2025-06-30T09:21:00.834845Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [15:7521669972257662972:2532] (SourceId=test-message-group-id, PreferedPartition=(NULL)) Select from the table 2025-06-30T09:21:00.996172Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715702. Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-30T09:21:00.996214Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [15:7521669972257662983:2534] TxId: 281474976715702. Ctx: { TraceId: 01jz027jx2e2hgym85mzv22wtg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=NmFiOTVhM2ItODk3N2Q0MjYtOGVjMGZiMmUtYWViMjRlMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-30T09:21:00.996280Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=15&id=NmFiOTVhM2ItODk3N2Q0MjYtOGVjMGZiMmUtYWViMjRlMDk=, ActorId: [15:7521669972257662973:2534], ActorState: ExecuteState, TraceId: 01jz027jx2e2hgym85mzv22wtg, Create QueryResponse for error on request, msg: 2025-06-30T09:21:00.996647Z node 15 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [15:7521669972257662972:2532] (SourceId=test-message-group-id, PreferedPartition=(NULL)) ReplyError: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=NmFiOTVhM2ItODk3N2Q0MjYtOGVjMGZiMmUtYWViMjRlMDk=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jz027jx314xyzjkbdakygc0b" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 2025-06-30T09:21:00.996667Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 3 reason: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=NmFiOTVhM2ItODk3N2Q0MjYtOGVjMGZiMmUtYWViMjRlMDk=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jz027jx314xyzjkbdakygc0b" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 sessionId: 2025-06-30T09:21:00.996845Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: is DEAD Test retry state: get retry delay 2025-06-30T09:21:00.997050Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|afeaccc0-3a7941b7-d94e4828-fdd42f9a_0] Got error. Status: UNAVAILABLE, Description:
: Error: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=NmFiOTVhM2ItODk3N2Q0MjYtOGVjMGZiMmUtYWViMjRlMDk=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jz027jx314xyzjkbdakygc0b" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 , code: 500001 2025-06-30T09:21:00.997058Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|afeaccc0-3a7941b7-d94e4828-fdd42f9a_0] Write session will restart in 2.000000s 2025-06-30T09:21:00.997073Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|afeaccc0-3a7941b7-d94e4828-fdd42f9a_0] Write session: Do CDS request 2025-06-30T09:21:00.997077Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|afeaccc0-3a7941b7-d94e4828-fdd42f9a_0] Do schedule cds request after 2000 ms 2025-06-30T09:21:01.179179Z node 16 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720687. Failed to resolve tablet: 72075186224037888 after several retries. 2025-06-30T09:21:01.179229Z node 16 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [16:7521669973755879199:2460] TxId: 281474976720687. Ctx: { TraceId: 01jz027k1x4v3jxse1snz0g4jp, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=ODQwZjMyMGItOTI5ZTA4NGEtNTNhZjJlZTYtYWE3Y2RlMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037888 after several retries. 2025-06-30T09:21:01.179307Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=16&id=ODQwZjMyMGItOTI5ZTA4NGEtNTNhZjJlZTYtYWE3Y2RlMDU=, ActorId: [16:7521669969460911888:2460], ActorState: ExecuteState, TraceId: 01jz027k1x4v3jxse1snz0g4jp, Create QueryResponse for error on request, msg: 2025-06-30T09:21:01.179661Z node 16 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037888 after several retries." severity: 1 } TxMeta { id: "01jz027k2waqtccmc2ma2tv0gd" } } YdbStatus: UNAVAILABLE ConsumedRu: 20 } 2025-06-30T09:21:01.400826Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715704. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:21:01.400872Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [15:7521669976552630347:2538] TxId: 281474976715704. Ctx: { TraceId: 01jz027k8rd00z8ayq2n8v9jq6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=ZTM1NzJiMTYtMjI3NTE5YzYtMWMyYzEzMWUtMjE3MDI3ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:21:01.400952Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=15&id=ZTM1NzJiMTYtMjI3NTE5YzYtMWMyYzEzMWUtMjE3MDI3ZQ==, ActorId: [15:7521669976552630334:2538], ActorState: ExecuteState, TraceId: 01jz027k8rd00z8ayq2n8v9jq6, Create QueryResponse for error on request, msg: 2025-06-30T09:21:01.401274Z node 15 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jz027k9p21npjbaw77r6jtz4" } } YdbStatus: UNAVAILABLE ConsumedRu: 19 } 2025-06-30T09:21:01.833072Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|afeaccc0-3a7941b7-d94e4828-fdd42f9a_0] Write session: close. Timeout = 0 ms 2025-06-30T09:21:01.833086Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|afeaccc0-3a7941b7-d94e4828-fdd42f9a_0] Write session will now close 2025-06-30T09:21:01.833096Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|afeaccc0-3a7941b7-d94e4828-fdd42f9a_0] Write session: aborting 2025-06-30T09:21:01.833270Z :WARNING: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|afeaccc0-3a7941b7-d94e4828-fdd42f9a_0] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-06-30T09:21:01.833275Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|afeaccc0-3a7941b7-d94e4828-fdd42f9a_0] Write session: destroy 2025-06-30T09:21:01.893936Z node 16 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720689. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:21:01.894007Z node 16 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [16:7521669973755879283:2468] TxId: 281474976720689. Ctx: { TraceId: 01jz027ks1bq3zpz2zcyfphbzt, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=ZDBjM2NmNmYtY2ViYTVmMDUtOWQ5YjAyNDEtYTNlZDg4ODU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:21:01.894101Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=16&id=ZDBjM2NmNmYtY2ViYTVmMDUtOWQ5YjAyNDEtYTNlZDg4ODU=, ActorId: [16:7521669973755879280:2468], ActorState: ExecuteState, TraceId: 01jz027ks1bq3zpz2zcyfphbzt, Create QueryResponse for error on request, msg: 2025-06-30T09:21:01.894501Z node 16 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jz027ks1bq3zpz2zd1vqghnz" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-30T09:21:01.915913Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715706. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:21:01.915976Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [15:7521669976552630434:2545] TxId: 281474976715706. Ctx: { TraceId: 01jz027ksxe4tpyqafzk89yy35, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=YTkwZThiMWEtZWZmZjQ1YWMtNWJjNTExOWMtN2UyMjdkZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-30T09:21:01.916067Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=15&id=YTkwZThiMWEtZWZmZjQ1YWMtNWJjNTExOWMtN2UyMjdkZDE=, ActorId: [15:7521669976552630431:2545], ActorState: ExecuteState, TraceId: 01jz027ksxe4tpyqafzk89yy35, Create QueryResponse for error on request, msg: 2025-06-30T09:21:01.916449Z node 15 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jz027ksxe4tpyqafznkc08vq" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } >> THealthCheckTest::IgnoreOtherGenerations [GOOD] >> THealthCheckTest::IgnoreServerlessWhenNotSpecific >> THealthCheckTest::LayoutCorrect [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedServer [GOOD] >> TConsoleConfigSubscriptionTests::TestAddSubscriptionIdempotency ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/health_check/ut/unittest >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes [GOOD] Test command err: 2025-06-30T09:20:58.504792Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.504855Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.504886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:58.504925Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.504949Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.504966Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011c1/r3tmp/tmp6kPPt6/pdisk_1.dat 2025-06-30T09:20:58.580762Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27000, node 1 TClient is connected to server localhost:27204 2025-06-30T09:20:58.675094Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:58.675110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:58.675114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:58.675157Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: DEGRADED issue_log { id: "YELLOW-70fb-1231c6b1" status: YELLOW message: "Database has multiple issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" reason: "YELLOW-5321-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-1" reason: "YELLOW-e9e2-1231c6b1-2" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-1" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 1 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-2" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 2 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-5321-1231c6b1" status: YELLOW message: "Storage degraded" location { database { name: "/Root" } } reason: "YELLOW-595f-1231c6b1-80c02825" type: "STORAGE" level: 2 } issue_log { id: "YELLOW-595f-1231c6b1-80c02825" status: YELLOW message: "Pool degraded" location { storage { pool { name: "static" } } database { name: "/Root" } } reason: "YELLOW-ef3e-1231c6b1-0" type: "STORAGE_POOL" level: 3 } issue_log { id: "RED-4847-1231c6b1-1-0-3-55-0-55" status: RED message: "VDisk is not available" location { storage { node { id: 1 host: "::1" port: 12001 } pool { name: "static" group { vdisk { id: "0-3-55-0-55" } } } } database { name: "/Root" } } type: "VDISK" level: 5 } issue_log { id: "YELLOW-ef3e-1231c6b1-0" status: YELLOW message: "Group degraded" location { storage { pool { name: "static" group { id: "0" } } } database { name: "/Root" } } reason: "RED-4847-1231c6b1-1-0-3-55-0-55" type: "STORAGE_GROUP" level: 4 } location { id: 1 host: "::1" port: 12001 } 2025-06-30T09:20:59.703093Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.703156Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.703171Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.703382Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.703414Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.703429Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011c1/r3tmp/tmpO8frc0/pdisk_1.dat 2025-06-30T09:20:59.776680Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18687, node 3 TClient is connected to server localhost:17076 2025-06-30T09:20:59.887546Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:59.887562Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:59.887565Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:59.887660Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:00.511017Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:423:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.511098Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:00.511114Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011c1/r3tmp/tmpTk4rca/pdisk_1.dat 2025-06-30T09:21:00.589933Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9564, node 5 TClient is connected to server localhost:30384 2025-06-30T09:21:00.698713Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:00.698730Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:00.698733Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:00.698851Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD database_status { name: "/Root/serverless" overall: GREEN storage { overall: GREEN pools { id: "/Root:test" overall: GREEN groups { id: "2147483648" overall: GREEN vdisks { id: "5-1-55" overall: GREEN pdisk { id: "5-1" overall: GREEN } } } } } compute { overall: GREEN nodes { id: "6" overall: GREEN load { overall: GREEN cores: 64 } } } } location { id: 5 host: "::1" port: 12001 } 2025-06-30T09:21:01.380058Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:498:2378], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.380110Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.380129Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011c1/r3tmp/tmpPunoXj/pdisk_1.dat 2025-06-30T09:21:01.459280Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62510, node 7 TClient is connected to server localhost:27834 2025-06-30T09:21:01.576798Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:01.576813Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:01.576816Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:01.576916Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD database_status { name: "/Root/serverless" overall: GREEN storage { overall: GREEN pools { id: "/Root:test" overall: GREEN groups { id: "2147483648" overall: GREEN vdisks { id: "7-1-55" overall: GREEN pdisk { id: "7-1" overall: GREEN } } } } } compute { overall: GREEN nodes { id: "9" overall: GREEN load { overall: GREEN cores: 64 } } } } location { id: 7 host: "::1" port: 12001 } 2025-06-30T09:21:02.534235Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:420:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:02.534291Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:02.534306Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011c1/r3tmp/tmp3d6E3F/pdisk_1.dat 2025-06-30T09:21:02.614319Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28668, node 10 TClient is connected to server localhost:7135 2025-06-30T09:21:02.712905Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:02.712923Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:02.712926Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:02.713016Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: EMERGENCY issue_log { id: "RED-f489-d10d1896" status: RED message: "Database has compute issues" location { database { name: "/Root/serverless" } } reason: "RED-7469-d10d1896" type: "DATABASE" level: 1 } issue_log { id: "RED-7469-d10d1896" status: RED message: "There are no compute nodes" location { database { name: "/Root/serverless" } } type: "COMPUTE" level: 2 } database_status { name: "/Root/serverless" overall: RED storage { overall: GREEN pools { id: "/Root:test" overall: GREEN groups { id: "2147483648" overall: GREEN vdisks { id: "10-1-55" overall: GREEN pdisk { id: "10-1" overall: GREEN } } } } } compute { overall: RED } } database_status { name: "/Root" overall: GREEN storage { overall: GREEN pools { id: "static" overall: GREEN groups { id: "0" overall: GREEN } } } compute { overall: GREEN nodes { id: "10" overall: GREEN load { overall: GREEN cores: 64 } } } } database_status { name: "/Root/shared" overall: GREEN storage { overall: GREEN pools { id: "/Root:test" overall: GREEN groups { id: "2147483648" overall: GREEN } } } compute { overall: GREEN nodes { id: "11" overall: GREY } } } location { id: 10 host: "::1" port: 12001 } >> THealthCheckTest::TestReBootingTabletIsDead [GOOD] >> THealthCheckTest::UnknowPDiskState >> DataStreams::TestUpdateStorage [GOOD] >> DataStreams::TestStreamTimeRetention >> DataStreams::TestGetShardIterator [GOOD] >> DataStreams::TestGetRecordsWithoutPermission >> DataStreams::TestStreamStorageRetention [GOOD] >> DataStreams::TestStreamPagination |81.0%| [TA] $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/health_check/ut/unittest >> THealthCheckTest::LayoutCorrect [GOOD] Test command err: 2025-06-30T09:20:58.362307Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.362367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.362400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:58.362436Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.362463Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.362480Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001234/r3tmp/tmpdRnsH5/pdisk_1.dat 2025-06-30T09:20:58.434402Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21053, node 1 TClient is connected to server localhost:10831 2025-06-30T09:20:58.524925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:58.524945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:58.524948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:58.524993Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:59.580901Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.580973Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.580988Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.581244Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.581257Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.581281Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001234/r3tmp/tmpg9zCld/pdisk_1.dat 2025-06-30T09:20:59.667005Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62531, node 3 TClient is connected to server localhost:19091 2025-06-30T09:20:59.780457Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:59.780471Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:59.780475Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:59.780567Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:00.636032Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.636104Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:00.636129Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:00.636327Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:622:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.636360Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:00.636379Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001234/r3tmp/tmpSkmkTt/pdisk_1.dat 2025-06-30T09:21:00.711792Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29374, node 5 TClient is connected to server localhost:14474 2025-06-30T09:21:00.832533Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:00.832550Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:00.832553Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:00.832680Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:01.803548Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.803612Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.803635Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:01.803833Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.803852Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.803870Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001234/r3tmp/tmpxBCpQl/pdisk_1.dat 2025-06-30T09:21:01.887149Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16132, node 7 TClient is connected to server localhost:23152 2025-06-30T09:21:02.012616Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:02.012640Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:02.012645Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:02.012714Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:02.814981Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:257:2216], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:02.815027Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:02.815055Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001234/r3tmp/tmp1ouT6n/pdisk_1.dat 2025-06-30T09:21:02.888512Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10909, node 9 TClient is connected to server localhost:61207 2025-06-30T09:21:02.998964Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:02.998984Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:02.998987Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:02.999090Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> DataStreams::TestDeleteStream [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlag >> DataStreams::TestControlPlaneAndMeteringData [GOOD] >> DataStreams::ChangeBetweenRetentionModes >> Worker::Basic >> ReadIteratorExternalBlobs::ExtBlobsMultipleColumns [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithCompactingMiddleRows |81.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |81.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview >> THealthCheckTest::TestBootingTabletIsNotDead [GOOD] >> TImportTests::CompletedImportEndTime >> TImportTests::ShouldFailOnInvalidValue >> TImportTests::ShouldRestorePartitioningByLoad >> TRestoreWithRebootsTests::ShouldSucceedOnMultiShardTable[Zstd] >> TRestoreTests::ZeroLengthEncryptedFileTreatedAsCorrupted >> THealthCheckTest::IgnoreServerlessWhenNotSpecific [GOOD] >> THealthCheckTest::HealthCheckConfigUpdate >> DataStreams::TestGetRecordsWithoutPermission [GOOD] >> DataStreams::TestGetRecordsWithCount [GOOD] >> DataStreams::TestInvalidRetentionCombinations >> TRestoreTests::ExportImportWithPermissionsCorruption >> TImportWithRebootsTests::ShouldSucceedOnTableWithChecksum >> TConsoleTests::TestModifyUsedZoneKind [GOOD] >> TConsoleTests::TestMergeConfig >> THealthCheckTest::UnknowPDiskState [GOOD] >> THealthCheckTest::TestSystemStateRetriesAfterReceivingResponse >> TRestoreTests::CancelUponProposeResultShouldSucceed[Raw] >> TImportTests::ShouldSucceedOnIndexedTable1 >> TRestoreTests::ShouldRestoreSpecialFpValues |81.0%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |81.0%| [LD] {RESULT} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |81.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/health_check/ut/unittest >> THealthCheckTest::TestBootingTabletIsNotDead [GOOD] Test command err: 2025-06-30T09:20:58.433996Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.434057Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.434092Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:58.434140Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.434178Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.434198Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001321/r3tmp/tmpFtuMyP/pdisk_1.dat 2025-06-30T09:20:58.506944Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1187, node 1 TClient is connected to server localhost:8726 2025-06-30T09:20:58.609157Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:58.609177Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:58.609182Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:58.609241Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:59.640499Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.640568Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.640583Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.640773Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.640784Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.640805Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001321/r3tmp/tmpAC76Ff/pdisk_1.dat 2025-06-30T09:20:59.714879Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2184, node 3 TClient is connected to server localhost:9778 2025-06-30T09:20:59.829603Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:59.829620Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:59.829623Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:59.829711Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:00.720271Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.720349Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:00.720375Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:00.720576Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:622:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.720607Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:00.720626Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001321/r3tmp/tmp1sNtFv/pdisk_1.dat 2025-06-30T09:21:00.796071Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11462, node 5 TClient is connected to server localhost:31768 2025-06-30T09:21:00.916143Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:00.916160Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:00.916164Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:00.916300Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:01.789048Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.789119Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.789142Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:01.789364Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.789384Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.789403Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001321/r3tmp/tmpysfNO3/pdisk_1.dat 2025-06-30T09:21:01.863497Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11914, node 7 TClient is connected to server localhost:7921 2025-06-30T09:21:01.987934Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:01.987952Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:01.987957Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:01.988028Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:03.008431Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:708:2378], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:03.008532Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:03.008568Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:03.008875Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:705:2321], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:03.008925Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:03.008947Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001321/r3tmp/tmpV6WTvD/pdisk_1.dat 2025-06-30T09:21:03.090294Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1374, node 9 TClient is connected to server localhost:26945 2025-06-30T09:21:03.587250Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:03.587269Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:03.587272Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:03.587333Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:03.590867Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:03.590895Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:03.623205Z node 9 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 11 Cookie 11 2025-06-30T09:21:03.623532Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-10" reason: "YELLOW-e9e2-1231c6b1-11" reason: "YELLOW-e9e2-1231c6b1-9" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-9" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 9 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-10" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 10 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-11" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 11 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 9 host: "::1" port: 12001 } >> TConsoleTests::TestCreateTenantWrongNameExtSubdomain [GOOD] >> DataStreams::TestNonChargeableUser [GOOD] >> TConsoleTests::TestCreateTenantWrongPool >> DataStreams::TestPutEmptyMessage |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> 
THiveTest::TestBridgeDemotion [GOOD] Test command err: 2025-06-30T09:17:53.253406Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:53.267862Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:53.267951Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:53.268925Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:53.269029Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-30T09:17:53.269908Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-30T09:17:53.269924Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:53.270121Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:50:2076] ControllerId# 72057594037932033 2025-06-30T09:17:53.270129Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:53.270153Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:53.270175Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:53.374928Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:53.374946Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:53.375470Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:58:2081] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.375493Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:59:2082] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.375513Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:60:2083] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.375750Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:61:2084] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.375770Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:62:2085] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.375787Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:49:2075] Create Queue# [1:63:2086] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.375806Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# 
[1:49:2075] Create Queue# [1:64:2087] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.375811Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-30T09:17:53.375827Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:50:2076] 2025-06-30T09:17:53.375834Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:50:2076] 2025-06-30T09:17:53.375843Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-30T09:17:53.375852Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:17:53.376034Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:17:53.376050Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-30T09:17:53.376558Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-30T09:17:53.376599Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-30T09:17:53.376739Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-30T09:17:53.376776Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-30T09:17:53.376904Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:75:2076] ControllerId# 72057594037932033 2025-06-30T09:17:53.376909Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-30T09:17:53.376919Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-30T09:17:53.376933Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-30T09:17:53.378534Z node 2 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:17:53.479557Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-30T09:17:53.479577Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-30T09:17:53.480189Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:82:2080] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.480218Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:83:2081] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.480247Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:84:2082] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.480276Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# 
[2:85:2083] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.480303Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:86:2084] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.480326Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:87:2085] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.480352Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:74:2075] Create Queue# [2:88:2086] targetNodeId# 1 Marker# DSP01 2025-06-30T09:17:53.480358Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-30T09:17:53.480373Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:75:2076] 2025-06-30T09:17:53.480379Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:75:2076] 2025-06-30T09:17:53.480387Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-30T09:17:53.480395Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-30T09:17:53.480468Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-30T09:17:53.480551Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.480560Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:53.480588Z node 1 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:17:53.480605Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:53.480649Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:75:2076] 2025-06-30T09:17:53.480654Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.480657Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-30T09:17:53.480663Z node 2 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:17:53.480677Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:17:53.568889Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:50:2076] 2025-06-30T09:17:53.568927Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.568937Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-30T09:17:53.570303Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-30T09:17:53.572045Z node 2 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-30T09:17:53.572083Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-30T09:17:53.572095Z node 2 :LOCAL DEBUG: local.cpp:1007: 
TLocalNodeRegistrar::Bootstrap 2025-06-30T09:17:53.572102Z node 2 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:17:53.572140Z node 2 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:96:2090] 2025-06-30T09:17:53.572206Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.572216Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-30T09:17:53.572241Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-30T09:17:53.574888Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-30T09:17:53.574916Z node 1 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:17:53.576391Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-30T09:17:53.576412Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [2:96:2090] 2025-06-30T09:17:53.576419Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] loo ... 36.045251Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:172: [231b85866275a88c] bootstrap ActorId# [22:1926:2389] Group# 2147483659 TabletId# 72075186224037891 Channel# 1 RecordGeneration# 2 PerGenerationCounter# 1 Deadline# 586524-01-19T08:01:49.551615Z CollectGeneration# 2 CollectStep# 0 Collect# true Hard# false RestartCounter# 0 Marker# DSPC03 2025-06-30T09:20:36.045256Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:175: [231b85866275a88c] Keep# [72075186224037891:1:2:1:8192:289:0] Marker# DSPC04 2025-06-30T09:20:36.045264Z node 22 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [22:1742:2248] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[72075186224037891:2:1:1] collect=[2:0] Keep: [72075186224037891:1:2:1:8192:289:0] cookie# 0 2025-06-30T09:20:36.045272Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:172: [7af63ec6dd26e03e] bootstrap ActorId# [22:1927:2390] Group# 2147483658 TabletId# 72075186224037891 Channel# 1 RecordGeneration# 2 PerGenerationCounter# 1 Deadline# 586524-01-19T08:01:49.551615Z CollectGeneration# 2 CollectStep# 0 Collect# true Hard# false RestartCounter# 0 Marker# DSPC03 2025-06-30T09:20:36.045277Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:175: [7af63ec6dd26e03e] Keep# [72075186224037891:1:2:1:8192:289:0] Marker# DSPC04 2025-06-30T09:20:36.045284Z node 22 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [22:1734:2241] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[72075186224037891:2:1:1] collect=[2:0] Keep: [72075186224037891:1:2:1:8192:289:0] cookie# 0 2025-06-30T09:20:36.046344Z node 22 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [e55fa82cf3022ac2] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037889 RecordGeneration# 2 Channel# 0 VDisk# [80000002:1:0:0:0]} Marker# DSPC01 2025-06-30T09:20:36.046365Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [e55fa82cf3022ac2] Result# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 
Status# OK} Marker# DSPC02 2025-06-30T09:20:36.046382Z node 22 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [303c51bb95bbf8eb] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037889 RecordGeneration# 2 Channel# 1 VDisk# [80000008:1:0:0:0]} Marker# DSPC01 2025-06-30T09:20:36.046387Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [303c51bb95bbf8eb] Result# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 1 Status# OK} Marker# DSPC02 2025-06-30T09:20:36.046433Z node 22 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# 1bbbc92da221759d GroupId# 2147483650 Status# OK PileState# SYNCHRONIZED Response# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-30T09:20:36.046442Z node 22 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# c801384d5b845b75 GroupId# 2147483656 Status# OK PileState# SYNCHRONIZED Response# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 1 Status# OK} 2025-06-30T09:20:36.053685Z node 22 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [fe53cb41d9f3f64e] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037889 RecordGeneration# 2 Channel# 0 VDisk# [80000001:1:0:0:0]} Marker# DSPC01 2025-06-30T09:20:36.053716Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [fe53cb41d9f3f64e] Result# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} Marker# DSPC02 2025-06-30T09:20:36.053744Z node 22 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [3fa267961874e568] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037889 RecordGeneration# 2 Channel# 1 VDisk# [80000007:1:0:0:0]} Marker# DSPC01 2025-06-30T09:20:36.053751Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [3fa267961874e568] Result# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 1 Status# OK} Marker# DSPC02 2025-06-30T09:20:36.053794Z node 22 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# 1bbbc92da221759d GroupId# 2147483649 Status# OK PileState# SYNCHRONIZED Response# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-30T09:20:36.053805Z node 22 :BS_PROXY_BRIDGE DEBUG: {BPB01@bridge.cpp:318} request finished RequestId# 1bbbc92da221759d Response# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-30T09:20:36.053817Z node 22 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# c801384d5b845b75 GroupId# 2147483655 Status# OK PileState# SYNCHRONIZED Response# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 1 Status# OK} 2025-06-30T09:20:36.053822Z node 22 :BS_PROXY_BRIDGE DEBUG: {BPB01@bridge.cpp:318} request finished RequestId# c801384d5b845b75 Response# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 1 Status# OK} 2025-06-30T09:20:36.053970Z node 22 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [8e40b5822c85ed8f] received TEvVCollectGarbageResult# {EvVCollectGarbageResult 
Status# OK TabletId# 72075186224037891 RecordGeneration# 2 Channel# 0 VDisk# [80000004:1:0:0:0]} Marker# DSPC01 2025-06-30T09:20:36.053980Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [8e40b5822c85ed8f] Result# TEvCollectGarbageResult {TabletId# 72075186224037891 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} Marker# DSPC02 2025-06-30T09:20:36.053993Z node 22 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [67212f5f523cfb6e] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037891 RecordGeneration# 2 Channel# 0 VDisk# [80000005:1:0:0:0]} Marker# DSPC01 2025-06-30T09:20:36.053999Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [67212f5f523cfb6e] Result# TEvCollectGarbageResult {TabletId# 72075186224037891 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} Marker# DSPC02 2025-06-30T09:20:36.054009Z node 22 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [231b85866275a88c] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037891 RecordGeneration# 2 Channel# 1 VDisk# [8000000b:1:0:0:0]} Marker# DSPC01 2025-06-30T09:20:36.054015Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [231b85866275a88c] Result# TEvCollectGarbageResult {TabletId# 72075186224037891 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 1 Status# OK} Marker# DSPC02 2025-06-30T09:20:36.054026Z node 22 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [7af63ec6dd26e03e] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037891 RecordGeneration# 2 Channel# 1 VDisk# [8000000a:1:0:0:0]} Marker# DSPC01 2025-06-30T09:20:36.054031Z node 22 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [7af63ec6dd26e03e] Result# TEvCollectGarbageResult {TabletId# 72075186224037891 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 1 Status# OK} Marker# DSPC02 2025-06-30T09:20:36.054065Z node 22 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# 603b2035dfde7e0b GroupId# 2147483652 Status# OK PileState# SYNCHRONIZED Response# TEvCollectGarbageResult {TabletId# 72075186224037891 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-30T09:20:36.054078Z node 22 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# d97ac707bb905eef GroupId# 2147483659 Status# OK PileState# SYNCHRONIZED Response# TEvCollectGarbageResult {TabletId# 72075186224037891 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 1 Status# OK} 2025-06-30T09:20:36.054101Z node 22 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# 603b2035dfde7e0b GroupId# 2147483653 Status# OK PileState# SYNCHRONIZED Response# TEvCollectGarbageResult {TabletId# 72075186224037891 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-30T09:20:36.054108Z node 22 :BS_PROXY_BRIDGE DEBUG: {BPB01@bridge.cpp:318} request finished RequestId# 603b2035dfde7e0b Response# TEvCollectGarbageResult {TabletId# 72075186224037891 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-30T09:20:36.054118Z node 22 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# d97ac707bb905eef GroupId# 2147483658 Status# OK PileState# SYNCHRONIZED Response# TEvCollectGarbageResult {TabletId# 72075186224037891 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 1 Status# OK} 2025-06-30T09:20:36.054124Z node 22 :BS_PROXY_BRIDGE DEBUG: {BPB01@bridge.cpp:318} request 
finished RequestId# d97ac707bb905eef Response# TEvCollectGarbageResult {TabletId# 72075186224037891 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 1 Status# OK} 2025-06-30T09:20:36.054277Z node 21 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [21:1928:2548] 2025-06-30T09:20:36.054288Z node 21 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [21:1928:2548] 2025-06-30T09:20:36.054305Z node 21 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-30T09:20:36.054317Z node 21 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 21 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [21:818:2270] 2025-06-30T09:20:36.054333Z node 21 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [21:1928:2548] 2025-06-30T09:20:36.054366Z node 21 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [21:1928:2548] 2025-06-30T09:20:36.054384Z node 21 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [21:1928:2548] 2025-06-30T09:20:36.054391Z node 21 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [21:1928:2548] 2025-06-30T09:20:36.054413Z node 21 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [21:1928:2548] 2025-06-30T09:20:36.054469Z node 21 :HIVE TRACE: hive_impl.cpp:118: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([21:1928:2548]) [21:1929:2549] 2025-06-30T09:20:36.054484Z node 21 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [21:1928:2548] 2025-06-30T09:20:36.054491Z node 21 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [21:1928:2548] 2025-06-30T09:20:36.054497Z node 21 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [21:1928:2548] 2025-06-30T09:20:36.054504Z node 21 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [21:1928:2548] 2025-06-30T09:20:36.054509Z node 21 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [21:1928:2548] 2025-06-30T09:20:36.054521Z node 21 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [21:812:2266] EventType# 268697616 >> TConsoleConfigSubscriptionTests::TestAddSubscriptionIdempotency [GOOD] >> TConsoleConfigSubscriptionTests::TestConfigNotificationRetries >> DataStreams::TestDeleteStreamWithEnforceFlag [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlagFalse >> TImportTests::ShouldFailOnInvalidValue [GOOD] >> TImportTests::ShouldFailOnOutboundKey >> TImportTests::ShouldRestorePartitioningByLoad [GOOD] >> TImportTests::ShouldRestoreMinMaxPartitionsCount >> DataStreams::TestStreamPagination [GOOD] >> DataStreams::TestShardPagination >> DataStreams::ChangeBetweenRetentionModes [GOOD] >> DataStreams::TestCreateExistingStream >> TConsoleTests::TestTenantGenerationExtSubdomain [GOOD] >> TConsoleTests::TestSchemeShardErrorForwarding >> BuildStatsHistogram::Many_Serial [GOOD] |81.1%| [LD] {default-linux-x86_64, 
relwithdebinfo} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |81.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest |81.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme >> TRestoreTests::ZeroLengthEncryptedFileTreatedAsCorrupted [GOOD] >> TRestoreWithRebootsTests::CancelShouldSucceed[Raw] >> TImportTests::ShouldSucceedOnIndexedTable1 [GOOD] >> TRestoreTests::CancelUponProposeResultShouldSucceed[Raw] [GOOD] >> TRestoreTests::CancelUponProposeResultShouldSucceed[Zstd] >> TImportTests::ShouldSucceedOnIndexedTable2 >> TImportTests::ShouldFailOnOutboundKey [GOOD] >> TRestoreTests::ExportImportWithPermissionsCorruption [GOOD] >> TRestoreTests::ExportImportWithSchemeChecksumCorruption >> TImportTests::ShouldFailOnNonUniqDestinationPaths >> TRestoreTests::ShouldRestoreSpecialFpValues [GOOD] >> TRestoreTests::ShouldRestoreSequence >> TImportTests::ShouldRestoreMinMaxPartitionsCount [GOOD] >> TRestoreTests::CancelUponProposeResultShouldSucceed[Zstd] [GOOD] >> TRestoreTests::CancelHungOperationShouldSucceed[Zstd] >> TImportTests::ShouldRestoreKeyBloomFilter >> THealthCheckTest::HealthCheckConfigUpdate [GOOD] >> DataStreams::TestInvalidRetentionCombinations [GOOD] >> THealthCheckTest::TestSystemStateRetriesAfterReceivingResponse [GOOD] >> TImportTests::ShouldFailOnNonUniqDestinationPaths [GOOD] >> DataStreams::TestPutEmptyMessage [GOOD] >> DataStreams::TestListStreamConsumers |81.1%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> DataStreams::TestDeleteStreamWithEnforceFlagFalse [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo >> TImportTests::ShouldSucceedOnIndexedTable2 [GOOD] >> TImportTests::ShouldSucceedOnIndexedTable3 >> DataStreams::TestCreateExistingStream [GOOD] >> DataStreams::ListStreamsValidation >> TImportTests::ShouldRestoreKeyBloomFilter [GOOD] >> TRestoreTests::CancelHungOperationShouldSucceed[Zstd] [GOOD] |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestInvalidRetentionCombinations [GOOD] Test command err: 2025-06-30T09:21:02.867990Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669980629795420:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:02.868014Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001b8d/r3tmp/tmpFqUpFM/pdisk_1.dat 2025-06-30T09:21:02.932787Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28653, node 1 2025-06-30T09:21:02.942500Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:02.942515Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:02.942518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:02.942555Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61517 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:21:02.968284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:02.968318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:02.970225Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:03.003203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:03.017983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:61517 2025-06-30T09:21:03.033693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-30T09:21:03.697970Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669984017795049:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:03.697996Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001b8d/r3tmp/tmpyJ8JOO/pdisk_1.dat 2025-06-30T09:21:03.718177Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24152, node 4 2025-06-30T09:21:03.732850Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:03.732868Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:03.732870Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:03.732907Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1301 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:03.798835Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:03.798873Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:03.800520Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:03.803106Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:03.819251Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:1301 2025-06-30T09:21:03.833127Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:21:03.865714Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:03.867980Z node 4 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [4:7521669984017796215:2810], for# user2@builtin, access# DescribeSchema 2025-06-30T09:21:03.868693Z node 4 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [4:7521669984017796218:2811], for# user2@builtin, access# DescribeSchema 2025-06-30T09:21:03.869465Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:04.542290Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521669987138909520:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:04.542308Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001b8d/r3tmp/tmpiXyZDP/pdisk_1.dat 2025-06-30T09:21:04.556428Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:04.564419Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 24004, node 7 2025-06-30T09:21:04.568069Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:04.568083Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:04.568086Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:04.568132Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63450 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:04.642556Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:04.642583Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:04.644247Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:04.646554Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:04.670965Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:63450 2025-06-30T09:21:04.681257Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting...
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 168, storage 10, code: 500080
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 144, storage 0, code: 500080
: Error: write_speed per second in partition must have values from set {131072,524288,1048576}, got 130048, code: 500080
: Error: write_speed per second in partition must have values from set {131072,524288,1048576}, got 1049600, code: 500080 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/health_check/ut/unittest >> THealthCheckTest::HealthCheckConfigUpdate [GOOD] Test command err: 2025-06-30T09:20:59.151637Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.151696Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.151736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.151774Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.151799Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.151816Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001189/r3tmp/tmpT2yJNa/pdisk_1.dat 2025-06-30T09:20:59.226619Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6777, node 1 TClient is connected to server localhost:18269 2025-06-30T09:20:59.318085Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:59.318100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:59.318103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:59.318141Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:00.260910Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.260992Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:00.261012Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:00.261276Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.261288Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:00.261317Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001189/r3tmp/tmp19eDNe/pdisk_1.dat 2025-06-30T09:21:00.345451Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14392, node 3 TClient is connected to server localhost:16047 2025-06-30T09:21:00.462289Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:00.462307Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:00.462312Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:00.462431Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:01.498438Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.498507Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.498531Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:01.498725Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:622:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.498774Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:01.498790Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001189/r3tmp/tmpj4eieJ/pdisk_1.dat 2025-06-30T09:21:01.568510Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18113, node 5 TClient is connected to server localhost:1115 2025-06-30T09:21:01.694311Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:01.694333Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:01.694338Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:01.694523Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 5 host: "::1" port: 12001 } 2025-06-30T09:21:02.714321Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:02.714392Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:02.714417Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:02.714621Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:02.714641Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:02.714661Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001189/r3tmp/tmpDW9oxt/pdisk_1.dat 2025-06-30T09:21:02.797996Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2778, node 7 TClient is connected to server localhost:62110 2025-06-30T09:21:02.921073Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:02.921098Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:02.921104Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:02.921179Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-7" reason: "YELLOW-e9e2-1231c6b1-8" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-7" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 7 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-8" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 8 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 7 host: "::1" port: 12001 } 2025-06-30T09:21:03.764478Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:257:2216], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:03.764527Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:03.764545Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001189/r3tmp/tmpZ5mng8/pdisk_1.dat 2025-06-30T09:21:03.858770Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2110, node 9 TClient is connected to server localhost:20964 2025-06-30T09:21:03.969034Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:03.969051Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:03.969054Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:03.969170Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-9" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-9" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 9 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } database_status { name: "/Root" overall: YELLOW storage { overall: GREEN pools { id: "static" overall: GREEN groups { id: "0" overall: GREEN } } } compute { overall: YELLOW nodes { id: "9" overall: YELLOW load { overall: YELLOW load: 123.390625 cores: 64 } } } } database_status { name: "/Root/shared" overall: GREEN storage { overall: GREEN pools { id: "/Root:test" overall: GREEN groups { id: "2147483648" overall: GREEN vdisks { id: "9-1-55" overall: GREEN pdisk { id: "9-1" overall: GREEN } } } } } compute { overall: GREEN nodes { id: "10" overall: GREY } } } location { id: 9 host: "::1" port: 12001 } 2025-06-30T09:21:04.769780Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:421:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:04.769836Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:04.769853Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001189/r3tmp/tmpT5eppt/pdisk_1.dat 2025-06-30T09:21:04.860881Z node 11 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9934, node 11 TClient is connected to server localhost:6249 2025-06-30T09:21:04.989987Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:04.990003Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:04.990007Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:04.990101Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: EMERGENCY issue_log { id: "RED-be81" status: RED message: "Database has storage issues" reason: "RED-caea" type: "DATABASE" level: 1 } issue_log { id: "RED-caea" status: RED message: "There are no storage pools" type: "STORAGE" level: 2 } database_status { name: "/Root/database" overall: RED storage { overall: RED } compute { overall: GREEN nodes { id: "12" overall: GREEN load { overall: GREEN cores: 64 } } } } location { id: 11 } self_check_result: EMERGENCY issue_log { id: "RED-70fb" status: RED message: "Database has multiple issues" reason: "RED-caea" reason: "YELLOW-89f0" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-89f0" status: YELLOW message: "Some nodes are restarting too often" reason: "YELLOW-6bba-12" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-6bba-12" status: YELLOW message: "The number of node restarts has increased" location { compute { node { id: 12 host: "::1" port: 12001 } } } type: "NODE_UPTIME" level: 4 } issue_log { id: "RED-caea" status: RED message: "There are no storage pools" type: "STORAGE" level: 2 } database_status { name: "/Root/database" overall: RED storage { overall: RED } compute { overall: YELLOW nodes { id: "12" overall: GREEN load { overall: GREEN cores: 64 } } } } location { id: 11 } self_check_result: EMERGENCY issue_log { id: "RED-70fb" status: RED message: "Database has multiple issues" reason: "ORANGE-89f0" reason: "RED-caea" type: "DATABASE" level: 1 } issue_log { id: "ORANGE-89f0" status: ORANGE message: "Some nodes are restarting too often" reason: "ORANGE-aa61-12" type: "COMPUTE" level: 2 } issue_log { id: "ORANGE-aa61-12" status: ORANGE message: "Node is restarting too often" location { compute { node { id: 12 host: "::1" port: 12001 } } } type: "NODE_UPTIME" level: 4 } issue_log { id: "RED-caea" status: RED message: "There are no storage pools" type: "STORAGE" level: 2 } database_status { name: "/Root/database" overall: RED storage { overall: RED } compute { overall: ORANGE nodes { id: "12" overall: GREEN load { overall: GREEN cores: 64 } } } } location { id: 11 } ------- [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/core/tx/schemeshard/ut_restore/unittest >> TImportTests::ShouldFailOnNonUniqDestinationPaths [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:04.459528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:04.459550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.459555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:04.459558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:04.459561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:04.459564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:04.459571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.459585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:04.459664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:04.459719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:04.470987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:04.471011Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:04.473959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:04.474001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:04.474044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:04.475635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:04.475695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:04.475793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.475833Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:04.476430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.476462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:04.476677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.476684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.476702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:04.476708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:04.476712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:04.476725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.477976Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.494385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:04.494453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.494504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:04.494511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:04.494554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:04.494564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:04.495193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 
1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.495231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:04.495269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.495276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:04.495280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:04.495284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:04.495604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.495612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:04.495615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:04.495892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.495900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.495904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.495910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:04.496397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:04.496809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:04.496835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:04.496989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.497008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: 
TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:04.497014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.497057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:04.497062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.497085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:04.497093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:04.497458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.497464Z node 1 :FLAT_TX_SCHEMESHARD ... t /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:05.195083Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:05.195122Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:05.195157Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:05.195166Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:05.195172Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:05.195176Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:05.195461Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:05.195471Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:05.195475Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:05.195759Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:05.195765Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, 
operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:05.195769Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:05.195775Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:05.195796Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:05.196036Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:05.196060Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:05.196174Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:05.196190Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 12884904046 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:05.196195Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:05.196235Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:05.196243Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:05.196272Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:05.196287Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:05.196642Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:05.196648Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:05.196684Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 
72057594046678944 2025-06-30T09:21:05.196687Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-30T09:21:05.196746Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:05.196751Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-30T09:21:05.196761Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:21:05.196764Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:21:05.196769Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:21:05.196771Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:21:05.196774Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-30T09:21:05.196778Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:21:05.196781Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-30T09:21:05.196784Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1:0 2025-06-30T09:21:05.196793Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:21:05.196797Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-30T09:21:05.196800Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-30T09:21:05.196857Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:21:05.196864Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:21:05.196868Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-30T09:21:05.196871Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-30T09:21:05.196874Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:05.196883Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-30T09:21:05.197364Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-30T09:21:05.197427Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:05.198397Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:130: TImport::TTxCreate: DoExecute 2025-06-30T09:21:05.198422Z node 3 :IMPORT TRACE: schemeshard_import__create.cpp:131: Message: TxId: 101 DatabaseName: "/MyRoot" Request { ImportFromS3Settings { endpoint: "localhost:22068" scheme: HTTP items { source_prefix: "a" destination_path: "/MyRoot/Table" } items { source_prefix: "b" destination_path: "/MyRoot/Table" } } } 2025-06-30T09:21:05.198454Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:252: TImport::TTxCreate: Reply: status# BAD_REQUEST, error# Duplicate destination_path: /MyRoot/Table 2025-06-30T09:21:05.198458Z node 3 :IMPORT TRACE: schemeshard_import__create.cpp:253: Message: TxId: 101 2025-06-30T09:21:05.198488Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:236: TImport::TTxCreate: DoComplete 2025-06-30T09:21:05.198499Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [3:274:2263] Bootstrap 2025-06-30T09:21:05.199806Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [3:274:2263] Become StateWork (SchemeCache [3:279:2268]) 2025-06-30T09:21:05.199945Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [3:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:21:05.200347Z node 3 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestWaitNotification wait txId: 101 2025-06-30T09:21:05.200383Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-30T09:21:05.200389Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-30T09:21:05.200430Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-30T09:21:05.200443Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:21:05.200446Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [3:286:2275] TestWaitNotification: OK eventTxId 101 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/health_check/ut/unittest >> THealthCheckTest::TestSystemStateRetriesAfterReceivingResponse [GOOD] Test command err: 2025-06-30T09:20:58.430457Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.430517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.430551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:58.430589Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.430614Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.430632Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011ee/r3tmp/tmpvgHSol/pdisk_1.dat 2025-06-30T09:20:58.498220Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16544, node 1 TClient is connected to server localhost:12675 2025-06-30T09:20:58.589523Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:58.589537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:58.589540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:58.589577Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:59.545698Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:371:2221], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.545799Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.545807Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.545842Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:708:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.545884Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.545900Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011ee/r3tmp/tmpPKr6Ff/pdisk_1.dat 2025-06-30T09:20:59.634320Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25448, node 3 TClient is connected to server localhost:29671 2025-06-30T09:21:00.055416Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:00.055437Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:00.055441Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:00.055637Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:00.058119Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:00.058150Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:00.081430Z node 3 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-30T09:21:00.081582Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:00.149160Z node 3 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 5 2025-06-30T09:21:00.149357Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connected -> Disconnected self_check_result: EMERGENCY issue_log { id: "RED-f489-1231c6b1" status: RED message: "Database has compute issues" location { database { name: "/Root" } } reason: "RED-6fa7-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "RED-6fa7-1231c6b1" status: RED message: "Compute has issues with tablets" location { database { name: "/Root" } } reason: "RED-e5e3-1231c6b1-PersQueue" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" reason: "YELLOW-e9e2-1231c6b1-5" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: 
"::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-e5e3-1231c6b1-PersQueue" status: RED message: "Tablets are dead" location { compute { tablet { type: "PersQueue" id: "72075186224037888" count: 1 } } database { name: "/Root" } node { } } type: "TABLET" level: 4 } location { id: 3 host: "::1" port: 12001 } 2025-06-30T09:21:01.483639Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:626:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.483732Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.483758Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:01.483938Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:621:2163], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.483983Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:01.483999Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011ee/r3tmp/tmpKezVNR/pdisk_1.dat 2025-06-30T09:21:01.552952Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20496, node 6 TClient is connected to server localhost:20830 2025-06-30T09:21:01.981068Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:01.981123Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:01.981130Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:01.981512Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:01.981738Z node 6 :HIVE TRACE: hive_impl.cpp:118: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([6:1274:2664]) [6:1499:2669] 2025-06-30T09:21:01.982036Z node 6 :HIVE DEBUG: hive_impl.cpp:34: HIVE#72057594037968897 Handle TEvHive::TEvCreateTablet(PersQueue(72057594046578946,0)) 2025-06-30T09:21:01.985490Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:200: HIVE#72057594037968897 THive::TTxCreateTablet::Execute Owner: 72057594046578946 OwnerIdx: 0 TabletType: PersQueue BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } 2025-06-30T09:21:01.985536Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:348: HIVE#72057594037968897 Hive 72057594037968897 allocated TabletId 72075186224037888 from TabletIdIndex 65536 2025-06-30T09:21:01.985762Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:440: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for type PersQueue: {} 2025-06-30T09:21:01.985777Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:447: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for profile 'default': {Memory: 1048576} 2025-06-30T09:21:01.985861Z node 6 :HIVE DEBUG: hive_impl.cpp:2821: HIVE#72057594037968897 CreateTabletFollowers Tablet PersQueue.72075186224037888.Leader.0 2025-06-30T09:21:01.985876Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:173: HIVE#72057594037968897 THive::TTxCreateTablet::Execute TabletId: 72075186224037888 Status: OK 2025-06-30T09:21:01.985957Z node 6 :HIVE DEBUG: hive_impl.cpp:1079: HIVE#72057594037968897 THive::AssignTabletGroups TEvControllerSelectGroups tablet 72075186224037888 GroupParameters { StoragePoolSpecifier { Name: "/Root:test" } } ReturnAllMatchingGroups: true 2025-06-30T09:21:01.986630Z node 6 :HIVE DEBUG: hive_impl.cpp:72: HIVE#72057594037968897 Connected to tablet 72057594037932033 from tablet 72057594037968897 2025-06-30T09:21:01.991026Z node 6 :HIVE TRACE: hive_impl.cpp:118: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([8:1478:2330]) [6:1542:2677] 
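The hive trace here merges default resources for the new PersQueue tablet ({Memory: 1048576} for profile 'default') and, once the tablet runs, folds that delta into per-object and per-type totals — the UpdateTotalResources "{} -> {Memory: 1048576}" entries that follow. As a purely illustrative sketch of that bookkeeping shape (the TResources struct and Update signature below are hypothetical, not YDB's THive code):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Toy model of per-type resource totals, loosely shaped after the
// "UpdateTotalResources: Type PersQueue: {} -> {Memory: 1048576}" log lines.
struct TResources {
    uint64_t Memory = 0;
};

TResources operator+(TResources a, const TResources& b) { a.Memory += b.Memory; return a; }
TResources operator-(TResources a, const TResources& b) { a.Memory -= b.Memory; return a; }

class TResourceTotals {
public:
    // Replace the resources attributed to one tablet with `next`, folding the
    // before/after delta into the running total for its type -- the log prints
    // exactly such a prev -> next pair on every update.
    void Update(const std::string& type, const TResources& prev, const TResources& next) {
        TotalsByType[type] = TotalsByType[type] - prev + next;
    }
    uint64_t MemoryFor(const std::string& type) const {
        auto it = TotalsByType.find(type);
        return it == TotalsByType.end() ? 0 : it->second.Memory;
    }
private:
    std::map<std::string, TResources> TotalsByType;
};

int main() {
    TResourceTotals totals;
    totals.Update("PersQueue", TResources{}, TResources{1048576}); // tablet started
    std::cout << totals.MemoryFor("PersQueue") << "\n";            // 1048576
    totals.Update("PersQueue", TResources{1048576}, TResources{}); // tablet stopped
    std::cout << totals.MemoryFor("PersQueue") << "\n";            // 0
}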
2025-06-30T09:21:01.994837Z node 6 :HIVE DEBUG: hive_impl.cpp:145: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [8:1477:2330] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Prior ... 2025-06-30T09:21:02.071871Z node 6 :HIVE TRACE: hive_impl.cpp:2571: HIVE#72057594037968897 UpdateTotalResources: ObjectId (72057594046578946,0): {} -> {Memory: 1048576} 2025-06-30T09:21:02.071878Z node 6 :HIVE TRACE: hive_impl.cpp:2577: HIVE#72057594037968897 UpdateTotalResources: Type PersQueue: {} -> {Memory: 1048576} 2025-06-30T09:21:02.071917Z node 6 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-30T09:21:02.071923Z node 6 :HIVE TRACE: hive_impl.cpp:348: HIVE#72057594037968897 ProcessBootQueue - sending 2025-06-30T09:21:02.072001Z node 6 :HIVE TRACE: hive_impl.cpp:332: HIVE#72057594037968897 ProcessBootQueue - executing 2025-06-30T09:21:02.072013Z node 6 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-30T09:21:02.072019Z node 6 :HIVE DEBUG: hive_impl.cpp:226: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-30T09:21:02.072025Z node 6 :HIVE DEBUG: hive_impl.cpp:306: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-30T09:21:02.083336Z node 6 :HIVE DEBUG: tx__update_tablet_status.cpp:216: HIVE#72057594037968897 THive::TTxUpdateTabletStatus::Complete TabletId: 72075186224037888 SideEffects: {Notifications: 0x10040207 [6:1273:2663] {EvTabletCreationResult Status: OK TabletID: 72075186224037888}} 2025-06-30T09:21:02.083369Z node 6 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-30T09:21:02.685403Z node 6 :HIVE DEBUG: hive_impl.cpp:735: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 8: Status: 2 2025-06-30T09:21:02.685443Z node 6 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(8)::Execute 2025-06-30T09:21:02.685450Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 8 2025-06-30T09:21:02.685472Z node 6 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(8)::Complete 2025-06-30T09:21:02.685710Z node 6 :HIVE DEBUG: tx__restart_tablet.cpp:32: HIVE#72057594037968897 THive::TTxRestartTablet(PersQueue.72075186224037888.Leader.1)::Execute 2025-06-30T09:21:02.685740Z node 6 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(PersQueue.72075186224037888.Leader.1) VolatileState: Running -> Stopped (Node 8) 2025-06-30T09:21:02.685749Z node 6 :HIVE TRACE: node_info.cpp:118: HIVE#72057594037968897 Node(8, (0,1048576,0,0)->(0,0,0,0)) 2025-06-30T09:21:02.685768Z node 6 :HIVE TRACE: hive_impl.cpp:2571: HIVE#72057594037968897 UpdateTotalResources: ObjectId (72057594046578946,0): {Memory: 1048576} -> {} 2025-06-30T09:21:02.685774Z node 6 :HIVE TRACE: hive_impl.cpp:2577: HIVE#72057594037968897 UpdateTotalResources: Type PersQueue: {Memory: 1048576} -> {} 2025-06-30T09:21:02.685784Z node 6 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(PersQueue.72075186224037888.Leader.1 gen 1) to node 8 2025-06-30T09:21:02.685790Z node 6 :HIVE DEBUG: tablet_info.cpp:125: HIVE#72057594037968897 Tablet(PersQueue.72075186224037888.Leader.1) VolatileState: Stopped -> Booting 2025-06-30T09:21:02.685798Z node 6 :HIVE DEBUG: 
hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (1) 2025-06-30T09:21:02.685801Z node 6 :HIVE TRACE: hive_impl.cpp:348: HIVE#72057594037968897 ProcessBootQueue - sending 2025-06-30T09:21:02.685858Z node 6 :HIVE DEBUG: tx__kill_node.cpp:22: HIVE#72057594037968897 THive::TTxKillNode(8)::Execute 2025-06-30T09:21:02.685874Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:21:02.685879Z node 6 :HIVE TRACE: hive_domains.cpp:16: Node(8) DeregisterInDomains (72057594046644480:1) : 1 -> 0 2025-06-30T09:21:02.685885Z node 6 :HIVE DEBUG: hive_impl.cpp:2808: HIVE#72057594037968897 RemoveRegisteredDataCentersNode(3, 8) 2025-06-30T09:21:02.685891Z node 6 :HIVE TRACE: tx__kill_node.cpp:50: HIVE#72057594037968897 THive::TTxKillNode - killing pipe server [6:1542:2677] 2025-06-30T09:21:02.685895Z node 6 :HIVE DEBUG: hive_impl.cpp:109: HIVE#72057594037968897 TryToDeleteNode(8): waiting 3600.000000s 2025-06-30T09:21:02.685979Z node 6 :HIVE TRACE: hive_impl.cpp:126: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerDisconnected([8:1478:2330]) [6:1542:2677] 2025-06-30T09:21:02.686989Z node 6 :HIVE TRACE: hive_impl.cpp:118: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([6:1865:2692]) [6:1866:2697] 2025-06-30T09:21:02.688546Z node 6 :HIVE TRACE: hive_impl.cpp:126: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerDisconnected([6:1865:2692]) [6:1866:2697] 2025-06-30T09:21:02.688876Z node 6 :HIVE TRACE: hive_impl.cpp:118: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([9:1842:2329]) [6:1900:2700] 2025-06-30T09:21:02.690358Z node 6 :HIVE DEBUG: hive_impl.cpp:145: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [9:1841:2329] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-06-30T09:21:02.690384Z node 6 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(9)::Execute 2025-06-30T09:21:02.690413Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:02.690419Z node 6 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-30T09:21:02.690422Z node 6 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (1) 2025-06-30T09:21:02.690425Z node 6 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-30T09:21:02.690428Z node 6 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (1) 2025-06-30T09:21:02.690437Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
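The node lifecycle visible in this trace is a small state machine: a node registers as Unknown -> Disconnected, then Disconnected -> Connecting -> Connected, and a status-2 event kills it back to Connected -> Disconnected (followed by TTxKillNode and a delayed TryToDeleteNode). A minimal sketch of just those logged transitions, assuming nothing about YDB's actual node-tracking code:

#include <iostream>
#include <stdexcept>

// States and transitions taken verbatim from the "VolatileState: X -> Y" lines.
enum class EVolatileState { Unknown, Disconnected, Connecting, Connected };

const char* Name(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return "Unknown";
        case EVolatileState::Disconnected: return "Disconnected";
        case EVolatileState::Connecting:   return "Connecting";
        case EVolatileState::Connected:    return "Connected";
    }
    return "?";
}

bool IsAllowed(EVolatileState from, EVolatileState to) {
    switch (from) {
        case EVolatileState::Unknown:      return to == EVolatileState::Disconnected;
        case EVolatileState::Disconnected: return to == EVolatileState::Connecting;
        case EVolatileState::Connecting:   return to == EVolatileState::Connected;
        case EVolatileState::Connected:    return to == EVolatileState::Disconnected;
    }
    return false;
}

int main() {
    // Replay the sequence a node goes through in the log above.
    EVolatileState s = EVolatileState::Unknown;
    for (EVolatileState next : {EVolatileState::Disconnected, EVolatileState::Connecting,
                                EVolatileState::Connected, EVolatileState::Disconnected}) {
        if (!IsAllowed(s, next)) throw std::runtime_error("bad transition");
        std::cout << Name(s) << " -> " << Name(next) << "\n";
        s = next;
    }
}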
2025-06-30T09:21:02.691569Z node 6 :HIVE DEBUG: hive_impl.cpp:812: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 9 Location DataCenter: "4" Module: "4" Rack: "4" Unit: "4" self_check_result: EMERGENCY issue_log { id: "RED-f489-1231c6b1" status: RED message: "Database has compute issues" location { database { name: "/Root" } } reason: "RED-6fa7-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "RED-6fa7-1231c6b1" status: RED message: "Compute has issues with tablets" location { database { name: "/Root" } } reason: "RED-e5e3-1231c6b1-PersQueue" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-6" reason: "YELLOW-e9e2-1231c6b1-7" reason: "YELLOW-e9e2-1231c6b1-8" reason: "YELLOW-e9e2-1231c6b1-9" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-7" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 7 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-8" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 8 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-9" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 9 host: "::1" port: 12004 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-e5e3-1231c6b1-PersQueue" status: RED message: "Tablets are dead" location { compute { tablet { type: "PersQueue" id: "72075186224037888" count: 1 } } database { name: "/Root" } node { } } type: "TABLET" level: 4 } location { id: 6 host: "::1" port: 12001 } 2025-06-30T09:21:03.940591Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:420:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:03.940657Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:03.940674Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011ee/r3tmp/tmp9H1XPU/pdisk_1.dat 2025-06-30T09:21:04.018787Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2683, node 10 TClient is connected to server localhost:8415 2025-06-30T09:21:04.127483Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:04.127507Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:04.127512Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:04.127633Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:04.822303Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:422:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:04.822348Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:04.822392Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011ee/r3tmp/tmpjtzQ27/pdisk_1.dat 2025-06-30T09:21:04.905352Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8162, node 12 TClient is connected to server localhost:12641 2025-06-30T09:21:05.012781Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:05.012806Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:05.012812Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:05.012900Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> DataStreams::TestShardPagination [GOOD] >> TRestoreTests::ShouldRestoreSequence [GOOD] >> TRestoreTests::ShouldRestoreSequenceWithOverflow >> TRestoreTests::ExportImportWithSchemeChecksumCorruption [GOOD] >> TRestoreTests::ExportImportWithSchemeChecksumAbsence |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TxUsage::Sinks_Oltp_WriteToTopic_4_Query [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices_Sticky |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_restore/unittest >> TRestoreTests::CancelHungOperationShouldSucceed[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:04.615395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:04.615417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.615422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:04.615426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 
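The schemeshard bootstrap below reports its StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100. A policy of that shape flushes when a batch reaches the size cap or when the timeout since the first queued item expires, whichever comes first. The sketch below illustrates only that flush rule; the class and its API are hypothetical, not schemeshard's actual stats pipeline:

#include <chrono>
#include <iostream>
#include <vector>

// Toy batcher: MaxBatch items per flush, or Timeout measured from the first
// pending item -- mirroring the StatsMaxBatchSize / StatsBatchTimeout pair
// printed in the config dump.
class TStatsBatcher {
public:
    TStatsBatcher(size_t maxBatch, std::chrono::milliseconds timeout)
        : MaxBatch(maxBatch), Timeout(timeout) {}

    // Returns true when the caller should flush the accumulated batch.
    bool Add(int stat, std::chrono::steady_clock::time_point now) {
        if (Pending.empty()) {
            FirstQueued = now; // the timeout clock starts at the first queued item
        }
        Pending.push_back(stat);
        return Pending.size() >= MaxBatch || now - FirstQueued >= Timeout;
    }

    std::vector<int> TakeBatch() {
        std::vector<int> out;
        out.swap(Pending);
        return out;
    }

private:
    size_t MaxBatch;
    std::chrono::milliseconds Timeout;
    std::vector<int> Pending;
    std::chrono::steady_clock::time_point FirstQueued;
};

int main() {
    TStatsBatcher batcher(100, std::chrono::milliseconds(100)); // values from the log
    auto t0 = std::chrono::steady_clock::now();
    for (int i = 0; i < 100; ++i) {
        if (batcher.Add(i, t0)) {
            std::cout << "flush " << batcher.TakeBatch().size() << " stats\n"; // flush 100 stats
        }
    }
}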
2025-06-30T09:21:04.615430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:04.615433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:04.615440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.615451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:04.615568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:04.615627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:04.631246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:04.631272Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:04.635184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:04.635251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:04.635302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:04.637218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:04.637298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:04.637411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.637489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:04.638318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.638369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:04.638659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.638671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.638698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:04.638707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:04.638714Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:04.638791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.640316Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.655407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:04.655459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.655503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:04.655508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:04.655546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:04.655555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:04.656526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.656562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:04.656605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.656611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:04.656615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:04.656618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:04.656958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.656969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: 
NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:04.656978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:04.657284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.657294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.657300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.657307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:04.657989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:04.658387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:04.658423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:04.658585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.658607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:04.658615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.658667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:04.658674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.658702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:04.658713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:04.659104Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.659113Z node 1 :FLAT_TX_SCHEMESHARD ... 06-30T09:21:05.477626Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 133 2025-06-30T09:21:05.477733Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:05.477740Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-30T09:21:05.477937Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:21:05.477951Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:21:05.477956Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:21:05.477962Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-30T09:21:05.477968Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:21:05.477985Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-30T09:21:05.478472Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:21:05.478483Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:324: TRestore TAborting, opId: 102:0 ProgressState at tablet72057594046678944 2025-06-30T09:21:05.478490Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:351: TRestore Abort, on datashard: 72075186233409546, opId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:21:05.478547Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:21:05.478863Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 102:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 269551625 TEvCancelTxResult for TargetTxId: 102, wait until TargetTxId: 102 TestWaitNotification wait txId: 102 2025-06-30T09:21:05.480402Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:21:05.480414Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-30T09:21:05.480430Z 
node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-30T09:21:05.480434Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-30T09:21:05.480516Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:21:05.480523Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-30T09:21:05.480529Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:21:05.480554Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-30T09:21:05.480568Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:21:05.480573Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [3:431:2400] TestWaitNotification: OK eventTxId 103 2025-06-30T09:21:05.491514Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6376: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: ERROR Error { Kind: WRONG_SHARD_STATE Reason: "Interrupted Restore operation [5000003:102] while waiting to finish at 72075186233409546" } TxId: 102 ExecLatency: 5 ProposeLatency: 6 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 6303 } } 2025-06-30T09:21:05.491537Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:21:05.491567Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: ERROR Error { Kind: WRONG_SHARD_STATE Reason: "Interrupted Restore operation [5000003:102] while waiting to finish at 72075186233409546" } TxId: 102 ExecLatency: 5 ProposeLatency: 6 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 6303 } } 2025-06-30T09:21:05.491586Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: ERROR Error { Kind: WRONG_SHARD_STATE Reason: "Interrupted Restore operation [5000003:102] while waiting to finish at 72075186233409546" } TxId: 102 ExecLatency: 5 ProposeLatency: 6 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 6303 } } FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:21:05.491802Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 12884904184 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: false Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 
2025-06-30T09:21:05.491809Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:21:05.491827Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 12884904184 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: false Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-30T09:21:05.491842Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TRestore TAborting, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 311 RawX2: 12884904184 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: false Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-30T09:21:05.491856Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: Aborting, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:05.491861Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:21:05.491867Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:21:05.491874Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 133 -> 240 2025-06-30T09:21:05.491913Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:05.492656Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:21:05.492756Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:21:05.492781Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:21:05.492790Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:21:05.492806Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:21:05.492814Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:21:05.492821Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:21:05.492825Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready 
parts: 1/1 2025-06-30T09:21:05.492831Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:21:05.492846Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:340:2317] message: TxId: 102 2025-06-30T09:21:05.492854Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:21:05.492860Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:21:05.492865Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:21:05.492897Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:21:05.493368Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:21:05.493380Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [3:431:2400] TestWaitNotification: OK eventTxId 102 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_restore/unittest >> TImportTests::ShouldRestoreKeyBloomFilter [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:04.408543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:04.408565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.408570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:04.408573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:04.408578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:04.408581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:04.408587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.408597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-30T09:21:04.408679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:04.408739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:04.419131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:04.419151Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:04.421575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:04.421616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:04.421662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:04.423053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:04.423104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:04.423212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.423261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:04.423760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.423793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:04.424036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.424044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.424063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:04.424069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:04.424073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:04.424087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.425187Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.439758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:04.439831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.439898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:04.439905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:04.439948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:04.439959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:04.440543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.440581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:04.440638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.440646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:04.440650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:04.440654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:04.440954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.440962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:04.440968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:04.441241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.441248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.441253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.441258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:04.441731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:04.442123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:04.442153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:04.442288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.442308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:04.442312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.442354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:04.442359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.442384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:04.442393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:04.442697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.442703Z node 1 :FLAT_TX_SCHEMESHARD ... 
3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 281474976720758:0, reason# domain is not a serverless db, domain# /MyRoot/User, domainPathId# [OwnerId: 72075186233409546, LocalPathId: 1], IsDomainSchemeShard: 0, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 2], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:21:05.497031Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976720758:0, at schemeshard: 72075186233409546 2025-06-30T09:21:05.497103Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976720758:0, at schemeshard: 72075186233409546 2025-06-30T09:21:05.497112Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72075186233409546] TDone opId# 281474976720758:0 ProgressState 2025-06-30T09:21:05.497126Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976720758:0 progress is 1/1 2025-06-30T09:21:05.497131Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976720758 ready parts: 1/1 2025-06-30T09:21:05.497137Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976720758:0 progress is 1/1 2025-06-30T09:21:05.497141Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976720758 ready parts: 1/1 2025-06-30T09:21:05.497146Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976720758, ready parts: 1/1, is published: true 2025-06-30T09:21:05.497160Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:364:2340] message: TxId: 281474976720758 2025-06-30T09:21:05.497165Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976720758 ready parts: 1/1 2025-06-30T09:21:05.497170Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976720758:0 2025-06-30T09:21:05.497173Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976720758:0 2025-06-30T09:21:05.497192Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-30T09:21:05.497556Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976720758 2025-06-30T09:21:05.497569Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976720758 2025-06-30T09:21:05.497578Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:21:05.497583Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 281474976720758 2025-06-30T09:21:05.497957Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:21:05.497978Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:21:05.497985Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [3:509:2455] TestWaitNotification: OK eventTxId 103 2025-06-30T09:21:05.498547Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/User" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:05.498585Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/User" took 45us result status StatusSuccess 2025-06-30T09:21:05.498655Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/User" PathDescription { Self { Name: "User" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_User_kind_hdd-1" Kind: "common" } StoragePools { Name: "name_User_kind_hdd-2" Kind: "external" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:05.498698Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/User/Table" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-30T09:21:05.498731Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/User/Table" took 33us result status StatusSuccess 2025-06-30T09:21:05.498882Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/User/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72075186233409546 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976720757 CreateStep: 150 ParentPathId: 1 PathState: EPathStateNoChanges 
Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "created_at" Type: "Timestamp" TypeId: 50 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Uint32" TypeId: 2 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } EnableFilterByKey: true } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 
LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestShardPagination [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunningExtSubdomain [GOOD] Test command err: 2025-06-30T09:21:02.890889Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669977357200570:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:02.890912Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ae1/r3tmp/tmpOgddjj/pdisk_1.dat 2025-06-30T09:21:02.971395Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:02.977958Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 16552, node 1 2025-06-30T09:21:02.983439Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:02.983453Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:02.983455Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:02.983488Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:02.991506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:02.991538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:02.993259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9487 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:03.015264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:03.035111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:9487 2025-06-30T09:21:03.052591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting...
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 168, storage 40960, code: 500080 2025-06-30T09:21:03.151170Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669981652169921:3431] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/stream_TestStreamStorageRetention\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypePersQueueGroup, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp:345" severity: 1 } 2025-06-30T09:21:03.766668Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669983428641492:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:03.766698Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ae1/r3tmp/tmpHxkEHs/pdisk_1.dat 2025-06-30T09:21:03.783013Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8651, node 4 2025-06-30T09:21:03.796855Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:03.796870Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:03.796872Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:03.796920Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15651 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:03.866945Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:03.866980Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:03.868452Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:03.870563Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:03.885735Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:15651 2025-06-30T09:21:03.895936Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:21:04.995932Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521669986957848124:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:04.995964Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ae1/r3tmp/tmpnau18N/pdisk_1.dat 2025-06-30T09:21:05.016801Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64994, node 7 2025-06-30T09:21:05.030452Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:05.030469Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:05.030482Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:05.030524Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23246 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:05.096230Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:05.096259Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:05.097938Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:05.102254Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:05.115872Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:23246 2025-06-30T09:21:05.128186Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
>> TImportTests::ShouldSucceedOnIndexedTable3 [GOOD] >> TConsoleTests::TestAlterUnknownTenant >> Worker::Basic [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_5_Table >> DataStreams::TestListStreamConsumers [GOOD] >> DataStreams::TestListShards1Shard >> TRestoreTests::ExportImportWithSchemeChecksumAbsence [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_restore/unittest >> TImportTests::ShouldSucceedOnIndexedTable3 [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:04.587843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:04.587863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.587867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:04.587870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:04.587874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:04.587877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:04.587884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.587901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:04.587970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:04.588017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:04.598706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:04.598730Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:04.601432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:04.601472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:04.601497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:04.603174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 
2025-06-30T09:21:04.603237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:04.603330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.603374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:04.603924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.603957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:04.604188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.604196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.604215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:04.604221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:04.604225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:04.604238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.605308Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.618522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:04.618587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.618640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:04.618646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:04.618686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:04.618697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:04.619267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.619302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:04.619338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.619346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:04.619349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:04.619353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:04.619700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.619708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:04.619711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:04.620011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.620019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.620024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.620029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:04.620483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:04.620837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:04.620868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 
72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:04.621001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.621021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:04.621027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.621069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:04.621074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.621098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:04.621107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:04.621447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.621454Z node 1 :FLAT_TX_SCHEMESHARD ... 
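The alter-subdomain trace above walks one sub-operation through the numeric states printed by "Change state for txid 1:0": 2 -> 3 after TCreateParts, 3 -> 128 after TConfigureParts, and 128 -> 240 once TPropose gets TEvOperationPlan back from the coordinator. An illustrative enum of that progression — the names are inferred from the handler logging at each step and need not match YDB's real constants:

    // Numeric values are the ones printed by "Change state for txid a -> b".
    enum class ESubOpState {
        CreateParts    = 2,    // TCreateParts: create shards ("no shards to create" here)
        ConfigureParts = 3,    // TConfigureParts: push config to participant tablets
        Propose        = 128,  // TPropose: wait for the coordinator to assign a plan step
        Done           = 240,  // TDone: publish to the scheme board, notify subscribers
    };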
94046678944] TDropLock TPropose opId# 281474976710763:0 ProgressState 2025-06-30T09:21:06.027778Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 281474976710763 ready parts: 1/1 2025-06-30T09:21:06.027803Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 281474976710763 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:06.028484Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710759 Unlocking 2025-06-30T09:21:06.028512Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710759 Unlocking TBuildInfo{ IndexBuildId: 281474976710759, Uid: 101-0-0, DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: by_value_1, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [3:129:2153], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710760, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710761, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000005, ApplyTxId: 281474976710762, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710763, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 1 UploadBytes: 18 ReadRows: 1 ReadBytes: 18, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:21:06.028537Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710763:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710763 msg type: 269090816 2025-06-30T09:21:06.028563Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976710763, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:21:06.028596Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-30T09:21:06.028602Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-30T09:21:06.028609Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710763, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710763 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710763 at step: 5000007 2025-06-30T09:21:06.028666Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:06.028683Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: 
TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710763 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 12884904046 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:06.028692Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710763:0 HandleReply TEvOperationPlan: step# 5000007 2025-06-30T09:21:06.028698Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710763:0 128 -> 240 2025-06-30T09:21:06.029139Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710763:0, at schemeshard: 72057594046678944 2025-06-30T09:21:06.029152Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710763:0 ProgressState 2025-06-30T09:21:06.029165Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-30T09:21:06.029170Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-30T09:21:06.029176Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-30T09:21:06.029182Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-30T09:21:06.029187Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 1/1, is published: true 2025-06-30T09:21:06.029199Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:129:2153] message: TxId: 281474976710763 2025-06-30T09:21:06.029206Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-30T09:21:06.029212Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710763:0 2025-06-30T09:21:06.029216Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710763:0 2025-06-30T09:21:06.029229Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 FAKE_COORDINATOR: Erasing txId 281474976710763 2025-06-30T09:21:06.029635Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710763 2025-06-30T09:21:06.029651Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710763 2025-06-30T09:21:06.029663Z node 3 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2019: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 281474976710759, txId# 281474976710763 2025-06-30T09:21:06.029683Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2022: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 281474976710759, Uid: 101-0-0, DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 
72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: by_value_1, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [3:129:2153], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710760, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710761, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000005, ApplyTxId: 281474976710762, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710763, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 1 UploadBytes: 18 ReadRows: 1 ReadBytes: 18, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0}, txId# 281474976710763 2025-06-30T09:21:06.030063Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710759 Unlocking 2025-06-30T09:21:06.030088Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710759 Unlocking TBuildInfo{ IndexBuildId: 281474976710759, Uid: 101-0-0, DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: by_value_1, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [3:129:2153], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710760, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710761, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000005, ApplyTxId: 281474976710762, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710763, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 1 UploadBytes: 18 ReadRows: 1 ReadBytes: 18, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:21:06.030099Z node 3 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-30T09:21:06.030516Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710759 Done 2025-06-30T09:21:06.030542Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710759 Done TBuildInfo{ IndexBuildId: 281474976710759, Uid: 101-0-0, DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: by_value_1, IndexColumn: value, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [3:129:2153], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710760, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710761, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000005, ApplyTxId: 281474976710762, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710763, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, 
Processed: UploadRows: 1 UploadBytes: 18 ReadRows: 1 ReadBytes: 18, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:21:06.030548Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:336: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 281474976710759, subscribers count# 1 2025-06-30T09:21:06.030569Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-30T09:21:06.030576Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710759 2025-06-30T09:21:06.030586Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:21:06.030594Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 281474976710759 2025-06-30T09:21:06.031053Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:21:06.031076Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:21:06.031082Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [3:291:2278] TestWaitNotification: OK eventTxId 101 >> TRestoreTests::ShouldRestoreSequenceWithOverflow [GOOD] >> DataStreams::ListStreamsValidation [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_worker/unittest >> Worker::Basic [GOOD] Test command err: 2025-06-30T09:21:04.102687Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669988583988054:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:04.102726Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046e8/r3tmp/tmpxkD5ZI/pdisk_1.dat 2025-06-30T09:21:04.169410Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:21776 TServer::EnableGrpc on GrpcPort 23064, node 1 2025-06-30T09:21:04.191999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:04.192011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:04.192012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:04.192048Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:04.204402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:04.204434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:04.205636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21776 WaitRootIsUp 'Root'... 
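The TBuildInfo dumps above show how an index build is decomposed into four ordinary schemeshard transactions, each tracked by its own txId and *TxDone flag; the builder moves from Unlocking to Done only after the last one (UnlockTxId 281474976710763) is acknowledged. A sketch of that bookkeeping, assuming only the fields visible in the dump — this is not YDB's TIndexBuildInfo:

    #include <cstdint>
    using ui64 = uint64_t;

    struct TIndexBuildSketch {
        ui64 LockTxId = 0;     bool LockTxDone = false;     // lock the main table
        ui64 InitiateTxId = 0; bool InitiateTxDone = false; // create the index, take a snapshot (inferred)
        ui64 ApplyTxId = 0;    bool ApplyTxDone = false;    // apply the filled index
        ui64 UnlockTxId = 0;   bool UnlockTxDone = false;   // drop the lock

        // The shard fill happens between initiate and apply; the dump accounts
        // for it in Processed (UploadRows/UploadBytes/ReadRows/ReadBytes) vs Billed.
        bool AllTxDone() const {
            return LockTxDone && InitiateTxDone && ApplyTxDone && UnlockTxDone;
        }
    };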
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:04.248080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:04.327995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275264424 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-30T09:21:04.389057Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Handshake: worker# [1:7521669988583988800:2414] 2025-06-30T09:21:04.389096Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handshake: worker# [1:7521669988583988800:2414] 2025-06-30T09:21:04.389162Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:21:04.389210Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 3] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-30T09:21:04.389228Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Send handshake: worker# [1:7521669988583988800:2414] 2025-06-30T09:21:04.389244Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7521669988583988800:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-30T09:21:04.389251Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:162: [Worker][1:7521669988583988800:2414] Handshake with writer: sender# [1:7521669988583988802:2414] 2025-06-30T09:21:04.389670Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Create read session: session# [1:7521669988583988805:2287] 2025-06-30T09:21:04.389692Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7521669988583988800:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-30T09:21:04.389695Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:154: [Worker][1:7521669988583988800:2414] Handshake with reader: sender# [1:7521669988583988801:2414] 2025-06-30T09:21:04.389702Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-30T09:21:04.392108Z node 1 :REPLICATION_SERVICE DEBUG: 
topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_1_5075828741711189215_v1 } } 2025-06-30T09:21:05.104771Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:05.106863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:05.115032Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669992878956284:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:05.115035Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669992878956273:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:05.115054Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:05.115676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:05.117278Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669992878956287:2333], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:21:05.202786Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669992878956327:2514] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:05.292153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:05.345646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB ... schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:05.467728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:05.531073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:05.629361Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-30T09:21:05.627000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-30T09:21:05.629384Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7521669988583988800:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-30T09:21:05.627000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-30T09:21:05.629391Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-30T09:21:05.627000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-30T09:21:05.629411Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 0 BodySize: 36 }] } 2025-06-30T09:21:05.629461Z node 1 :REPLICATION_SERVICE DEBUG: 
base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7521669992878956891:2414] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-30T09:21:05.629471Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-30T09:21:05.629486Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7521669992878956891:2414] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-30T09:21:05.630695Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7521669992878956891:2414] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:05.630714Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-30T09:21:05.630721Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [0] } 2025-06-30T09:21:05.630749Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7521669988583988800:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-30T09:21:05.630757Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-30T09:21:05.744829Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-30T09:21:05.742000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-30T09:21:05.744851Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7521669988583988800:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-30T09:21:05.742000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-30T09:21:05.744860Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-30T09:21:05.742000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-30T09:21:05.744889Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 36 }] } 2025-06-30T09:21:05.744920Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: 
[TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7521669992878956891:2414] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-30T09:21:05.745959Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7521669992878956891:2414] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:05.745981Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-30T09:21:05.745989Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } 2025-06-30T09:21:05.746001Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7521669988583988800:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-30T09:21:05.746009Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-30T09:21:05.858657Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-30T09:21:05.856000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-30T09:21:05.858680Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7521669988583988800:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-30T09:21:05.856000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-30T09:21:05.858688Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-30T09:21:05.856000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-30T09:21:05.858715Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 36 }] } 2025-06-30T09:21:05.858757Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7521669992878956891:2414] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-30T09:21:05.859612Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7521669992878956891:2414] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:05.859632Z node 1 :REPLICATION_SERVICE DEBUG: 
base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-30T09:21:05.859639Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7521669988583988802:2414] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2] } 2025-06-30T09:21:05.859650Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7521669988583988800:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-30T09:21:05.859657Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-30T09:21:05.965264Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:119: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Handle NKikimr::NReplication::TEvYdbProxy::TEvTopicReaderGone { Result: { status: UNAVAILABLE, issues: {
: Error: PartitionSessionClosed { Partition session id: 1 Topic: "topic" Partition: 0 Reason: ConnectionLost } } } } 2025-06-30T09:21:05.965283Z node 1 :REPLICATION_SERVICE INFO: topic_reader.cpp:131: [RemoteTopicReader][/Root/topic][0][1:7521669988583988801:2414] Leave 2025-06-30T09:21:05.965305Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:235: [Worker][1:7521669988583988800:2414] Reader has gone: sender# [1:7521669988583988801:2414] 2025-06-30T09:21:05.965322Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7521669992878957062:2414] Handshake: worker# [1:7521669988583988800:2414] 2025-06-30T09:21:05.965671Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7521669992878957062:2414] Create read session: session# [1:7521669992878957063:2287] 2025-06-30T09:21:05.965689Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7521669988583988800:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-30T09:21:05.965692Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:154: [Worker][1:7521669988583988800:2414] Handshake with reader: sender# [1:7521669992878957062:2414] 2025-06-30T09:21:05.965701Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7521669992878957062:2414] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_restore/unittest >> TRestoreTests::ExportImportWithSchemeChecksumAbsence [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:04.590378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:04.590411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.590419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:04.590425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:04.590433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:04.590439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:04.590452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.590476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:04.590602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:04.590681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:04.608797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:04.608827Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:04.612834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:04.612898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:04.612941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:04.615015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:04.615098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:04.615240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.615317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:04.616095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.616132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:04.616417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.616430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.616457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:04.616465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:04.616470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:04.616489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.617673Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.643593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:04.643673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.643748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:04.643758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:04.643825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:04.643843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:04.644728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.644787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:04.644853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.644866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:04.644872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:04.644879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:04.645499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.645517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:04.645528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:04.645968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.645979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.645984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-30T09:21:04.645990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:04.646541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:04.646927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:04.646959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:04.647197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.647222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:04.647228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.647281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:04.647287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.647311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:04.647320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:04.647854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.647864Z node 1 :FLAT_TX_SCHEMESHARD ... 
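The FAKE_COORDINATOR lines above compress the planning protocol the test harness emulates: a proposed transaction is queued with a minimum step, the coordinator advances its front step to at least that value, and the plan is then sent to every affected tablet (here 72057594046678944). A toy version of that bookkeeping, assuming nothing beyond what the log shows — not the harness's actual code:

    #include <cstdint>
    #include <map>
    #include <vector>

    class TFakeCoordinatorSketch {
        uint64_t FrontStep = 0;                             // "State->FrontStep: ..."
        std::map<uint64_t, std::vector<uint64_t>> Planned;  // step -> planned txIds
    public:
        // "Add transaction: <txId> at step: <step>"
        uint64_t Plan(uint64_t txId, uint64_t minStep) {
            FrontStep = FrontStep < minStep ? minStep : FrontStep;  // "advance: minStep..."
            Planned[FrontStep].push_back(txId);
            return FrontStep;  // "Send Plan to tablet ... at step: <FrontStep>"
        }
    };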
d: 72057594046678944 2025-06-30T09:21:06.230002Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:211:2211], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 5 REQUEST: HEAD /data_00.csv HTTP/1.1 HEADERS: Host: localhost:4605 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 1C38ED45-65CB-49AA-8F5F-53CB0B6AD355 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /data_00.csv / 11 2025-06-30T09:21:06.230116Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-30T09:21:06.230126Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TRestore TProposedWaitParts, opId: 281474976710761:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:06.230256Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:21:06.230269Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:21:06.230274Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-30T09:21:06.230280Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 3 2025-06-30T09:21:06.230287Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:21:06.230305Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-30T09:21:06.230339Z node 3 :DATASHARD_RESTORE DEBUG: import_s3.cpp:526: [Import] [s3:281474976710761] Handle NKikimr::NWrappers::NExternalStorage::TEvHeadObjectResponse { Key: null Result: HeadObjectResult { ETag: 6e3e0a41fdab8add833862f1bd2954c3 ContentLength: 11 } } 2025-06-30T09:21:06.231193Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:21:06.241972Z node 3 :DATASHARD_RESTORE DEBUG: import_s3.cpp:605: [Import] [s3:281474976710761] Handle NKikimr::TEvDataShard::TEvS3DownloadInfo { Info: { DataETag: (empty maybe) ProcessedBytes: 0 WrittenBytes: 0 WrittenRows: 0 ChecksumState: DownloadState: } } FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-30T09:21:06.252760Z node 3 :DATASHARD_RESTORE DEBUG: import_s3.cpp:605: [Import] [s3:281474976710761] Handle NKikimr::TEvDataShard::TEvS3DownloadInfo { Info: { DataETag: 6e3e0a41fdab8add833862f1bd2954c3 ProcessedBytes: 0 WrittenBytes: 0 WrittenRows: 0 
ChecksumState: DownloadState: } } 2025-06-30T09:21:06.252779Z node 3 :DATASHARD_RESTORE NOTICE: import_s3.cpp:620: [Import] [s3:281474976710761] Process download info at 'DownloadInfo': info# { DataETag: 6e3e0a41fdab8add833862f1bd2954c3 ProcessedBytes: 0 WrittenBytes: 0 WrittenRows: 0 ChecksumState: DownloadState: } 2025-06-30T09:21:06.252799Z node 3 :DATASHARD_RESTORE DEBUG: import_s3.cpp:516: [Import] [s3:281474976710761] GetObject: key# /data_00.csv, range# 0-10 REQUEST: GET /data_00.csv HTTP/1.1 HEADERS: Host: localhost:4605 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 099B51EF-9738-4A20-B5FA-6C8A5B2B718A amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-10 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /data_00.csv / 11 2025-06-30T09:21:06.253824Z node 3 :DATASHARD_RESTORE DEBUG: import_s3.cpp:655: [Import] [s3:281474976710761] Handle NKikimr::NWrappers::NExternalStorage::TEvGetObjectResponse { Key: null Result: 6e3e0a41fdab8add833862f1bd2954c3 Body: 11b } 2025-06-30T09:21:06.253837Z node 3 :DATASHARD_RESTORE TRACE: import_s3.cpp:672: [Import] [s3:281474976710761] Content size: processed-bytes# 0, content-length# 11, body-size# 11 2025-06-30T09:21:06.253868Z node 3 :DATASHARD_RESTORE INFO: import_s3.cpp:805: [Import] [s3:281474976710761] Upload rows: count# 1, size# 36 2025-06-30T09:21:06.254497Z node 3 :DATASHARD_RESTORE DEBUG: import_s3.cpp:813: [Import] [s3:281474976710761] Handle NKikimr::TEvDataShard::TEvS3UploadRowsResponse { Record: TabletID: 72075186233409548 Status: 0 Info: { DataETag: 6e3e0a41fdab8add833862f1bd2954c3 ProcessedBytes: 11 WrittenBytes: 10 WrittenRows: 1 ChecksumState: DownloadState: } } 2025-06-30T09:21:06.254507Z node 3 :DATASHARD_RESTORE NOTICE: import_s3.cpp:620: [Import] [s3:281474976710761] Process download info at 'UploadResponse': info# { DataETag: 6e3e0a41fdab8add833862f1bd2954c3 ProcessedBytes: 11 WrittenBytes: 10 WrittenRows: 1 ChecksumState: DownloadState: } 2025-06-30T09:21:06.254512Z node 3 :DATASHARD_RESTORE NOTICE: import_s3.cpp:961: [Import] [s3:281474976710761] Finish: success# 1, error# , writtenBytes# 10, writtenRows# 1 2025-06-30T09:21:06.266615Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 667 RawX2: 12884904494 } Origin: 72075186233409548 State: 2 TxId: 281474976710761 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:21:06.266636Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 281474976710761, tablet: 72075186233409548, partId: 0 2025-06-30T09:21:06.266657Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 281474976710761:0, at schemeshard: 72057594046678944, message: Source { RawX1: 667 RawX2: 12884904494 } Origin: 72075186233409548 State: 2 TxId: 281474976710761 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:21:06.266673Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TRestore TProposedWaitParts, opId: 281474976710761:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 667 RawX2: 12884904494 } Origin: 
72075186233409548 State: 2 TxId: 281474976710761 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:21:06.266690Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710761:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:06.266695Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-30T09:21:06.266704Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710761:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-30T09:21:06.266712Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710761:0 129 -> 240 2025-06-30T09:21:06.266775Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 281474976710761:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:06.267255Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-30T09:21:06.267285Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-30T09:21:06.267292Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710761:0 ProgressState 2025-06-30T09:21:06.267302Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-30T09:21:06.267306Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:21:06.267309Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-30T09:21:06.267312Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:21:06.267315Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: true 2025-06-30T09:21:06.267325Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:129:2153] message: TxId: 281474976710761 2025-06-30T09:21:06.267330Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:21:06.267334Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-30T09:21:06.267338Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: 
RemoveTx for txid 281474976710761:0 2025-06-30T09:21:06.267362Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-30T09:21:06.267968Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-30T09:21:06.268016Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710761 2025-06-30T09:21:06.268033Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:21:06.268043Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-30T09:21:06.268563Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:21:06.268586Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-30T09:21:06.268597Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [3:631:2575] TestWaitNotification: OK eventTxId 104 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> BuildStatsHistogram::Many_Serial [GOOD] Test command err: adding part [0:0:1:0:0:0:0] data size (1.93MiB in total) adding group {0,0} PageId: 934 RowCount: 24000 DataSize: 1692763 GroupDataSize: 413676 ErasedRowCount: 0 LevelCount: 3 IndexSize: 49449 added slice [0, 24000) data size (1.61MiB - 0B) => 1.61MiB added small blobs data size => 1.73MiB added large blobs data size => 2.01MiB building histogram with row resolution 2400, data size resolution 206KiB slicing part [0:0:1:0:0:0:0]: { {rows: [0, 23999] keys: [{7, 10}, {80038, 26687}]} } slicing node Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 => take adding node future events -1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 2400 nextHistogramDataSize: 210643 closedRowCount: 0 closedDataSize: 0 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 2 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 1 closedRowCount: 0 openedRowCount: 24000 nextHistogramRowCount: 2400 adding part [0:0:1:0:0:0:0] data size (1.93MiB in total) adding group {0,0} PageId: 934 RowCount: 24000 DataSize: 1692763 GroupDataSize: 413676 ErasedRowCount: 0 LevelCount: 3 IndexSize: 49449 added slice [0, 24000) data size (1.61MiB - 0B) => 1.61MiB added small blobs data size => 1.73MiB added large blobs data size => 2.01MiB building histogram with row resolution 2400, data size 
resolution 206KiB slicing part [0:0:1:0:0:0:0]: { {rows: [0, 23999] keys: [{7, 10}, {80038, 26687}]} } slicing node Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 => take adding node future events -1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 2400 nextHistogramDataSize: 210643 closedRowCount: 0 closedDataSize: 0 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 2 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 1 closedRowCount: 0 openedRowCount: 24000 nextHistogramRowCount: 2400 adding event 0 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 1 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 441 Level: 2 BeginRowId: 5712 EndRowId: 8565 BeginDataSize: 503626 EndDataSize: 754239 BeginKey: {19015, 6346} EndKey: {28576, 9533} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 441 Level: 2 BeginRowId: 5712 EndRowId: 8565 BeginDataSize: 503626 EndDataSize: 754239 BeginKey: {19015, 6346} EndKey: {28576, 9533} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 552 Level: 2 BeginRowId: 8565 EndRowId: 11434 BeginDataSize: 754239 EndDataSize: 1008444 BeginKey: {28576, 9533} EndKey: {38122, 12715} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 552 Level: 2 BeginRowId: 8565 EndRowId: 11434 BeginDataSize: 754239 EndDataSize: 1008444 BeginKey: {28576, 9533} EndKey: {38122, 12715} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 663 Level: 2 BeginRowId: 11434 EndRowId: 14280 BeginDataSize: 1008444 EndDataSize: 1257358 BeginKey: {38122, 12715} EndKey: {47692, 15905} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 663 Level: 2 BeginRowId: 11434 EndRowId: 14280 BeginDataSize: 1008444 EndDataSize: 1257358 BeginKey: {38122, 12715} EndKey: {47692, 15905} State: 0 adding event 1 IsBegin: 
1 Part: [0:0:1:0:0:0:0] PageId: 774 Level: 2 BeginRowId: 14280 EndRowId: 17140 BeginDataSize: 1257358 EndDataSize: 1508340 BeginKey: {47692, 15905} EndKey: {57265, 19096} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 774 Level: 2 BeginRowId: 14280 EndRowId: 17140 BeginDataSize: 1257358 EndDataSize: 1508340 BeginKey: {47692, 15905} EndKey: {57265, 19096} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 885 Level: 2 BeginRowId: 17140 EndRowId: 19992 BeginDataSize: 1508340 EndDataSize: 1755252 BeginKey: {57265, 19096} EndKey: {66697, 22240} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 885 Level: 2 BeginRowId: 17140 EndRowId: 19992 BeginDataSize: 1508340 EndDataSize: 1755252 BeginKey: {57265, 19096} EndKey: {66697, 22240} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 933 Level: 2 BeginRowId: 19992 EndRowId: 24000 BeginDataSize: 1755252 EndDataSize: 2106439 BeginKey: {66697, 22240} EndKey: {80038, 26687} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 933 Level: 2 BeginRowId: 19992 EndRowId: 24000 BeginDataSize: 1755252 EndDataSize: 2106439 BeginKey: {66697, 22240} EndKey: {80038, 26687} State: 0 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 2400 nextHistogramDataSize: 210643 closedRowCount: 0 closedDataSize: 0 openedRowCount: 2855 openedDataSize: 252082 openedSortedByRowCount: 1 openedSortedByDataSize: 2 FutureEvents: 16 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 3 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 2400 nextHistogramDataSize: 210643 closedRowCount: 0 closedDataSize: 0 openedRowCount: 2855 openedDataSize: 252082 openedSortedByRowCount: 1 openedSortedByDataSize: 2 FutureEvents: 16 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 1 processing event IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 1 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 2400 nextHistogramDataSize: 210643 closedRowCount: 2855 closedDataSize: 252082 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 1 openedSortedByDataSize: 2 FutureEvents: 15 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 2 iterating stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 4800 nextHistogramDataSize: 421286 closedRowCount: 2855 closedDataSize: 252082 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 1 openedSortedByDataSize: 2 FutureEvents: 15 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 0 checking stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 4800 nextHistogramDataSize: 421286 
closedRowCount: 2855 closedDataSize: 252082 openedRowCount: 2857 openedDataSize: 251544 openedSortedByRowCount: 2 openedSortedByDataSize: 3 FutureEvents: 14 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 1 iterating stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 4800 nextHistogramDataSize: 421286 closedRowCount: 2855 closedDataSize: 252082 openedRowCount: 2857 openedDataSize: 251544 openedSortedByRowCount: 2 openedSortedByDataSize: 3 FutureEvents: 14 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 1 processing event IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 1 checking stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 4800 nextHistogramDataSize: 421286 closedRowCount: 5712 closedDataSize: 503626 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 2 openedSortedByDataSize: 3 FutureEvents: 13 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 2 iterating stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 7200 nextHistogramDataSize: 631929 closedRowCount: 5712 closedDataSize: 503626 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 2 openedSortedByDataSize: 3 FutureEvents: 13 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 441 Level: 2 BeginRowId: 5712 EndRowId: 8565 BeginDataSize: 503626 EndDataSize: 754239 BeginKey: {19015, 6346} EndKey: {28576, 9533} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 441 Level: 2 BeginRowId: 5712 EndRowId: 8565 BeginDataSize: 503626 EndDataSize: 754239 BeginKey: {19015, 6346} EndKey: {28576, 9533} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 2 closedRowCount: 5712 openedRowCount: 2853 nextHistogramRowCount: 7200 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 2 closedRowCount: 5712 openedRowCount: 2853 nextHistogramRowCount: 7200 loading node by row count tri ... 
65} State: 1 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84700 closedDataSize: 8822453 openedRowCount: 100 openedDataSize: 10550 openedSortedByRowCount: 848 openedSortedByDataSize: 848 FutureEvents: 305 currentKeyPointer: IsBegin: 0 Part: [0:0:848:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10550 BeginKey: {282451, 94158} EndKey: {282772, 94265} State: 1 processing event IsBegin: 0 Part: [0:0:848:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10550 BeginKey: {282451, 94158} EndKey: {282772, 94265} State: 1 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84800 closedDataSize: 8833003 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 848 openedSortedByDataSize: 848 FutureEvents: 304 currentKeyPointer: IsBegin: 0 Part: [0:0:848:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10550 BeginKey: {282451, 94158} EndKey: {282772, 94265} State: 2 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84800 closedDataSize: 8833003 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 848 openedSortedByDataSize: 848 FutureEvents: 304 currentKeyPointer: IsBegin: 1 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 0 processing event IsBegin: 1 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 0 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84800 closedDataSize: 8833003 openedRowCount: 100 openedDataSize: 10111 openedSortedByRowCount: 849 openedSortedByDataSize: 849 FutureEvents: 303 currentKeyPointer: IsBegin: 1 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 1 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84800 closedDataSize: 8833003 openedRowCount: 100 openedDataSize: 10111 openedSortedByRowCount: 849 openedSortedByDataSize: 849 FutureEvents: 303 currentKeyPointer: IsBegin: 0 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 1 processing event IsBegin: 0 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 1 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84900 closedDataSize: 8843114 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 849 openedSortedByDataSize: 849 FutureEvents: 302 currentKeyPointer: IsBegin: 0 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 2 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 
nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84900 closedDataSize: 8843114 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 849 openedSortedByDataSize: 849 FutureEvents: 302 currentKeyPointer: IsBegin: 1 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 0 processing event IsBegin: 1 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 0 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84900 closedDataSize: 8843114 openedRowCount: 100 openedDataSize: 10583 openedSortedByRowCount: 850 openedSortedByDataSize: 850 FutureEvents: 301 currentKeyPointer: IsBegin: 1 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 1 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84900 closedDataSize: 8843114 openedRowCount: 100 openedDataSize: 10583 openedSortedByRowCount: 850 openedSortedByDataSize: 850 FutureEvents: 301 currentKeyPointer: IsBegin: 0 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 1 processing event IsBegin: 0 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 1 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 85000 closedDataSize: 8853697 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 850 openedSortedByDataSize: 850 FutureEvents: 300 currentKeyPointer: IsBegin: 0 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 2 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 9 nextHistogramRowCount: 90000 nextHistogramDataSize: 18446744073709551615 closedRowCount: 85000 closedDataSize: 8853697 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 850 openedSortedByDataSize: 850 FutureEvents: 300 currentKeyPointer: IsBegin: 1 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 0 processing event IsBegin: 1 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 0 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 9 nextHistogramRowCount: 90000 nextHistogramDataSize: 18446744073709551615 closedRowCount: 85000 closedDataSize: 8853697 openedRowCount: 100 openedDataSize: 10456 openedSortedByRowCount: 851 openedSortedByDataSize: 851 FutureEvents: 299 currentKeyPointer: IsBegin: 1 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 1 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 9 nextHistogramRowCount: 90000 nextHistogramDataSize: 
18446744073709551615 closedRowCount: 85000 closedDataSize: 8853697 openedRowCount: 100 openedDataSize: 10456 openedSortedByRowCount: 851 openedSortedByDataSize: 851 FutureEvents: 299 currentKeyPointer: IsBegin: 0 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 1 processing event IsBegin: 0 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 1 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 9 nextHistogramRowCount: 90000 nextHistogramDataSize: 18446744073709551615 closedRowCount: 85100 closedDataSize: 8864153 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 851 openedSortedByDataSize: 851 FutureEvents: 298 currentKeyPointer: IsBegin: 0 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 2 finished stats.RowCountHistogram: 9 stats.DataSizeHistogram: 9 nextHistogramRowCount: 18446744073709551615 nextHistogramDataSize: 18446744073709551615 closedRowCount: 85100 closedDataSize: 8864153 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 851 openedSortedByDataSize: 851 FutureEvents: 298 Touched 0% bytes, 0 pages RowCountHistogram: 5% (actual 6%) key = (16984, 5669) value = 5100 (actual 6998 - -1% error) 10% (actual 9%) key = (50416, 16813) value = 15100 (actual 16798 - -1% error) 10% (actual 9%) key = (83701, 27908) value = 25100 (actual 26598 - -1% error) 10% (actual 9%) key = (116986, 39003) value = 35100 (actual 36398 - -1% error) 10% (actual 9%) key = (150319, 50114) value = 45100 (actual 46198 - -1% error) 10% (actual 9%) key = (183700, 61241) value = 55100 (actual 55998 - 0% error) 10% (actual 9%) key = (217081, 72368) value = 65100 (actual 65798 - 0% error) 10% (actual 9%) key = (250486, 83503) value = 75100 (actual 75598 - 0% error) 10% (actual 9%) key = (283771, 94598) value = 85100 (actual 85398 - 0% error) 14% (actual 14%) DataSizeHistogram: 5% (actual 6%) key = (16648, 5557) value = 524891 (actual 723287 - -1% error) 10% (actual 9%) key = (50086, 16703) value = 1569936 (actual 1747238 - -1% error) 9% (actual 9%) key = (83356, 27793) value = 2610698 (actual 2767306 - -1% error) 10% (actual 9%) key = (116647, 38890) value = 3652143 (actual 3787394 - -1% error) 9% (actual 9%) key = (149656, 49893) value = 4685435 (actual 4800597 - -1% error) 10% (actual 9%) key = (183040, 61021) value = 5728420 (actual 5822785 - 0% error) 10% (actual 9%) key = (216727, 72250) value = 6776444 (actual 6848929 - 0% error) 9% (actual 9%) key = (250144, 83389) value = 7813547 (actual 7865227 - 0% error) 9% (actual 9%) key = (283444, 94489) value = 8853697 (actual 8884838 - 0% error) 14% (actual 14%) Checking Flat: Touched 100% bytes, 1000 pages RowCountHistogram: 10% (actual 11%) key = (33379, 11134) value = 10000 (actual 11800 - -1% error) 10% (actual 9%) key = (66721, 22248) value = 20000 (actual 21600 - -1% error) 10% (actual 9%) key = (100015, 33346) value = 30000 (actual 31400 - -1% error) 10% (actual 9%) key = (133258, 44427) value = 40000 (actual 41200 - -1% error) 10% (actual 9%) key = (166621, 55548) value = 50000 (actual 51000 - -1% error) 10% (actual 9%) key = (200041, 66688) value = 60000 (actual 60800 - 0% error) 10% (actual 9%) key = (233449, 77824) value = 70000 (actual 70600 - 0% error) 10% (actual 9%) key = (266824, 
88949) value = 80000 (actual 80400 - 0% error) 10% (actual 9%) key = (300073, 100032) value = 90000 (actual 90200 - 0% error) 10% (actual 9%) DataSizeHistogram: 10% (actual 11%) key = (33187, NULL) value = 1041247 (actual 1229534 - -1% error) 10% (actual 9%) key = (66517, NULL) value = 2082456 (actual 2249844 - -1% error) 10% (actual 9%) key = (99709, NULL) value = 3123684 (actual 3270138 - -1% error) 10% (actual 9%) key = (132925, NULL) value = 4164886 (actual 4290603 - -1% error) 10% (actual 9%) key = (166246, NULL) value = 5206111 (actual 5311117 - -1% error) 10% (actual 9%) key = (199678, NULL) value = 6247321 (actual 6331068 - 0% error) 10% (actual 9%) key = (233290, NULL) value = 7288529 (actual 7350869 - 0% error) 10% (actual 9%) key = (266701, NULL) value = 8329759 (actual 8371441 - 0% error) 10% (actual 9%) key = (300052, NULL) value = 9371030 (actual 9392083 - 0% error) 9% (actual 9%) Checking Mixed: Touched 0% bytes, 0 pages RowCountHistogram: 100% (actual 100%) DataSizeHistogram: 100% (actual 100%) |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TChargeBTreeIndex::OneNode_Groups_History [GOOD] >> TChargeBTreeIndex::FewNodes |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_restore/unittest >> TRestoreTests::ShouldRestoreSequenceWithOverflow [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:04.593790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:04.593826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.593832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:04.593835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:04.593840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:04.593842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:04.593850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.593861Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:04.593961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:04.594012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:04.606041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:04.606063Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:04.609138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:04.609173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:04.609206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:04.610527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:04.610580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:04.610691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.610752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:04.611267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.611297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:04.611540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.611549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.611568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:04.611574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:04.611579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:04.611591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.612822Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.639665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:04.639760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.639831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:04.639840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:04.639910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:04.639926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:04.640757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.640804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:04.640865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.640876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:04.640882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:04.640889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:04.641374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.641386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:04.641393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:04.641759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.641769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:21:04.641776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.641785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:04.642673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:04.643196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:04.643238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:04.643474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.643507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:04.643515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.643590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:04.643600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.643635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:04.643651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:04.644151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.644162Z node 1 :FLAT_TX_SCHEMESHARD ... 
1] Finish: success# 1, error# , writtenBytes# 0, writtenRows# 0 2025-06-30T09:21:06.368252Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 742 RawX2: 12884904561 } Origin: 72075186233409549 State: 2 TxId: 281474976710761 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-30T09:21:06.368281Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 281474976710761, tablet: 72075186233409549, partId: 0 2025-06-30T09:21:06.368315Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 281474976710761:0, at schemeshard: 72057594046678944, message: Source { RawX1: 742 RawX2: 12884904561 } Origin: 72075186233409549 State: 2 TxId: 281474976710761 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-30T09:21:06.368333Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TRestore TProposedWaitParts, opId: 281474976710761:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 742 RawX2: 12884904561 } Origin: 72075186233409549 State: 2 TxId: 281474976710761 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-30T09:21:06.368349Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710761:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:06.368358Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-30T09:21:06.368364Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710761:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-30T09:21:06.368373Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710761:0 129 -> 240 2025-06-30T09:21:06.368432Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 281474976710761:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:06.369026Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-30T09:21:06.369129Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-30T09:21:06.369142Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710761:0 ProgressState 2025-06-30T09:21:06.369158Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 
progress is 1/1 2025-06-30T09:21:06.369164Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:21:06.369170Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-30T09:21:06.369174Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:21:06.369180Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: true 2025-06-30T09:21:06.369197Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:129:2153] message: TxId: 281474976710761 2025-06-30T09:21:06.369206Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:21:06.369213Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-30T09:21:06.369218Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710761:0 2025-06-30T09:21:06.369247Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 4 2025-06-30T09:21:06.369701Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-30T09:21:06.369721Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710761 2025-06-30T09:21:06.369733Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:21:06.369740Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-30T09:21:06.370216Z node 3 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:21:06.370241Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:21:06.370250Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [3:704:2640] TestWaitNotification: OK eventTxId 103 2025-06-30T09:21:06.370476Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Restored" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:06.370560Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Restored" took 98us result status StatusSuccess 2025-06-30T09:21:06.370855Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Restored" PathDescription { Self { Name: "Restored" PathId: 7 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710760 CreateStep: 5000006 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" 
EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Restored" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 DefaultFromSequence: "myseq" NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 0 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } } TableSchemaVersion: 1 IsBackup: false Sequences { Name: "myseq" PathId { OwnerId: 72057594046678944 LocalId: 8 } Version: 1 SequenceShard: 72075186233409546 MinValue: 1 MaxValue: 2 StartValue: 1 Cache: 1 Increment: 1 Cycle: false DataType: "Int64" } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 
0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::ListStreamsValidation [GOOD] Test command err: 2025-06-30T09:21:02.791402Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669980660954546:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:02.791514Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001bc3/r3tmp/tmpte7P9z/pdisk_1.dat 2025-06-30T09:21:02.844972Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:02.856120Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 24013, node 1 2025-06-30T09:21:02.861862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:02.861874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:02.861875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:02.861907Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2763 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:21:02.891999Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:02.892035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:02.893514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:02.895999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:02.913834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:2763 2025-06-30T09:21:02.929179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-30T09:21:03.222599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:03.298377Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037890:1][1:7521669984955923340:2327] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:6:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-30T09:21:03.326249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:03.362532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-30T09:21:03.369497Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037894 not found 2025-06-30T09:21:03.369503Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037900 not found 2025-06-30T09:21:03.369506Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037906 not found 2025-06-30T09:21:03.369508Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037905 not found 2025-06-30T09:21:03.369511Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037902 not found 2025-06-30T09:21:03.369514Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found 2025-06-30T09:21:03.369516Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037903 not found 2025-06-30T09:21:03.369518Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found 2025-06-30T09:21:03.369521Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found 2025-06-30T09:21:03.369523Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037908 not found 2025-06-30T09:21:03.369525Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037896 not 
found 2025-06-30T09:21:03.369527Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037895 not found 2025-06-30T09:21:03.369529Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037904 not found 2025-06-30T09:21:03.369530Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037901 not found 2025-06-30T09:21:03.369532Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found 2025-06-30T09:21:03.369534Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037907 not found 2025-06-30T09:21:04.063704Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669986703978013:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:04.063731Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001bc3/r3tmp/tmpDrujSP/pdisk_1.dat 2025-06-30T09:21:04.085376Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3725, node 4 2025-06-30T09:21:04.099926Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:04.099947Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:04.099948Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:04.099995Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5517 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:04.164345Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:04.164379Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:04.165944Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:04.170870Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:04.185061Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:5517 2025-06-30T09:21:04.196524Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:21:04.237284Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:04.248029Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:04.259036Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:04.917403Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521669987474315336:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:04.917429Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/001bc3/r3tmp/tmpbQjBEw/pdisk_1.dat 2025-06-30T09:21:04.933712Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6555, node 7 2025-06-30T09:21:04.946659Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:04.946674Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:04.946676Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:04.946721Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18871 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:05.017923Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:05.017961Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:05.019336Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:05.022109Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:05.035810Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:18871 2025-06-30T09:21:05.047519Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-30T09:21:05.087058Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521669991769284661:3430] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/stream_TestCreateExistingStream\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypePersQueueGroup, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp:345" severity: 1 } 2025-06-30T09:21:05.781327Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7521669989881515720:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:05.781540Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001bc3/r3tmp/tmpPkSlRv/pdisk_1.dat 2025-06-30T09:21:05.797099Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23489, node 10 2025-06-30T09:21:05.812701Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:05.812737Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:05.812739Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:05.812795Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32192 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:05.882115Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:05.882143Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:05.883888Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:05.885503Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:05.899326Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:32192 2025-06-30T09:21:05.915770Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/sysview/unittest >> DataStreams::TestListShards1Shard [GOOD] >> KqpSystemView::NodesSimple |81.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/sysview/unittest >> TConsoleTests::TestMergeConfig [GOOD] >> TConsoleTests::TestRemoveTenant >> TConsoleTests::TestCreateTenantWrongPool [GOOD] >> TConsoleTests::TestCreateTenantWrongPoolExtSubdomain >> KqpScheme::AlterCompressionLevelInColumnFamily >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query [GOOD] >> TConsoleTests::TestSchemeShardErrorForwarding [GOOD] >> TConsoleTests::TestScaleRecommenderPolicies >> KqpScheme::CreateTableWithTtlSettingsUncompat ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestListShards1Shard [GOOD] Test command err: 2025-06-30T09:21:02.792044Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669978693946200:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:02.792064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001b9b/r3tmp/tmp6RWJD5/pdisk_1.dat 2025-06-30T09:21:02.864667Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16917, node 1 
2025-06-30T09:21:02.880764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:02.880774Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:02.880775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:02.880812Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:02.892377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:02.892406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:02.893972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15331 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:02.935179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:02.968326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:15331 2025-06-30T09:21:02.982040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-30T09:21:03.062154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: "shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: "shard-000000" } records { sequence_number: "28" shard_id: "shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } 2025-06-30T09:21:03.066836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:03.078947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-30T09:21:03.082758Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-30T09:21:03.082765Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-30T09:21:03.082768Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 
2025-06-30T09:21:03.087541Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-30T09:21:03.087568Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-30T09:21:03.087574Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-30T09:21:03.794352Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1751275263054-1","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1751275263,"finish":1751275263},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1751275263}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1751275263054-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1751275263,"finish":1751275263},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1751275263}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037890-1751275263075-3","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1751275263,"finish":1751275263},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037890","source_wt":1751275263}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037890-1751275263075-4","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1751275263,"finish":1751275263},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037890","source_wt":1751275263}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1751275263075-5","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1751275263,"finish":1751275263},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1751275263}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1751275263075-6","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1751275263,"finish":1751275263},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1751275263}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1751275263054-1","schema":"yds.resources.reserved.v1","tags":{"reserved_thro ... e of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.671258 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.672184 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.672198 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.672659 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.672675 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.673122 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.673134 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-30T09:21:05.674958Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:05.684112Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) E0000 00:00:1751275265.697935 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.697973 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 
2025-06-30T09:21:05.699261Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) E0000 00:00:1751275265.710749 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.710792 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-30T09:21:05.712096Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) E0000 00:00:1751275265.726156 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.726191 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.727470 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.727488 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-30T09:21:05.730678Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-30T09:21:05.735195Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037892 not found 2025-06-30T09:21:05.735208Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-06-30T09:21:05.735279Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found 2025-06-30T09:21:05.735417Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037893 not found 2025-06-30T09:21:05.735652Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2025-06-30T09:21:05.735661Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found E0000 00:00:1751275265.736946 
219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275265.736980 219361 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-30T09:21:06.355776Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7521669996511103745:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:06.355807Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001b9b/r3tmp/tmpyRZWPy/pdisk_1.dat 2025-06-30T09:21:06.374224Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22878, node 10 2025-06-30T09:21:06.390758Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:06.390775Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:06.390777Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:06.390826Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24579 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:06.456428Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:06.456461Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:06.458162Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:06.460708Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:06.473334Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:24579 2025-06-30T09:21:06.483474Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... E0000 00:00:1751275266.556767 220538 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275266.557962 220538 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275266.558602 220538 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275266.559127 220538 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1751275266.559668 220538 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn >> KqpScheme::AlterUser >> KqpScheme::CreateTableWithDefaultSettings >> KqpScheme::CreateDroppedTable >> TxUsage::Sinks_Oltp_WriteToTopics_2_Table [GOOD] >> KqpOlapTypes::Timestamp >> KqpScheme::CreateTableWithCompactionPolicyUncompat >> KqpScheme::CreateAndAlterTableWithPartitioningBySizeUncompat >> KqpOlapScheme::DropTable >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query >> KqpScheme::AlterIndexImplTable+VectorIndex >> KqpScheme::DropChangefeedNegative >> KqpScheme::CreateTableWithWrongPartitionAtKeys >> KqpScheme::TouchIndexAfterMoveIndexWrite >> KqpScheme::SchemaVersionMismatchWithRead >> KqpScheme::AlterTableAlterVectorIndex >> THealthCheckTest::ShardsLimit999 >> KqpOlapScheme::AddColumnWithTtl >> KqpScheme::AlterCompressionLevelInColumnFamily [GOOD] >> KqpScheme::AlterDatabaseChangeOwner+EnableAlterDatabase ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-30T09:19:15.618524Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669519186949899:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:15.718456Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a23/r3tmp/tmplqWQNm/pdisk_1.dat 2025-06-30T09:19:15.729237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:19:15.797401Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28973, node 1 2025-06-30T09:19:15.834022Z INFO: TTestServer started on Port 7849 GrpcPort 28973 2025-06-30T09:19:15.823089Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004a23/r3tmp/yandexiRV7xW.tmp 2025-06-30T09:19:15.823105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004a23/r3tmp/yandexiRV7xW.tmp 2025-06-30T09:19:15.823188Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004a23/r3tmp/yandexiRV7xW.tmp 2025-06-30T09:19:15.823208Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:15.827708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:15.827754Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:15.828742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7849 PQClient connected to localhost:28973 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:15.933833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:15.942902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-30T09:19:15.946358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:19:15.947824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:19:16.041299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:16.269724Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669523481917745:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:16.269785Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:16.269893Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669523481917757:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:16.271003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:16.273762Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669523481917759:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:19:16.333324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:16.342329Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669523481917887:2464] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:16.345020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:16.363398Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669523481917909:2315], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:19:16.364623Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=N2U4YTJlYmQtNzFjNzVlOTAtMTQ1NjM2ZC0yZTUzMGNkZQ==, ActorId: [1:7521669523481917742:2296], ActorState: ExecuteState, TraceId: 01jz024csaeqnzgnaybznq2qrj, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:19:16.365197Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:19:16.367060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669523481918112:2611] 2025-06-30T09:19:16.617469Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:20.618811Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669519186949899:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:20.618864Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:19:21.630364Z :WriteToTopic_Demo_20_RestartNo_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:19:21.638174Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:19:21.649877Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521669544956754800:2697] connected; active server actors: 1 2025-06-30T09:19:21.649985Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-30T09:19:21.650235Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:19:21.650288Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-30T09:19:21.650779Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-30T09:19:21.675137Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:19:21.675563Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:19 ... one: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 1 offsets { end: 10 } } } } 2025-06-30T09:21:05.469580Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 10 prev 0 end 10 by cookie 2 2025-06-30T09:21:05.469604Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-30T09:21:05.469611Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-30T09:21:05.469641Z node 11 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 10 (startOffset 0) session test-consumer_11_1_707118311591109060_v1 2025-06-30T09:21:05.469678Z node 11 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:21:05.470165Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 10 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:21:05.470180Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:21:05.470188Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=1001126, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:21:05.470195Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-30T09:21:05.470209Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-30T09:21:05.470217Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 10 endOffset 10 with cookie 2 2025-06-30T09:21:05.470222Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 10 2025-06-30T09:21:05.470337Z :DEBUG: [/Root] [/Root] [c1c3033d-edf01bd6-6af61b54-609714ae] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 10 } } 2025-06-30T09:21:06.460912Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:9:10 2025-06-30T09:21:06.460955Z :INFO: [/Root] [/Root] [c1c3033d-edf01bd6-6af61b54-609714ae] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1000 BytesRead: 1000000 MessagesRead: 10 BytesReadCompressed: 1000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:21:06.462459Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 checking auth because of timeout 2025-06-30T09:21:06.462495Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 auth for : test-consumer 2025-06-30T09:21:06.462669Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 Handle describe topics response 2025-06-30T09:21:06.462717Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 auth is DEAD 2025-06-30T09:21:06.462721Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:21:06.466016Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 checking auth because of timeout 2025-06-30T09:21:06.466050Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 auth for : test-consumer 2025-06-30T09:21:06.466207Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 Handle describe topics response 2025-06-30T09:21:06.466238Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 auth is DEAD 2025-06-30T09:21:06.466241Z 
node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:21:07.460933Z :INFO: [/Root] [/Root] [c1c3033d-edf01bd6-6af61b54-609714ae] Closing read session. Close timeout: 0.000000s 2025-06-30T09:21:07.460952Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:9:10 2025-06-30T09:21:07.460963Z :INFO: [/Root] [/Root] [c1c3033d-edf01bd6-6af61b54-609714ae] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2000 BytesRead: 1000000 MessagesRead: 10 BytesReadCompressed: 1000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:21:07.460984Z :NOTICE: [/Root] [/Root] [c1c3033d-edf01bd6-6af61b54-609714ae] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:21:07.460994Z :DEBUG: [/Root] [/Root] [c1c3033d-edf01bd6-6af61b54-609714ae] [] Abort session to cluster 2025-06-30T09:21:07.461230Z :DEBUG: [/Root] 0x0000704DF3DF1B10 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_707118311591109060_v1 Close 2025-06-30T09:21:07.461261Z :DEBUG: [/Root] 0x0000704DF3DF1B10 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_707118311591109060_v1 Close 2025-06-30T09:21:07.461278Z :NOTICE: [/Root] [/Root] [c1c3033d-edf01bd6-6af61b54-609714ae] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:21:07.461513Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 grpc read done: success# 0, data# { } 2025-06-30T09:21:07.461534Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 grpc read failed 2025-06-30T09:21:07.461542Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 grpc closed 2025-06-30T09:21:07.461559Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 is DEAD 2025-06-30T09:21:07.461653Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_707118311591109060_v1 2025-06-30T09:21:07.461683Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669993522537175:2501] destroyed 2025-06-30T09:21:07.461704Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:398: Direct read cache: close session for proxy [11:7521669993522537182:2504] 2025-06-30T09:21:07.461713Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_707118311591109060_v1 2025-06-30T09:21:07.461841Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521669993522537172:2498] disconnected; active server actors: 1 2025-06-30T09:21:07.461851Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521669993522537172:2498] client test-consumer disconnected session test-consumer_11_1_707118311591109060_v1 2025-06-30T09:21:07.461924Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d036d298-6478bfa3-f252be81-9eead5ff_0] PartitionId [0] Generation [2] Write session: close. 
Timeout 0.000000s 2025-06-30T09:21:07.461933Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d036d298-6478bfa3-f252be81-9eead5ff_0] PartitionId [0] Generation [2] Write session will now close 2025-06-30T09:21:07.461940Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|d036d298-6478bfa3-f252be81-9eead5ff_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-30T09:21:07.462035Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d036d298-6478bfa3-f252be81-9eead5ff_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-30T09:21:07.462043Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|d036d298-6478bfa3-f252be81-9eead5ff_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-30T09:21:07.462826Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [11:7521669993522537182:2504]: session cookie 2 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 grpc read done: success# 0, data# { } 2025-06-30T09:21:07.462838Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [11:7521669993522537182:2504]: session cookie 2 consumer test-consumer session test-consumer_11_1_707118311591109060_v1grpc read failed 2025-06-30T09:21:07.462843Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|d036d298-6478bfa3-f252be81-9eead5ff_0 grpc read done: success: 0 data: 2025-06-30T09:21:07.462846Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [11:7521669993522537182:2504]: session cookie 2 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 grpc closed 2025-06-30T09:21:07.462847Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|d036d298-6478bfa3-f252be81-9eead5ff_0 grpc read failed 2025-06-30T09:21:07.462849Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [11:7521669993522537182:2504]: session cookie 2 consumer test-consumer session test-consumer_11_1_707118311591109060_v1 proxy is DEAD 2025-06-30T09:21:07.462857Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|d036d298-6478bfa3-f252be81-9eead5ff_0 grpc closed 2025-06-30T09:21:07.462860Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|d036d298-6478bfa3-f252be81-9eead5ff_0 is DEAD 2025-06-30T09:21:07.463019Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:21:07.463077Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521669984932602520:2474] destroyed 2025-06-30T09:21:07.463115Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> THealthCheckTest::NoBscResponse [GOOD] >> THealthCheckTest::LayoutIncorrect >> TConsoleTests::TestAlterUnknownTenant [GOOD] >> TConsoleTests::TestAlterUnknownTenantExtSubdomain >> KqpScheme::AlterUser [GOOD] >> KqpScheme::AsyncReplicationConnectionString >> KqpScheme::DisableS3ExternalDataSource >> KqpOlapTypes::Timestamp [GOOD] >> KqpOlapTypes::DecimalCsv >> KqpSystemView::NodesSimple [GOOD] >> KqpScheme::CreateDroppedTable [GOOD] >> KqpScheme::CreateDropTableViaApiMultipleTime >> KqpScheme::CreateTableWithTtlSettingsUncompat [GOOD] >> KqpScheme::CreateTableWithTtlSettingsCompat >> KqpOlapScheme::DropTable [GOOD] >> KqpOlapScheme::DropColumnOldSchemeBulkUpsert >> KqpScheme::CreateTableWithDefaultSettings [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysSigned >> TSchemeshardBackgroundCleaningTest::TempInTemp [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitioningBySizeUncompat [GOOD] >> KqpScheme::CreateTableWithCompactionPolicyUncompat [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitioningBySizeCompat >> KqpScheme::CreateTableWithCompactionPolicyCompat >> KqpScheme::CreateTableWithWrongPartitionAtKeys [GOOD] >> KqpScheme::CreateTableWithVectorIndexCovered >> KqpScheme::DropChangefeedNegative [GOOD] >> KqpScheme::DropAsyncReplication >> KqpScheme::AlterTableAlterVectorIndex [GOOD] >> KqpScheme::AlterTableRenameIndex >> TChargeBTreeIndex::FewNodes [GOOD] >> TChargeBTreeIndex::FewNodes_Groups >> KqpScheme::AlterDatabaseChangeOwner+EnableAlterDatabase [GOOD] >> KqpScheme::AlterDatabaseChangeOwner-EnableAlterDatabase ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesSimple [GOOD] Test command err: Trying to start YDB, gRPC: 1068, MsgBus: 22067 2025-06-30T09:21:07.299023Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669998498217112:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:07.299054Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:07.303315Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670001916396835:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:07.303344Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:07.303960Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670001367828054:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:07.304000Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004687/r3tmp/tmpOvO95Q/pdisk_1.dat 2025-06-30T09:21:07.377078Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:07.390295Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 1068, node 1 2025-06-30T09:21:07.398043Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:07.398061Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:07.398063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:07.398111Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:07.399494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:07.399520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:07.401048Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22067 TClient is connected to server localhost:22067 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:21:07.446151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:07.446185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:07.449172Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-30T09:21:07.449500Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:07.450988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:07.451014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:07.451893Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:21:07.452244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:07.465697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:07.475439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.545449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.582778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.602545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.787110Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669998498218894:2322], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:07.787146Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:07.849854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.866437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.886977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.902513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.916450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.935802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.002353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.019903Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670002793187033:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.019935Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670002793187038:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.019938Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.020721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.024843Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670002793187040:2368], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:08.110448Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670002793187121:4137] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:08.300511Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:08.305623Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:08.306127Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:08.333506Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275268330, txId: 281474976715672] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::TempInTemp [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:246:2060] recipient: [1:228:2144] 2025-06-30T09:19:57.980714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:19:57.980735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:57.980740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:19:57.980745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:19:57.980750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:19:57.980753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:19:57.980762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:57.980773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:19:57.980866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources 
configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:19:57.980926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:19:57.994465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:19:57.994490Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:57.999237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:19:57.999349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:19:57.999379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:19:58.001195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:19:58.001259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:19:58.001400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:58.001477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:19:58.002967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:58.003032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:19:58.003403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:58.003416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:58.003425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:19:58.003433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:19:58.003439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:19:58.003466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:19:58.004920Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:358:2060] recipient: [1:17:2064] 2025-06-30T09:19:58.026430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:19:58.026500Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:58.026566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:19:58.026574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:19:58.026623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:19:58.026639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:58.027593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:58.027635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:19:58.027704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:58.027722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:19:58.027728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:19:58.027733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:19:58.028187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:58.028199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:19:58.028205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:19:58.028601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:58.028611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:19:58.028617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:58.028625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 
2025-06-30T09:19:58.029287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:19:58.029739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:19:58.029778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:19:58.030010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:58.030040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 253 RawX2: 4294969456 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:19:58.030047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:58.030111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:19:58.030119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:19:58.030164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:19:58.030178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:19:58.030653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:58.030662Z node 1 :FLAT_TX_SCHEMESHARD ... 
d EvNotifyTxCompletion 2025-06-30T09:21:08.611055Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-30T09:21:08.611125Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [7:696:2511], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:08.611135Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:08.611141Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:21:08.611166Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124996, Sender [7:591:2406], Recipient [7:245:2155]: NKikimrScheme.TEvNotifyTxCompletion TxId: 106 2025-06-30T09:21:08.611172Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4973: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-30T09:21:08.611184Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-30T09:21:08.611206Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-30T09:21:08.611212Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [7:694:2509] 2025-06-30T09:21:08.611237Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [7:696:2511], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:21:08.611243Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:21:08.611247Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 TestModificationResults wait txId: 107 2025-06-30T09:21:08.611334Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [8:565:2104], Recipient [7:245:2155] 2025-06-30T09:21:08.611339Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:21:08.612039Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/test/tmp/a/b" OperationType: ESchemeOpMkDir MkDir { Name: "tmp2" } TempDirOwnerActorId { RawX1: 565 RawX2: 34359740472 } AllowCreateInTempDir: false } TxId: 107 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:08.612095Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/test/tmp/a/b/tmp2, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-30T09:21:08.612118Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 107:1, propose status:StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/test/tmp/a/b', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:134, at schemeshard: 72057594046678944 
2025-06-30T09:21:08.612180Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:21:08.619402Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 107, response: Status: StatusPreconditionFailed Reason: "Check failed: path: \'/MyRoot/test/tmp/a/b\', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:134" TxId: 107 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:08.619479Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 107, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/test/tmp/a/b', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:134, operation: CREATE DIRECTORY, path: /MyRoot/test/tmp/a/b/tmp2 2025-06-30T09:21:08.619493Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 2025-06-30T09:21:08.619618Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-06-30T09:21:08.619626Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-06-30T09:21:08.619702Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [7:702:2517], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:08.619711Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:08.619716Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:21:08.619739Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124996, Sender [7:591:2406], Recipient [7:245:2155]: NKikimrScheme.TEvNotifyTxCompletion TxId: 107 2025-06-30T09:21:08.619743Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4973: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-30T09:21:08.619762Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-30T09:21:08.619790Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-30T09:21:08.619796Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [7:700:2515] 2025-06-30T09:21:08.619821Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [7:702:2517], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:21:08.619826Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:21:08.619831Z node 7 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-06-30T09:21:08.619902Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [8:565:2104], Recipient [7:245:2155] 2025-06-30T09:21:08.619907Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:21:08.620564Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/test/tmp/a/b" OperationType: ESchemeOpMkDir MkDir { Name: "tmp2" } TempDirOwnerActorId { RawX1: 565 RawX2: 34359740472 } AllowCreateInTempDir: true } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:08.620629Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/test/tmp/a/b/tmp2, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-30T09:21:08.620639Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 108:1, propose status:StatusPreconditionFailed, reason: Can't create temporary directory while flag AllowCreateInTempDir is set. Temporary directory can't be created in another temporary directory., at schemeshard: 72057594046678944 2025-06-30T09:21:08.620716Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:21:08.622089Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 108, response: Status: StatusPreconditionFailed Reason: "Can\'t create temporary directory while flag AllowCreateInTempDir is set. Temporary directory can\'t be created in another temporary directory." TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:08.622159Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Can't create temporary directory while flag AllowCreateInTempDir is set. 
Temporary directory can't be created in another temporary directory., operation: CREATE DIRECTORY, path: /MyRoot/test/tmp/a/b/tmp2 2025-06-30T09:21:08.622168Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-30T09:21:08.622288Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-30T09:21:08.622295Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-30T09:21:08.622363Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [7:708:2523], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:08.622371Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:08.622375Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:21:08.622397Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124996, Sender [7:591:2406], Recipient [7:245:2155]: NKikimrScheme.TEvNotifyTxCompletion TxId: 108 2025-06-30T09:21:08.622402Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4973: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-30T09:21:08.622427Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-30T09:21:08.622450Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-30T09:21:08.622456Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [7:706:2521] 2025-06-30T09:21:08.622478Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [7:708:2523], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:21:08.622483Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:21:08.622487Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 108 >> KqpScheme::TouchIndexAfterMoveIndexWrite [GOOD] >> KqpScheme::TouchIndexAfterMoveIndexReadReplace >> KqpScheme::AlterIndexImplTable+VectorIndex [GOOD] >> KqpScheme::AlterIndexImplTable-VectorIndex >> KqpScheme::SchemaVersionMismatchWithRead [GOOD] >> KqpScheme::SchemaVersionMismatchWithWrite >> TConsoleTests::TestCreateTenantWrongPoolExtSubdomain [GOOD] >> TConsoleTests::TestCreateTenantAlreadyExists >> KqpOlapScheme::DropColumnOldSchemeBulkUpsert [GOOD] >> KqpOlapScheme::DropThenAddColumn >> THealthCheckTest::LayoutIncorrect [GOOD] >> KqpOlapScheme::BulkError >> THealthCheckTest::ShardsLimit999 [GOOD] >> THealthCheckTest::ShardsLimit995 >> KqpScheme::AsyncReplicationConnectionString [GOOD] >> KqpScheme::AsyncReplicationConnectionStringWithSsl >> KqpScheme::CreateTableWithTtlSettingsCompat [GOOD] >> KqpScheme::CreateTableWithTtlOnIntColumn ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/health_check/ut/unittest >> THealthCheckTest::LayoutIncorrect [GOOD] Test command err: 2025-06-30T09:20:58.467396Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.467444Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:58.467479Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.467505Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:58.467511Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:58.467526Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011f6/r3tmp/tmpbVChaz/pdisk_1.dat 2025-06-30T09:20:58.540985Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1975, node 1 TClient is connected to server localhost:19338 2025-06-30T09:20:58.642201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:58.642218Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:58.642221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:58.642299Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:20:59.700693Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.700766Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:20:59.700780Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.700972Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:20:59.701000Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:20:59.701015Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011f6/r3tmp/tmp8zm1to/pdisk_1.dat 2025-06-30T09:20:59.774989Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16464, node 3 TClient is connected to server localhost:65458 2025-06-30T09:20:59.886586Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:20:59.886604Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:20:59.886607Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:20:59.886699Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-a594-3-3-42" status: RED message: "PDisk state is FAULTY" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-42" path: "/home/runner/.ya/build/build_root/cl3w/0011f6/r3tmp/tmp8zm1to/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-a594-3-3-43" status: RED message: "PDisk state is FAULTY" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-43" path: "/home/runner/.ya/build/build_root/cl3w/0011f6/r3tmp/tmp8zm1to/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-a594-3-3-44" status: RED message: "PDisk state is FAULTY" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-44" path: "/home/runner/.ya/build/build_root/cl3w/0011f6/r3tmp/tmp8zm1to/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } location { id: 3 host: "::1" port: 12001 } 2025-06-30T09:21:00.675389Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:423:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:00.675449Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:00.675459Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011f6/r3tmp/tmpfYMFUD/pdisk_1.dat 2025-06-30T09:21:00.749323Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8852, node 5 TClient is connected to server localhost:16070 2025-06-30T09:21:00.861235Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:00.861256Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:00.861261Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:00.861411Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: EMERGENCY issue_log { id: "RED-be81" status: RED message: "Database has storage issues" reason: "RED-caea" type: "DATABASE" level: 1 } issue_log { id: "RED-caea" status: RED message: "There are no storage pools" type: "STORAGE" level: 2 } database_status { name: "/Root/database" overall: RED storage { overall: RED } compute { overall: GREEN nodes { id: "6" overall: GREEN load { overall: GREEN cores: 64 } } } } location { id: 5 host: "::1" port: 12001 } 2025-06-30T09:21:01.566331Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:421:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:01.566364Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:01.566381Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011f6/r3tmp/tmp7NVO82/pdisk_1.dat 2025-06-30T09:21:01.636495Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25427, node 7 TClient is connected to server localhost:13637 2025-06-30T09:21:01.727873Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:01.727890Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:01.727894Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:01.727951Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:01.801308Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:01.801342Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:01.812886Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:02.470556Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; self_check_result: EMERGENCY issue_log { id: "RED-70fb-1231c6b1" status: RED message: "Database has multiple issues" location { database { name: "/Root" } } reason: "RED-4e47-1231c6b1" reason: "RED-53b5-1231c6b1" reason: "RED-d6d1-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-7" type: "COMPUTE" level: 2 } issue_log { id: "RED-4e47-1231c6b1" status: RED message: "Compute has issues with system tablets" location { database { name: "/Root" } } reason: "RED-c138-1231c6b1-BSController" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-7" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 7 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-c138-1231c6b1-BSController" status: RED message: "System tablet is unresponsive" location { compute { tablet { type: "BSController" id: "72057594037932033" } } database { name: "/Root" } } type: "SYSTEM_TABLET" level: 3 } issue_log { id: "RED-53b5-1231c6b1" status: RED message: "System tablet BSC didn\'t provide information" location { database { name: "/Root" } } type: "STORAGE" level: 2 } issue_log { id: "RED-d6d1-1231c6b1" status: RED message: "Storage failed" location { database { name: "/Root" } } reason: "RED-258e-1231c6b1-80c02825" type: "STORAGE" level: 2 } issue_log { id: 
"RED-258e-1231c6b1-80c02825" status: RED message: "Pool failed" location { storage { pool { name: "static" } } database { name: "/Root" } } reason: "RED-819b-1231c6b1-0" type: "STORAGE_POOL" level: 3 } issue_log { id: "RED-84d8-7-7-1" status: RED message: "PDisk is not available" location { storage { node { id: 7 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "7-1" path: "/home/runner/.ya/build/build_root/cl3w/0011f6/r3tmp/tmp7NVO82/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-87c9-1231c6b1-7-0-1-0-0-0" status: RED message: "VDisk is degraded" location { storage { node { id: 7 host: "::1" port: 12001 } pool { name: "static" group { vdisk { id: "0-1-0-0-0" } } } } database { name: "/Root" } } type: "VDISK" level: 5 } issue_log { id: "RED-819b-1231c6b1-0" status: RED message: "Group failed" location { storage { pool { name: "static" group { id: "0" } } } database { name: "/Root" } } reason: "RED-87c9-1231c6b1-7-0-1-0-0-0" type: "STORAGE_GROUP" level: 4 } database_status { name: "/Root" overall: RED storage { overall: RED pools { id: "static" overall: RED groups { id: "0" overall: RED vdisks { id: "0-1-0-0-0" overall: RED pdisk { id: "7-1" overall: RED } } } } } compute { overall: RED nodes { id: "7" overall: YELLOW load { overall: YELLOW load: 123.390625 cores: 64 } } } } location { id: 7 host: "::1" port: 12001 } 2025-06-30T09:21:09.044574Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:257:2216], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:09.044633Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:09.044657Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0011f6/r3tmp/tmpcwgeZg/pdisk_1.dat 2025-06-30T09:21:09.171781Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26819, node 9 TClient is connected to server localhost:63336 2025-06-30T09:21:09.293258Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:09.293283Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:09.293289Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:09.293442Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> TxUsage::WriteToTopic_Demo_24_Query [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysSigned [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysComplex >> KqpScheme::CreateAndAlterTableWithPartitioningBySizeCompat [GOOD] >> KqpScheme::CreateAndDropUser+StrictAclCheck >> KqpScheme::CreateTableWithCompactionPolicyCompat [GOOD] >> KqpScheme::CreateTableStoreNegative >> KqpOlapTypes::DecimalCsv [GOOD] >> KqpOlapTypes::NegativeTimestampErr >> KqpScheme::AlterDatabaseChangeOwner-EnableAlterDatabase [GOOD] >> KqpScheme::AlterDatabaseChangeSchemeLimits+EnableAlterDatabase >> DataStreams::TestStreamTimeRetention [GOOD] >> DataStreams::TestUnsupported >> KqpScheme::CreateTableWithVectorIndexCovered [GOOD] >> KqpScheme::CreateTableWithVectorIndexNoFeatureFlag >> KqpOlapScheme::BulkError [GOOD] >> KqpOlapScheme::ColumnFamilyWithFieldData >> KqpConstraints::Utf8AndDefault >> KqpOlapScheme::DropThenAddColumn [GOOD] >> KqpOlapScheme::DropThenAddColumnCompaction >> KqpScheme::ChangefeedSchemaChanges+UseQueryService >> KqpScheme::TouchIndexAfterMoveIndexReadReplace [GOOD] >> KqpScheme::TouchIndexAfterMoveIndexWriteReplace >> KqpScheme::AlterTableRenameIndex [GOOD] >> KqpScheme::AlterTableReplaceIndex >> KqpScheme::SchemaVersionMismatchWithWrite [GOOD] >> KqpScheme::SchemaVersionMismatchWithIndexRead >> KqpScheme::CreateTableStoreNegative [GOOD] >> KqpScheme::CreateResourcePool >> KqpOlapTypes::NegativeTimestampErr [GOOD] >> KqpOlapTypes::JsonImport >> KqpScheme::AlterIndexImplTable-VectorIndex [GOOD] >> KqpScheme::AlterIndexImplTableUsingPublicAPI >> KqpScheme::AsyncReplicationConnectionStringWithSsl [GOOD] >> KqpScheme::AsyncReplicationEndpointAndDatabase >> KqpOlapScheme::ColumnFamilyWithFieldData [GOOD] >> KqpOlapScheme::CrateWithWrongCodec >> DataStreams::TestUnsupported [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices_Sticky [GOOD] >> TPartGroupBtreeIndexIter::NoNodes [GOOD] >> TPartGroupBtreeIndexIter::OneNode [GOOD] >> TPartGroupBtreeIndexIter::FewNodes [GOOD] >> TPartMulti::Basics [GOOD] >> TPartMulti::BasicsReverse [GOOD] >> TPartSlice::SimpleMerge [GOOD] >> TPartSlice::ComplexMerge 
[GOOD] >> TPartSlice::LongTailMerge [GOOD] >> TPartSlice::CutSingle [GOOD] >> TPartSlice::CutMulti [GOOD] >> TPartSlice::LookupBasics [GOOD] >> TPartSlice::LookupFull [GOOD] >> TPartSlice::EqualByRowId [GOOD] >> TPartSlice::ParallelCompactions [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysComplex [GOOD] >> KqpScheme::CreateTableWithFamiliesRegular ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_24_Query [GOOD] Test command err: 2025-06-30T09:19:14.676318Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669515473430870:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:14.676449Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a2c/r3tmp/tmpkrQMzA/pdisk_1.dat 2025-06-30T09:19:14.726465Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:19:14.761593Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:14.769163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:14.770678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 2584, node 1 2025-06-30T09:19:14.771408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:14.803028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004a2c/r3tmp/yandexCE2E6x.tmp 2025-06-30T09:19:14.803043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004a2c/r3tmp/yandexCE2E6x.tmp 2025-06-30T09:19:14.803144Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004a2c/r3tmp/yandexCE2E6x.tmp 2025-06-30T09:19:14.803203Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:14.808855Z INFO: TTestServer started on Port 30014 GrpcPort 2584 TClient is connected to server localhost:30014 PQClient connected to localhost:2584 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:14.882038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:14.886039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:14.895337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:19:14.979469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:15.270125Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669519768398818:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:15.270151Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669519768398827:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:15.270160Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:15.271331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:15.274847Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669519768398862:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:15.274960Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:15.275301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-30T09:19:15.275369Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669519768398832:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:19:15.332275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:15.364098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:15.369222Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669519768398989:2493] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:15.407496Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669519768399024:2320], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:19:15.408209Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=MjlhYjM0OTgtNjM2ODI3My0yYmJmODkwMC04YTZlNDk2ZA==, ActorId: [1:7521669519768398792:2296], ActorState: ExecuteState, TraceId: 01jz024bt24c74rgya5th9ker9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:19:15.408819Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:19:15.417228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669519768399183:2613] 2025-06-30T09:19:15.668436Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:19.675580Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669515473430870:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:19.675617Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:19:20.676901Z :WriteToTopic_Demo_22_RestartNo_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:19:20.684743Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:19:20.692351Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:19:20.692498Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:19:20.693798Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:19:20.693880Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-30T09:19:20.693894Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTx ... 
rtition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:21:08.130956Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=293, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:21:08.130961Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-30T09:21:08.130970Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 1 endOffset 1 with cookie 2 2025-06-30T09:21:08.130975Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 1 2025-06-30T09:21:08.131201Z :DEBUG: [/Root] [/Root] [6c41ff7b-e8150e21-2df61490-cb3b139e] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 1 } } 2025-06-30T09:21:09.113551Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:21:09.114066Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 4 sessionId: test-message_group_id|46fa74b9-315c435e-b90f744a-fbe5a3f7_0 describe result for acl check 2025-06-30T09:21:09.124932Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:0:1 2025-06-30T09:21:09.124961Z :INFO: [/Root] [/Root] [6c41ff7b-e8150e21-2df61490-cb3b139e] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1001 BytesRead: 144 MessagesRead: 1 BytesReadCompressed: 144 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:21:09.125843Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 checking auth because of timeout 2025-06-30T09:21:09.125893Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 auth for : test-consumer 2025-06-30T09:21:09.126226Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 Handle describe topics response 2025-06-30T09:21:09.126269Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 auth is DEAD 2025-06-30T09:21:09.126289Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:21:09.128367Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 checking auth because of timeout 2025-06-30T09:21:09.128414Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 auth for : test-consumer 2025-06-30T09:21:09.128741Z 
node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 Handle describe topics response 2025-06-30T09:21:09.128772Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 auth is DEAD 2025-06-30T09:21:09.128791Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:21:10.128815Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 checking auth because of timeout 2025-06-30T09:21:10.128865Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 auth for : test-consumer 2025-06-30T09:21:10.129204Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 Handle describe topics response 2025-06-30T09:21:10.129246Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 auth is DEAD 2025-06-30T09:21:10.129258Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:21:10.207643Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037894] server connected, pipe [11:7521670014253163235:2943], now have 1 active actors on pipe 2025-06-30T09:21:10.211160Z :INFO: [/Root] [/Root] [6c41ff7b-e8150e21-2df61490-cb3b139e] Closing read session. Close timeout: 0.000000s 2025-06-30T09:21:10.211180Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:0:1 2025-06-30T09:21:10.211192Z :INFO: [/Root] [/Root] [6c41ff7b-e8150e21-2df61490-cb3b139e] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2087 BytesRead: 144 MessagesRead: 1 BytesReadCompressed: 144 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:21:10.211215Z :NOTICE: [/Root] [/Root] [6c41ff7b-e8150e21-2df61490-cb3b139e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:21:10.211224Z :DEBUG: [/Root] [/Root] [6c41ff7b-e8150e21-2df61490-cb3b139e] [] Abort session to cluster 2025-06-30T09:21:10.211435Z :DEBUG: [/Root] 0x0000103379396D90 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_13699166257924303_v1 Close 2025-06-30T09:21:10.211494Z :DEBUG: [/Root] 0x0000103379396D90 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_13699166257924303_v1 Close 2025-06-30T09:21:10.211511Z :NOTICE: [/Root] [/Root] [6c41ff7b-e8150e21-2df61490-cb3b139e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:21:10.214790Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 grpc read done: success# 0, data# { } 2025-06-30T09:21:10.214812Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 grpc read failed 2025-06-30T09:21:10.214807Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [11:7521670005663228533:2493]: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 grpc read done: success# 0, data# { } 2025-06-30T09:21:10.214819Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [11:7521670005663228533:2493]: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1grpc read failed 2025-06-30T09:21:10.214824Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 grpc closed 2025-06-30T09:21:10.214831Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [11:7521670005663228533:2493]: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 grpc closed 2025-06-30T09:21:10.214836Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [11:7521670005663228533:2493]: session cookie 2 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 proxy is DEAD 2025-06-30T09:21:10.214838Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_13699166257924303_v1 is DEAD 2025-06-30T09:21:10.215880Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521670005663228525:2488] disconnected; active server actors: 1 2025-06-30T09:21:10.215897Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521670005663228525:2488] client test-consumer disconnected session test-consumer_11_1_13699166257924303_v1 2025-06-30T09:21:10.215934Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_13699166257924303_v1 2025-06-30T09:21:10.215942Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521670005663228528:2491] destroyed 2025-06-30T09:21:10.215957Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_13699166257924303_v1 2025-06-30T09:21:10.222895Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|46fa74b9-315c435e-b90f744a-fbe5a3f7_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-30T09:21:10.222916Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|46fa74b9-315c435e-b90f744a-fbe5a3f7_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:21:10.222925Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|46fa74b9-315c435e-b90f744a-fbe5a3f7_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:21:10.223145Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|46fa74b9-315c435e-b90f744a-fbe5a3f7_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:21:10.223154Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|46fa74b9-315c435e-b90f744a-fbe5a3f7_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:21:10.223461Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message_group_id|46fa74b9-315c435e-b90f744a-fbe5a3f7_0 grpc read done: success: 0 data: 2025-06-30T09:21:10.223482Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message_group_id|46fa74b9-315c435e-b90f744a-fbe5a3f7_0 grpc read failed 2025-06-30T09:21:10.223496Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message_group_id|46fa74b9-315c435e-b90f744a-fbe5a3f7_0 grpc closed 2025-06-30T09:21:10.223502Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message_group_id|46fa74b9-315c435e-b90f744a-fbe5a3f7_0 is DEAD 2025-06-30T09:21:10.223990Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:21:10.224011Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:21:10.224790Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521670005663228461:2477] destroyed 2025-06-30T09:21:10.224809Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521670005663228464:2477] destroyed 2025-06-30T09:21:10.224847Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> KqpOlapScheme::AddColumnWithTtl [GOOD] >> KqpOlapScheme::AddColumnSimpleReader >> KqpOlapScheme::InvalidColumnInTieringRule >> KqpScheme::CreateAndDropUser+StrictAclCheck [GOOD] >> KqpScheme::CreateAndDropUser-StrictAclCheck >> THealthCheckTest::ShardsLimit995 [GOOD] >> THealthCheckTest::ShardsLimit905 >> TConsoleTests::TestAlterUnknownTenantExtSubdomain [GOOD] >> TConsoleTests::TestAlterBorrowedStorage >> KqpScheme::AlterDatabaseChangeSchemeLimits+EnableAlterDatabase [GOOD] >> KqpScheme::AlterDatabaseChangeSchemeLimits-EnableAlterDatabase >> KqpScheme::DropAsyncReplication [GOOD] >> KqpScheme::DropAsyncReplicationCascade >> TConsoleTests::TestScaleRecommenderPolicies [GOOD] >> TConsoleTests::TestScaleRecommenderPoliciesValidation >> KqpScheme::CreateTableWithVectorIndexNoFeatureFlag [GOOD] >> KqpScheme::CreateTableWithVectorIndexPublicApi >> KqpOlapTypes::JsonImport [GOOD] >> KqpScheme::AddChangefeed >> KqpScheme::DisableS3ExternalDataSource [GOOD] >> KqpScheme::DoubleCreateExternalDataSource ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestUnsupported [GOOD] Test command err: 2025-06-30T09:21:02.793511Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669980397977146:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:02.793537Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001bb1/r3tmp/tmpKEvLVw/pdisk_1.dat 2025-06-30T09:21:02.872928Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24895, node 1 2025-06-30T09:21:02.891221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:02.891233Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:02.891235Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:02.891267Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:02.893764Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:02.893801Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:02.895327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18764 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:02.928126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:02.945842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:18764 2025-06-30T09:21:02.958960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-30T09:21:03.036290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:03.637461Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669981548458897:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:03.637701Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001bb1/r3tmp/tmpRFHBYn/pdisk_1.dat 2025-06-30T09:21:03.659206Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14936, node 4 2025-06-30T09:21:03.674654Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:03.674670Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:03.674672Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:03.674718Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23022 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:03.738118Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:03.738145Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:03.739745Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:03.742424Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:03.771260Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:23022 2025-06-30T09:21:03.787047Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:21:03.865902Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:03.876043Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: "shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: "shard-000000" } records { sequence_number: "28" shard_id: "shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } 2025-06-30T09:21:04.639599Z node 4 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; encryption_type: NONE records { sequence_number: "30" shard_id: "shard-000000" } records { sequence_number: "31" shard_id: "shard-000000" } records { sequence_number: "32" shard_id: "shard-000000" } records { sequence_number: "33" shard_id: "shard-000000" } records { sequence_number: "34" ... "81" shard_id: "shard-000000" } records { sequence_number: "82" shard_id: "shard-000000" } records { sequence_number: "83" shard_id: "shard-000000" } records { sequence_number: "84" shard_id: "shard-000000" } records { sequence_number: "85" shard_id: "shard-000000" } records { sequence_number: "86" shard_id: "shard-000000" } records { sequence_number: "87" shard_id: "shard-000000" } records { sequence_number: "88" shard_id: "shard-000000" } records { sequence_number: "89" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "90" shard_id: "shard-000000" } records { sequence_number: "91" shard_id: "shard-000000" } records { sequence_number: "92" shard_id: "shard-000000" } records { sequence_number: "93" shard_id: "shard-000000" } records { sequence_number: "94" shard_id: "shard-000000" } records { sequence_number: "95" shard_id: "shard-000000" } records { sequence_number: "96" shard_id: "shard-000000" } records { sequence_number: "97" shard_id: "shard-000000" } records { sequence_number: "98" shard_id: "shard-000000" } records { sequence_number: "99" shard_id: "shard-000000" } records { sequence_number: "100" shard_id: "shard-000000" } records { sequence_number: "101" shard_id: "shard-000000" } records { sequence_number: "102" shard_id: "shard-000000" } records { sequence_number: "103" shard_id: "shard-000000" } records { sequence_number: "104" shard_id: "shard-000000" } records { sequence_number: "105" shard_id: "shard-000000" } records { sequence_number: "106" shard_id: "shard-000000" } records { sequence_number: "107" shard_id: "shard-000000" } records { sequence_number: "108" shard_id: "shard-000000" } records { sequence_number: "109" shard_id: "shard-000000" } records { sequence_number: "110" shard_id: "shard-000000" } records { sequence_number: "111" shard_id: "shard-000000" } records { sequence_number: "112" shard_id: "shard-000000" } records { sequence_number: "113" shard_id: "shard-000000" } records { sequence_number: "114" shard_id: "shard-000000" } records { sequence_number: "115" shard_id: "shard-000000" } records { sequence_number: "116" shard_id: "shard-000000" } records { sequence_number: "117" shard_id: "shard-000000" } records { sequence_number: "118" shard_id: "shard-000000" } records { sequence_number: "119" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "120" shard_id: "shard-000000" } records { sequence_number: "121" shard_id: "shard-000000" } records { sequence_number: "122" shard_id: "shard-000000" } records { sequence_number: "123" shard_id: "shard-000000" } records { sequence_number: "124" shard_id: "shard-000000" } records { sequence_number: "125" shard_id: "shard-000000" } records { sequence_number: "126" shard_id: "shard-000000" } records { sequence_number: "127" shard_id: "shard-000000" } records { sequence_number: "128" shard_id: "shard-000000" } records { sequence_number: "129" shard_id: "shard-000000" } records { sequence_number: "130" shard_id: "shard-000000" } records { sequence_number: "131" shard_id: "shard-000000" } records { sequence_number: "132" shard_id: "shard-000000" } records { 
sequence_number: "133" shard_id: "shard-000000" } records { sequence_number: "134" shard_id: "shard-000000" } records { sequence_number: "135" shard_id: "shard-000000" } records { sequence_number: "136" shard_id: "shard-000000" } records { sequence_number: "137" shard_id: "shard-000000" } records { sequence_number: "138" shard_id: "shard-000000" } records { sequence_number: "139" shard_id: "shard-000000" } records { sequence_number: "140" shard_id: "shard-000000" } records { sequence_number: "141" shard_id: "shard-000000" } records { sequence_number: "142" shard_id: "shard-000000" } records { sequence_number: "143" shard_id: "shard-000000" } records { sequence_number: "144" shard_id: "shard-000000" } records { sequence_number: "145" shard_id: "shard-000000" } records { sequence_number: "146" shard_id: "shard-000000" } records { sequence_number: "147" shard_id: "shard-000000" } records { sequence_number: "148" shard_id: "shard-000000" } records { sequence_number: "149" shard_id: "shard-000000" } 2025-06-30T09:21:08.637877Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7521669981548458897:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.637936Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1751275263861-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1751275263,"finish":1751275263},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1751275263}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1751275263872-3","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1751275263,"finish":1751275263},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1751275263}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1751275263886-4","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1751275263,"finish":1751275264},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1751275264}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1751275264892-5","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1751275264,"finish":1751275265},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1751275265}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1751275265895-6","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1751275265,"finish":1751275266},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1751275266}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1751275266899-7","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1751275266,"finish":1751275267},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1751275267}' 2025-06-30T09:21:10.536989Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670012563368091:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:10.537008Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001bb1/r3tmp/tmpYZVibM/pdisk_1.dat 2025-06-30T09:21:10.561576Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29307, node 7 2025-06-30T09:21:10.584538Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:10.584550Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:10.584552Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:10.584601Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10042 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:10.637393Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:10.637436Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:10.639235Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:10.643169Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:10.663200Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:10042 2025-06-30T09:21:10.679230Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... >> TxUsage::Sinks_Oltp_WriteToTopic_5_Table [GOOD] >> KqpScheme::ChangefeedSchemaChanges+UseQueryService [GOOD] >> KqpScheme::ChangefeedSchemaChanges-UseQueryService >> KqpOlapScheme::CrateWithWrongCodec [GOOD] >> KqpOlapScheme::AlterCompressionType >> KqpConstraints::Utf8AndDefault [GOOD] >> KqpOlapScheme::AddColumnLongPk ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TPartSlice::ParallelCompactions [GOOD] Test command err: ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 346b 12r} data 755b + FlatIndex{4} Label{3 rev 3, 172b} 5 rec | Page Row Bytes (Uint32, String) | 0 0 86b {1, aaa} | 1 3 88b {1, b} | 2 6 86b {2, NULL} | 3 9 86b {2, ccx} | 3 11 86b {2, cxz} + BTreeIndex{PageId: 5 RowCount: 12 DataSize: 346 ErasedRowCount: 0} Label{13 rev 1, 208b} | PageId: 0 RowCount: 3 DataSize: 86 ErasedRowCount: 0 | > {1, b} | PageId: 1 RowCount: 6 DataSize: 174 ErasedRowCount: 0 | > {2, NULL} | PageId: 2 RowCount: 9 DataSize: 260 ErasedRowCount: 0 | > {2, ccx} | PageId: 3 RowCount: 12 DataSize: 346 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 346b 12r} data 777b + FlatIndex{4} Label{3 rev 3, 179b} 5 rec | Page Row Bytes (Uint32, String) | 0 0 86b {1, aaa} | 1 3 88b {1, baaaa} | 2 6 86b {2, aaa} | 3 9 86b {2, ccx} | 3 11 86b {2, cxz} + BTreeIndex{PageId: 5 RowCount: 12 DataSize: 346 ErasedRowCount: 0} Label{13 rev 1, 223b} | PageId: 0 RowCount: 3 DataSize: 86 ErasedRowCount: 0 | > {1, baaaa} | PageId: 1 RowCount: 6 DataSize: 174 ErasedRowCount: 0 | > {2, aaa} | PageId: 2 RowCount: 9 DataSize: 260 ErasedRowCount: 0 | > {2, ccx} | PageId: 3 RowCount: 12 DataSize: 346 ErasedRowCount: 0 ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1347b + FlatIndex{10} Label{3 rev 3, 362b} 11 rec | Page Row 
Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, ab} | 2 2 42b {1, ac} | 3 3 42b {1, b} | 4 4 42b {1, bb} | 5 5 42b {2, NULL} | 6 6 42b {2, ab} | 7 7 42b {2, ac} | 8 8 42b {2, b} | 9 9 42b {2, bb} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 536b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, ab} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, ac} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, b} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bb} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, NULL} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, ab} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, ac} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, b} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bb} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1381b + FlatIndex{10} Label{3 rev 3, 375b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, aba} | 2 2 42b {1, aca} | 3 3 42b {1, baa} | 4 4 42b {1, bba} | 5 5 42b {2, aaa} | 6 6 42b {2, aba} | 7 7 42b {2, aca} | 8 8 42b {2, baa} | 9 9 42b {2, bba} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 557b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, aba} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, aca} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, baa} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bba} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, aaa} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, aba} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, aca} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, baa} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bba} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= SLICES ======= { [0, 1), [1, 3), [3, 5), [5, 6), [6, 7), [7, 8), [8, 9] } ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1347b + FlatIndex{10} Label{3 rev 3, 362b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, ab} | 2 2 42b {1, ac} | 3 3 42b {1, b} | 4 4 42b {1, bb} | 5 5 42b {2, NULL} | 6 6 42b {2, ab} | 7 7 42b {2, ac} | 8 8 42b {2, b} | 9 9 42b {2, bb} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 536b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, ab} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, ac} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, b} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bb} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, NULL} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, ab} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, ac} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, b} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bb} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1381b + FlatIndex{10} Label{3 rev 3, 375b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, aba} | 2 2 42b {1, aca} | 3 3 42b {1, baa} | 4 4 42b {1, bba} | 5 5 42b {2, 
aaa} | 6 6 42b {2, aba} | 7 7 42b {2, aca} | 8 8 42b {2, baa} | 9 9 42b {2, bba} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 557b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, aba} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, aca} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, baa} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bba} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, aaa} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, aba} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, aca} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, baa} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bba} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 81b 2r} data 316b + FlatIndex{2} Label{3 rev 3, 107b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 41b {ccccccd} | 1 1 41b {ccccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 81 ErasedRowCount: 0} Label{13 rev 1, 109b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccccd} | PageId: 1 RowCount: 2 DataSize: 81 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 83b 2r} data 320b + FlatIndex{2} Label{3 rev 3, 109b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 43b {ccccccd} | 1 1 43b {ccccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 83 ErasedRowCount: 0} Label{13 rev 1, 109b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccccd} | PageId: 1 RowCount: 2 DataSize: 83 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 80b 2r} data 312b + FlatIndex{2} Label{3 rev 3, 105b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 40b {cccccd} | 1 1 40b {cccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 80 ErasedRowCount: 0} Label{13 rev 1, 108b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccccd} | PageId: 1 RowCount: 2 DataSize: 80 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 82b 2r} data 316b + FlatIndex{2} Label{3 rev 3, 107b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 42b {cccccd} | 1 1 42b {cccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 82 ErasedRowCount: 0} Label{13 rev 1, 108b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccccd} | PageId: 1 RowCount: 2 DataSize: 82 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 79b 2r} data 308b + FlatIndex{2} Label{3 rev 3, 103b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 39b {ccccd} | 1 1 39b {ccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 79 ErasedRowCount: 0} Label{13 rev 1, 107b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccd} | PageId: 1 RowCount: 2 DataSize: 79 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 81b 2r} data 312b + FlatIndex{2} Label{3 rev 3, 105b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 41b {ccccd} | 1 1 41b {ccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 81 ErasedRowCount: 0} Label{13 rev 1, 107b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccd} | PageId: 1 RowCount: 2 DataSize: 81 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 78b 2r} data 304b + FlatIndex{2} Label{3 rev 3, 101b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 38b {cccd} | 1 1 38b {cccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 78 ErasedRowCount: 0} Label{13 rev 1, 106b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccd} | PageId: 1 RowCount: 2 DataSize: 78 ErasedRowCount: 0 
Part{[1:2:3:0:0:0:0] eph 0, 80b 2r} data 308b + FlatIndex{2} Label{3 rev 3, 103b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 40b {cccd} | 1 1 40b {cccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 80 ErasedRowCount: 0} Label{13 rev 1, 106b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccd} | PageId: 1 RowCount: 2 DataSize: 80 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 75b 2r} data 292b + FlatIndex{2} Label{3 rev 3, 95b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 35b {d} | 1 1 35b {d} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 75 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 75 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 77b 2r} data 296b + FlatIndex{2} Label{3 rev 3, 97b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 37b {d} | 1 1 37b {ddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 77 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 77 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 69b 2r} data 280b + FlatIndex{2} Label{3 rev 3, 89b} 3 rec | Page Row Bytes (String) | 0 0 34b {} | 1 1 35b {d} | 1 1 35b {d} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 69 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 34 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 69 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 71b 2r} data 284b + FlatIndex{2} Label{3 rev 3, 91b} 3 rec | Page Row Bytes (String) | 0 0 34b {} | 1 1 37b {d} | 1 1 37b {ddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 71 ErasedRowCount: 0} Label{13 rev 1, 10 ... owOp 1: {0, 8} {Set 2 Uint32 : 5}, {Set 3 Uint64 : 5}, {Set 4 String : xxxxxxxxxx_5} + Rows{3} Label{34 rev 1, 120b}, [6, +2)row | ERowOp 1: {0, 10} {Set 2 Uint32 : 6}, {Set 3 Uint64 : 6}, {Set 4 String : xxxxxxxxxx_6} | ERowOp 1: {1, 1} {Set 2 Uint32 : 7}, {Set 3 Uint64 : 7}, {Set 4 String : xxxxxxxxxx_7} + Rows{4} Label{44 rev 1, 120b}, [8, +2)row | ERowOp 1: {1, 3} {Set 2 Uint32 : 8}, {Set 3 Uint64 : 8}, {Set 4 String : xxxxxxxxxx_8} | ERowOp 1: {1, 4} {Set 2 Uint32 : 9}, {Set 3 Uint64 : 9}, {Set 4 String : xxxxxxxxxx_9} + Rows{5} Label{54 rev 1, 122b}, [10, +2)row | ERowOp 1: {1, 6} {Set 2 Uint32 : 10}, {Set 3 Uint64 : 10}, {Set 4 String : xxxxxxxxxx_10} | ERowOp 1: {1, 7} {Set 2 Uint32 : 11}, {Set 3 Uint64 : 11}, {Set 4 String : xxxxxxxxxx_11} + Rows{6} Label{64 rev 1, 122b}, [12, +2)row | ERowOp 1: {1, 8} {Set 2 Uint32 : 12}, {Set 3 Uint64 : 12}, {Set 4 String : xxxxxxxxxx_12} | ERowOp 1: {1, 10} {Set 2 Uint32 : 13}, {Set 3 Uint64 : 13}, {Set 4 String : xxxxxxxxxx_13} + Rows{7} Label{74 rev 1, 122b}, [14, +2)row | ERowOp 1: {2, 1} {Set 2 Uint32 : 14}, {Set 3 Uint64 : 14}, {Set 4 String : xxxxxxxxxx_14} | ERowOp 1: {2, 3} {Set 2 Uint32 : 15}, {Set 3 Uint64 : 15}, {Set 4 String : xxxxxxxxxx_15} + Rows{8} Label{84 rev 1, 122b}, [16, +2)row | ERowOp 1: {2, 4} {Set 2 Uint32 : 16}, {Set 3 Uint64 : 16}, {Set 4 String : xxxxxxxxxx_16} | ERowOp 1: {2, 6} {Set 2 Uint32 : 17}, {Set 3 Uint64 : 17}, {Set 4 String : xxxxxxxxxx_17} + Rows{9} Label{94 rev 1, 122b}, [18, +2)row | ERowOp 1: {2, 7} {Set 2 Uint32 : 18}, {Set 3 Uint64 : 18}, {Set 4 String : xxxxxxxxxx_18} | ERowOp 1: {2, 8} {Set 2 Uint32 : 19}, {Set 3 Uint64 : 19}, {Set 4 String : xxxxxxxxxx_19} + Rows{10} Label{104 rev 1, 122b}, [20, +2)row | ERowOp 1: {2, 10} {Set 2 Uint32 : 20}, {Set 3 Uint64 : 20}, {Set 4 String : xxxxxxxxxx_20} | ERowOp 1: {3, 
1} {Set 2 Uint32 : 21}, {Set 3 Uint64 : 21}, {Set 4 String : xxxxxxxxxx_21} + Rows{11} Label{114 rev 1, 122b}, [22, +2)row | ERowOp 1: {3, 3} {Set 2 Uint32 : 22}, {Set 3 Uint64 : 22}, {Set 4 String : xxxxxxxxxx_22} | ERowOp 1: {3, 4} {Set 2 Uint32 : 23}, {Set 3 Uint64 : 23}, {Set 4 String : xxxxxxxxxx_23} + Rows{12} Label{124 rev 1, 122b}, [24, +2)row | ERowOp 1: {3, 6} {Set 2 Uint32 : 24}, {Set 3 Uint64 : 24}, {Set 4 String : xxxxxxxxxx_24} | ERowOp 1: {3, 7} {Set 2 Uint32 : 25}, {Set 3 Uint64 : 25}, {Set 4 String : xxxxxxxxxx_25} + Rows{13} Label{134 rev 1, 122b}, [26, +2)row | ERowOp 1: {3, 8} {Set 2 Uint32 : 26}, {Set 3 Uint64 : 26}, {Set 4 String : xxxxxxxxxx_26} | ERowOp 1: {3, 10} {Set 2 Uint32 : 27}, {Set 3 Uint64 : 27}, {Set 4 String : xxxxxxxxxx_27} + Rows{14} Label{144 rev 1, 122b}, [28, +2)row | ERowOp 1: {4, 1} {Set 2 Uint32 : 28}, {Set 3 Uint64 : 28}, {Set 4 String : xxxxxxxxxx_28} | ERowOp 1: {4, 3} {Set 2 Uint32 : 29}, {Set 3 Uint64 : 29}, {Set 4 String : xxxxxxxxxx_29} + Rows{15} Label{154 rev 1, 122b}, [30, +2)row | ERowOp 1: {4, 4} {Set 2 Uint32 : 30}, {Set 3 Uint64 : 30}, {Set 4 String : xxxxxxxxxx_30} | ERowOp 1: {4, 6} {Set 2 Uint32 : 31}, {Set 3 Uint64 : 31}, {Set 4 String : xxxxxxxxxx_31} + Rows{16} Label{164 rev 1, 122b}, [32, +2)row | ERowOp 1: {4, 7} {Set 2 Uint32 : 32}, {Set 3 Uint64 : 32}, {Set 4 String : xxxxxxxxxx_32} | ERowOp 1: {4, 8} {Set 2 Uint32 : 33}, {Set 3 Uint64 : 33}, {Set 4 String : xxxxxxxxxx_33} + Rows{17} Label{174 rev 1, 122b}, [34, +2)row | ERowOp 1: {4, 10} {Set 2 Uint32 : 34}, {Set 3 Uint64 : 34}, {Set 4 String : xxxxxxxxxx_34} | ERowOp 1: {5, 1} {Set 2 Uint32 : 35}, {Set 3 Uint64 : 35}, {Set 4 String : xxxxxxxxxx_35} + Rows{18} Label{184 rev 1, 122b}, [36, +2)row | ERowOp 1: {5, 3} {Set 2 Uint32 : 36}, {Set 3 Uint64 : 36}, {Set 4 String : xxxxxxxxxx_36} | ERowOp 1: {5, 4} {Set 2 Uint32 : 37}, {Set 3 Uint64 : 37}, {Set 4 String : xxxxxxxxxx_37} + Rows{19} Label{194 rev 1, 122b}, [38, +2)row | ERowOp 1: {5, 6} {Set 2 Uint32 : 38}, {Set 3 Uint64 : 38}, {Set 4 String : xxxxxxxxxx_38} | ERowOp 1: {5, 7} {Set 2 Uint32 : 39}, {Set 3 Uint64 : 39}, {Set 4 String : xxxxxxxxxx_39} Slices{ [0, 39] } Part{[1:2:3:0:0:0:0] eph 0, 2430b 40r} data 4441b + FlatIndex{26} Label{3 rev 3, 558b} 21 rec | Page Row Bytes (Uint32, Uint32) | 0 0 120b {0, 1} | 1 2 120b {0, 4} | 2 4 120b {0, 7} | 3 6 120b {0, 10} | 4 8 120b {1, 3} | 5 10 122b {1, 6} | 7 12 122b {1, 8} | 8 14 122b {2, NULL} | 9 16 122b {2, 4} | 11 18 122b {2, 7} | 12 20 122b {2, 10} | 13 22 122b {3, 3} | 15 24 122b {3, 6} | 16 26 122b {3, 8} | 17 28 122b {4, NULL} | 19 30 122b {4, 4} | 20 32 122b {4, 7} | 21 34 122b {4, 10} | 24 36 122b {5, 3} | 25 38 122b {5, 6} | 25 39 122b {5, 7} + BTreeIndex{PageId: 29 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 102b} | + BTreeIndex{PageId: 23 RowCount: 18 DataSize: 1088 ErasedRowCount: 0} Label{13 rev 1, 151b} | | + BTreeIndex{PageId: 6 RowCount: 6 DataSize: 360 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 0 RowCount: 2 DataSize: 120 ErasedRowCount: 0 | | | > {0, 4} | | | PageId: 1 RowCount: 4 DataSize: 240 ErasedRowCount: 0 | | | > {0, 7} | | | PageId: 2 RowCount: 6 DataSize: 360 ErasedRowCount: 0 | | > {0, 10} | | + BTreeIndex{PageId: 10 RowCount: 12 DataSize: 722 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 3 RowCount: 8 DataSize: 480 ErasedRowCount: 0 | | | > {1, 3} | | | PageId: 4 RowCount: 10 DataSize: 600 ErasedRowCount: 0 | | | > {1, 6} | | | PageId: 5 RowCount: 12 DataSize: 722 ErasedRowCount: 0 | | > {1, 8} | | + 
BTreeIndex{PageId: 14 RowCount: 18 DataSize: 1088 ErasedRowCount: 0} Label{13 rev 1, 147b} | | | PageId: 7 RowCount: 14 DataSize: 844 ErasedRowCount: 0 | | | > {2, NULL} | | | PageId: 8 RowCount: 16 DataSize: 966 ErasedRowCount: 0 | | | > {2, 4} | | | PageId: 9 RowCount: 18 DataSize: 1088 ErasedRowCount: 0 | > {2, 7} | + BTreeIndex{PageId: 28 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 151b} | | + BTreeIndex{PageId: 18 RowCount: 24 DataSize: 1454 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 11 RowCount: 20 DataSize: 1210 ErasedRowCount: 0 | | | > {2, 10} | | | PageId: 12 RowCount: 22 DataSize: 1332 ErasedRowCount: 0 | | | > {3, 3} | | | PageId: 13 RowCount: 24 DataSize: 1454 ErasedRowCount: 0 | | > {3, 6} | | + BTreeIndex{PageId: 22 RowCount: 30 DataSize: 1820 ErasedRowCount: 0} Label{13 rev 1, 147b} | | | PageId: 15 RowCount: 26 DataSize: 1576 ErasedRowCount: 0 | | | > {3, 8} | | | PageId: 16 RowCount: 28 DataSize: 1698 ErasedRowCount: 0 | | | > {4, NULL} | | | PageId: 17 RowCount: 30 DataSize: 1820 ErasedRowCount: 0 | | > {4, 4} | | + BTreeIndex{PageId: 27 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 249b} | | | PageId: 19 RowCount: 32 DataSize: 1942 ErasedRowCount: 0 | | | > {4, 7} | | | PageId: 20 RowCount: 34 DataSize: 2064 ErasedRowCount: 0 | | | > {4, 10} | | | PageId: 21 RowCount: 36 DataSize: 2186 ErasedRowCount: 0 | | | > {5, 3} | | | PageId: 24 RowCount: 38 DataSize: 2308 ErasedRowCount: 0 | | | > {5, 6} | | | PageId: 25 RowCount: 40 DataSize: 2430 ErasedRowCount: 0 + Rows{0} Label{04 rev 1, 120b}, [0, +2)row | ERowOp 1: {0, 1} {Set 2 Uint32 : 0}, {Set 3 Uint64 : 0}, {Set 4 String : xxxxxxxxxx_0} | ERowOp 1: {0, 3} {Set 2 Uint32 : 1}, {Set 3 Uint64 : 1}, {Set 4 String : xxxxxxxxxx_1} + Rows{1} Label{14 rev 1, 120b}, [2, +2)row | ERowOp 1: {0, 4} {Set 2 Uint32 : 2}, {Set 3 Uint64 : 2}, {Set 4 String : xxxxxxxxxx_2} | ERowOp 1: {0, 6} {Set 2 Uint32 : 3}, {Set 3 Uint64 : 3}, {Set 4 String : xxxxxxxxxx_3} + Rows{2} Label{24 rev 1, 120b}, [4, +2)row | ERowOp 1: {0, 7} {Set 2 Uint32 : 4}, {Set 3 Uint64 : 4}, {Set 4 String : xxxxxxxxxx_4} | ERowOp 1: {0, 8} {Set 2 Uint32 : 5}, {Set 3 Uint64 : 5}, {Set 4 String : xxxxxxxxxx_5} + Rows{3} Label{34 rev 1, 120b}, [6, +2)row | ERowOp 1: {0, 10} {Set 2 Uint32 : 6}, {Set 3 Uint64 : 6}, {Set 4 String : xxxxxxxxxx_6} | ERowOp 1: {1, 1} {Set 2 Uint32 : 7}, {Set 3 Uint64 : 7}, {Set 4 String : xxxxxxxxxx_7} + Rows{4} Label{44 rev 1, 120b}, [8, +2)row | ERowOp 1: {1, 3} {Set 2 Uint32 : 8}, {Set 3 Uint64 : 8}, {Set 4 String : xxxxxxxxxx_8} | ERowOp 1: {1, 4} {Set 2 Uint32 : 9}, {Set 3 Uint64 : 9}, {Set 4 String : xxxxxxxxxx_9} + Rows{5} Label{54 rev 1, 122b}, [10, +2)row | ERowOp 1: {1, 6} {Set 2 Uint32 : 10}, {Set 3 Uint64 : 10}, {Set 4 String : xxxxxxxxxx_10} | ERowOp 1: {1, 7} {Set 2 Uint32 : 11}, {Set 3 Uint64 : 11}, {Set 4 String : xxxxxxxxxx_11} + Rows{7} Label{74 rev 1, 122b}, [12, +2)row | ERowOp 1: {1, 8} {Set 2 Uint32 : 12}, {Set 3 Uint64 : 12}, {Set 4 String : xxxxxxxxxx_12} | ERowOp 1: {1, 10} {Set 2 Uint32 : 13}, {Set 3 Uint64 : 13}, {Set 4 String : xxxxxxxxxx_13} + Rows{8} Label{84 rev 1, 122b}, [14, +2)row | ERowOp 1: {2, 1} {Set 2 Uint32 : 14}, {Set 3 Uint64 : 14}, {Set 4 String : xxxxxxxxxx_14} | ERowOp 1: {2, 3} {Set 2 Uint32 : 15}, {Set 3 Uint64 : 15}, {Set 4 String : xxxxxxxxxx_15} + Rows{9} Label{94 rev 1, 122b}, [16, +2)row | ERowOp 1: {2, 4} {Set 2 Uint32 : 16}, {Set 3 Uint64 : 16}, {Set 4 String : xxxxxxxxxx_16} | ERowOp 1: {2, 6} {Set 2 Uint32 : 17}, {Set 3 Uint64 : 17}, {Set 4 
String : xxxxxxxxxx_17} + Rows{11} Label{114 rev 1, 122b}, [18, +2)row | ERowOp 1: {2, 7} {Set 2 Uint32 : 18}, {Set 3 Uint64 : 18}, {Set 4 String : xxxxxxxxxx_18} | ERowOp 1: {2, 8} {Set 2 Uint32 : 19}, {Set 3 Uint64 : 19}, {Set 4 String : xxxxxxxxxx_19} + Rows{12} Label{124 rev 1, 122b}, [20, +2)row | ERowOp 1: {2, 10} {Set 2 Uint32 : 20}, {Set 3 Uint64 : 20}, {Set 4 String : xxxxxxxxxx_20} | ERowOp 1: {3, 1} {Set 2 Uint32 : 21}, {Set 3 Uint64 : 21}, {Set 4 String : xxxxxxxxxx_21} + Rows{13} Label{134 rev 1, 122b}, [22, +2)row | ERowOp 1: {3, 3} {Set 2 Uint32 : 22}, {Set 3 Uint64 : 22}, {Set 4 String : xxxxxxxxxx_22} | ERowOp 1: {3, 4} {Set 2 Uint32 : 23}, {Set 3 Uint64 : 23}, {Set 4 String : xxxxxxxxxx_23} + Rows{15} Label{154 rev 1, 122b}, [24, +2)row | ERowOp 1: {3, 6} {Set 2 Uint32 : 24}, {Set 3 Uint64 : 24}, {Set 4 String : xxxxxxxxxx_24} | ERowOp 1: {3, 7} {Set 2 Uint32 : 25}, {Set 3 Uint64 : 25}, {Set 4 String : xxxxxxxxxx_25} + Rows{16} Label{164 rev 1, 122b}, [26, +2)row | ERowOp 1: {3, 8} {Set 2 Uint32 : 26}, {Set 3 Uint64 : 26}, {Set 4 String : xxxxxxxxxx_26} | ERowOp 1: {3, 10} {Set 2 Uint32 : 27}, {Set 3 Uint64 : 27}, {Set 4 String : xxxxxxxxxx_27} + Rows{17} Label{174 rev 1, 122b}, [28, +2)row | ERowOp 1: {4, 1} {Set 2 Uint32 : 28}, {Set 3 Uint64 : 28}, {Set 4 String : xxxxxxxxxx_28} | ERowOp 1: {4, 3} {Set 2 Uint32 : 29}, {Set 3 Uint64 : 29}, {Set 4 String : xxxxxxxxxx_29} + Rows{19} Label{194 rev 1, 122b}, [30, +2)row | ERowOp 1: {4, 4} {Set 2 Uint32 : 30}, {Set 3 Uint64 : 30}, {Set 4 String : xxxxxxxxxx_30} | ERowOp 1: {4, 6} {Set 2 Uint32 : 31}, {Set 3 Uint64 : 31}, {Set 4 String : xxxxxxxxxx_31} + Rows{20} Label{204 rev 1, 122b}, [32, +2)row | ERowOp 1: {4, 7} {Set 2 Uint32 : 32}, {Set 3 Uint64 : 32}, {Set 4 String : xxxxxxxxxx_32} | ERowOp 1: {4, 8} {Set 2 Uint32 : 33}, {Set 3 Uint64 : 33}, {Set 4 String : xxxxxxxxxx_33} + Rows{21} Label{214 rev 1, 122b}, [34, +2)row | ERowOp 1: {4, 10} {Set 2 Uint32 : 34}, {Set 3 Uint64 : 34}, {Set 4 String : xxxxxxxxxx_34} | ERowOp 1: {5, 1} {Set 2 Uint32 : 35}, {Set 3 Uint64 : 35}, {Set 4 String : xxxxxxxxxx_35} + Rows{24} Label{244 rev 1, 122b}, [36, +2)row | ERowOp 1: {5, 3} {Set 2 Uint32 : 36}, {Set 3 Uint64 : 36}, {Set 4 String : xxxxxxxxxx_36} | ERowOp 1: {5, 4} {Set 2 Uint32 : 37}, {Set 3 Uint64 : 37}, {Set 4 String : xxxxxxxxxx_37} + Rows{25} Label{254 rev 1, 122b}, [38, +2)row | ERowOp 1: {5, 6} {Set 2 Uint32 : 38}, {Set 3 Uint64 : 38}, {Set 4 String : xxxxxxxxxx_38} | ERowOp 1: {5, 7} {Set 2 Uint32 : 39}, {Set 3 Uint64 : 39}, {Set 4 String : xxxxxxxxxx_39} Slices{ [0, 39] } >> KqpScheme::CreateResourcePool [GOOD] >> KqpScheme::CreateResourcePoolClassifier >> KqpScheme::SchemaVersionMismatchWithIndexRead [GOOD] >> KqpScheme::SchemaVersionMismatchWithIndexWrite >> KqpScheme::AlterTableReplaceIndex [GOOD] >> KqpScheme::AlterTableRenameVectorIndex >> TxUsage::Sinks_Oltp_WriteToTopic_5_Query >> KqpScheme::TouchIndexAfterMoveIndexWriteReplace [GOOD] >> KqpScheme::TouchIndexAfterMoveTableRead >> KqpScheme::CreateTableWithFamiliesRegular [GOOD] >> KqpScheme::CreateTableWithDefaultFamily >> KqpScheme::AlterTableAddExplicitSyncIndex >> KqpScheme::AlterDatabaseChangeSchemeLimits-EnableAlterDatabase [GOOD] >> KqpScheme::AlterGroup >> KqpOlapScheme::AlterCompressionType [GOOD] >> KqpOlapScheme::AlterCompressionLevel >> KqpOlapScheme::AddColumnLongPk [GOOD] >> KqpOlapScheme::AddColumn >> KqpScheme::CreateAndDropUser-StrictAclCheck [GOOD] >> KqpScheme::CreateAndDropGroup >> KqpScheme::CreateTableWithVectorIndexPublicApi [GOOD] >> 
KqpScheme::CreateTableWithVectorIndexCoveredPublicApi >> TConsoleTests::TestCreateTenantAlreadyExists [GOOD] >> TConsoleTests::TestCreateTenantAlreadyExistsExtSubdomain >> KqpOlapScheme::AddColumnSimpleReader [GOOD] >> KqpOlapScheme::AddColumnWithStore >> THealthCheckTest::ShardsLimit905 [GOOD] >> KqpScheme::DoubleCreateExternalDataSource [GOOD] >> THealthCheckTest::ShardsLimit800 >> KqpScheme::DoubleCreateExternalTable >> TConsoleTests::TestRemoveTenant [GOOD] >> TConsoleTests::TestRemoveTenantExtSubdomain >> KqpConstraints::AddSerialColumnForbidden >> KqpScheme::ChangefeedSchemaChanges-UseQueryService [GOOD] >> KqpScheme::ChangefeedTopicPartitions >> KqpScheme::AlterIndexImplTableUsingPublicAPI [GOOD] >> KqpScheme::AlterResourcePool >> KqpAcl::ReadSuccess >> KqpOlapScheme::AlterCompressionLevel [GOOD] >> KqpOlapScheme::AlterCompressionLevelError >> KqpScheme::SchemaVersionMismatchWithIndexWrite [GOOD] >> KqpScheme::TouchIndexAfterMoveIndexRead >> KqpScheme::AlterTableRenameVectorIndex [GOOD] >> KqpScheme::AlterTableWithDecimalColumn >> KqpScheme::TouchIndexAfterMoveTableRead [GOOD] >> KqpScheme::TouchIndexAfterMoveTableWrite >> KqpScheme::DropAsyncReplicationCascade [GOOD] >> KqpScheme::DoubleCreateResourcePool >> KqpScheme::AlterGroup [GOOD] >> KqpOlapScheme::InvalidColumnInTieringRule [GOOD] >> KqpOlapScheme::NullColumnError >> KqpScheme::CreateTableWithVectorIndexCoveredPublicApi [GOOD] >> KqpScheme::CreateTransfer >> KqpScheme::CreateTableWithDefaultFamily [GOOD] >> KqpScheme::CreateTableWithDecimalColumn >> TConsoleTests::TestScaleRecommenderPoliciesValidation [GOOD] >> KqpScheme::CreateAndDropGroup [GOOD] >> KqpScheme::CreateAsyncReplication >> TConsoleTxProcessorTests::TestTxProcessorSingle >> KqpScheme::AsyncReplicationEndpointAndDatabase [GOOD] >> KqpScheme::AlterTransfer >> KqpScheme::DoubleCreateExternalTable [GOOD] >> KqpScheme::DisableResourcePools >> KqpOlapScheme::AddColumn [GOOD] >> KqpOlapScheme::AddColumnOldSchemeBulkUpsert >> KqpOlapScheme::AlterCompressionLevelError [GOOD] >> KqpOlapScheme::CreateTableNonDefaultFamilyWithoutCompression ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterGroup [GOOD] Test command err: Trying to start YDB, gRPC: 26259, MsgBus: 7513 2025-06-30T09:21:07.373078Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669999617593739:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:07.373120Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004859/r3tmp/tmpY2olph/pdisk_1.dat TServer::EnableGrpc on GrpcPort 26259, node 1 2025-06-30T09:21:07.447040Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:07.460677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:07.460693Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:07.460695Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:07.460759Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:07.475310Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:07.475342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:07.476395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7513 TClient is connected to server localhost:7513 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:07.531075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:07.544331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.567207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:21:07.598609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.612372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.811023Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669999617595302:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:07.811068Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:07.854784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.863101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.876881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.934756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.947795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.957447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.967822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.983673Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669999617595958:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:07.983706Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:07.983717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669999617595963:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:07.984605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:07.987806Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669999617595965:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:08.077346Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670003912563312:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:08.227602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004859/r3tmp/tmpQm7H05/pdisk_1.dat 2025-06-30T09:21:08.589726Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:08.602919Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15833, node 2 2025-06-30T09:21:08.615383Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.615400Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.615404Z no ... severity: 1 } 2025-06-30T09:21:12.302404Z node 8 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [8:7521670023159062785:2302], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:83: Error: At function: KiAlterDatabase!
:2:83: Error: ALTER DATABASE statement is not supported 2025-06-30T09:21:12.302852Z node 8 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=8&id=YjU1MTk4NGItZDViNjliYjQtODlmODRiNC1iODNlMzljOQ==, ActorId: [8:7521670023159062677:2291], ActorState: ExecuteState, TraceId: 01jz027y064nff30dfvf3fkrw8, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:12.320431Z node 8 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 9 2025-06-30T09:21:12.320553Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connected -> Disconnected Trying to start YDB, gRPC: 21088, MsgBus: 30247 2025-06-30T09:21:12.783417Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7521670021033245576:2195];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:12.785716Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004859/r3tmp/tmpltUIdv/pdisk_1.dat 2025-06-30T09:21:12.796613Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21088, node 10 2025-06-30T09:21:12.807605Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:12.807619Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:12.807623Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:12.807685Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30247 TClient is connected to server localhost:30247 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:12.885070Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:12.885101Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:12.885438Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:12.886096Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:21:12.889642Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:12.894062Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:12.951227Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:12.979157Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:13.046867Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:13.221734Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521670025328214280:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:13.221762Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:13.229059Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.241721Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.255459Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.268821Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.288489Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.300379Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.311987Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.332229Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521670025328214931:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:13.332262Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:13.332458Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521670025328214936:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:13.333484Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:13.336459Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7521670025328214938:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:13.411517Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7521670025328214989:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> KqpScheme::AlterTableAddExplicitSyncIndex [GOOD] >> KqpScheme::AlterTableAddExplicitAsyncIndex >> KqpOlapScheme::AddColumnWithStore [GOOD] >> KqpOlapScheme::AddPgColumnWithStore >> THealthCheckTest::ShardsLimit800 [GOOD] >> THealthCheckTest::ShardsNoLimit >> KqpScheme::ChangefeedTopicPartitions [GOOD] >> KqpScheme::ChangefeedTopicAutoPartitioning >> KqpOlapScheme::NullColumnError [GOOD] >> KqpScheme::AlterTableWithDecimalColumn [GOOD] >> KqpScheme::AlterTableWithPgColumn >> KqpOlapScheme::DropTtlColumn >> KqpConstraints::AddSerialColumnForbidden [GOOD] >> KqpConstraints::AlterTableAddColumnWithDefaultValue >> KqpScheme::AlterResourcePool [GOOD] >> KqpScheme::AlterNonExistingResourcePool >> KqpScheme::CreateResourcePoolClassifier [GOOD] >> KqpScheme::CreateResourcePoolClassifierOnServerless >> KqpAcl::ReadSuccess [GOOD] >> KqpAcl::FailedReadAccessDenied >> KqpScheme::TouchIndexAfterMoveIndexRead [GOOD] >> KqpScheme::ResourcePoolsValidation >> KqpScheme::DoubleCreateResourcePool [GOOD] >> KqpScheme::DoubleCreateResourcePoolClassifier+UseSink >> KqpOlapScheme::CreateTableNonDefaultFamilyWithoutCompression [GOOD] >> TConsoleTests::TestAlterBorrowedStorage [GOOD] >> TConsoleTests::TestAlterStorageUnitsOfSharedTenant >> KqpScheme::CreateTableWithTtlOnIntColumn [GOOD] >> KqpScheme::CreateTableWithTtlOnDatetime64Column >> KqpScheme::CreateTransfer [GOOD] >> KqpScheme::CreateTableWithDecimalColumn [GOOD] >> KqpOlapScheme::AddColumnOldSchemeBulkUpsert [GOOD] >> KqpOlapScheme::AddColumnErrors >> KqpScheme::CreateAsyncReplication [GOOD] >> KqpScheme::TouchIndexAfterMoveTableWrite [GOOD] >> KqpScheme::TwoSimilarFamiliesTest >> KqpScheme::DisableResourcePools [GOOD] >> KqpScheme::DisableResourcePoolsOnServerless >> KqpOlapScheme::DropTtlColumn [GOOD] >> KqpOlapScheme::InsertAddInsertDrop ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpOlapScheme::CreateTableNonDefaultFamilyWithoutCompression [GOOD] Test command err: Trying to start YDB, gRPC: 27739, MsgBus: 13948 2025-06-30T09:21:09.887125Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670010231077261:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:09.888498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00480b/r3tmp/tmpuoMy4v/pdisk_1.dat TServer::EnableGrpc on GrpcPort 27739, node 1 2025-06-30T09:21:09.987253Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:09.992323Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:09.992353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-30T09:21:09.993859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:10.006987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:10.007003Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:10.007006Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:10.007054Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13948 TClient is connected to server localhost:13948 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:10.083594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:10.091909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 CREATE TABLE `/Root/ColumnTableTest` (id Int32 NOT NULL, level Uuid, PRIMARY KEY (id)) PARTITION BY HASH(id) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:21:10.356475Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670014526045119:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:10.356508Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:10.411919Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670014526045139:2299] txid# 281474976715658, issues: { message: "Column \'level\': Type error: unsupported type Uuid" severity: 1 } Trying to start YDB, gRPC: 5394, MsgBus: 21551 2025-06-30T09:21:10.638131Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670013471226309:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:10.638287Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00480b/r3tmp/tmp6xCYGg/pdisk_1.dat 2025-06-30T09:21:10.653863Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5394, node 2 2025-06-30T09:21:10.671174Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:10.671187Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:10.671190Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:10.671244Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21551 TClient is connected to server localhost:21551 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:21:10.741281Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:10.741307Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:10.741684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:10.742197Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected CREATE TABLE `/Root/TableWithoutColumnFamily` (Key Uint64 NOT NULL, Value1 String, Value2 Uint32, PRIMARY KEY (Key), FAMILY default (DATA="test", COMPRESSION="off")) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:21:11.092007Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670017766194183:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.092042Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } CREATE TABLE `/Root/TableWithoutColumnFamily` (Key Uint64 NOT NULL, Value1 String FAMILY family1, Value2 Uint32, PRIMARY KEY (Key), FAMILY default (COMPRESSION="off"), FAMILY family1 (DATA="test", COMPRESSION="lz4")) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:21:11.098012Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670017766194205:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.098036Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } CREATE TABLE `/Root/TableWithoutColumnFamily` (Key Uint64 NOT NULL, Value1 String FAMILY family1, Value2 Uint32, PRIMARY KEY (Key), FAMILY default (COMPRESSION="off"), FAMILY family1 (COMPRESSION="lz4")) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:21:11.106148Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670017766194213:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.106174Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.110701Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:11.122077Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[2:7521670017766194261:2303];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:11.122124Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[2:7521670017766194261:2303];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:11.122174Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[2:7521670017766194261:2303];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:11.122201Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[2:7521670017766194261:2303];tablet_id=72075186224037888;process=TTxInitSchema: ... ine=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:14.263756Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670032260827776:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:14.263778Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670032260827776:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:14.269635Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:14.269655Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:14.269671Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:14.269677Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:14.269701Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:14.269708Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:14.269720Z node 6 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:14.269727Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:14.269736Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:14.269742Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:14.269774Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:14.269780Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:14.269824Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:14.269834Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:14.269852Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:14.269863Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:14.269873Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:21:14.269882Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:21:14.269891Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:21:14.269960Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:21:14.269967Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:21:14.269983Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:21:14.269988Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:21:14.271205Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670032260827776:2297];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=55473716302688;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275274271;max=18446744073709551615;plan=0;src=[6:7521670027965860120:2141];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:14.276477Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:14.277684Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:14.288588Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670032260827846:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.288657Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.290291Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670032260827855:2354] txid# 281474976715659, issues: { message: "schema update error: family `default`: codec `lz4` is not support compression level. in alter constructor STANDALONE_UPDATE" severity: 1 } Trying to start YDB, gRPC: 27136, MsgBus: 28244 2025-06-30T09:21:14.640838Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670028573709762:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:14.642649Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00480b/r3tmp/tmpw4ZXiF/pdisk_1.dat 2025-06-30T09:21:14.667511Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27136, node 7 2025-06-30T09:21:14.683047Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:14.683060Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:14.683062Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:14.683114Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28244 TClient is connected to server localhost:28244 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
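The schemereq error above ("codec `lz4` is not support compression level") indicates the elided statement set a compression level on a column family that uses the lz4 codec, which does not accept one. A hedged sketch of a family declaration along the lines this scheme layer would accept, assuming zstd is the codec that takes COMPRESSION_LEVEL for column tables (an assumption; the failing statement itself is truncated out of this log and `/Root/TableWithFamily` is a placeholder name):

    -- Hypothetical sketch: give the level to a codec that accepts one (assumed: zstd),
    -- or omit COMPRESSION_LEVEL entirely when using lz4.
    CREATE TABLE `/Root/TableWithFamily` (
        Key Uint64 NOT NULL,
        Value1 String FAMILY family1,
        Value2 Uint32,
        PRIMARY KEY (Key),
        FAMILY default (COMPRESSION = "off"),
        FAMILY family1 (COMPRESSION = "zstd", COMPRESSION_LEVEL = 3)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1);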
2025-06-30T09:21:14.748514Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:14.748554Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:14.749554Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:14.749612Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:14.751856Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:15.296366Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670032868677639:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:15.296394Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } >> TConsoleTests::TestCreateTenantAlreadyExistsExtSubdomain [GOOD] >> TConsoleTests::TestCreateSubSubDomain ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateTransfer [GOOD] Test command err: Trying to start YDB, gRPC: 17021, MsgBus: 23301 2025-06-30T09:21:08.246078Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670003185598609:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.246107Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004821/r3tmp/tmpeFNmdo/pdisk_1.dat 2025-06-30T09:21:08.313695Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17021, node 1 2025-06-30T09:21:08.335122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.335139Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.335140Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.335181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23301 TClient is connected to server localhost:23301 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:21:08.391244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:08.391272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:08.392208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.399075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:08.402793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:08.413688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.446017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.511200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.527922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.638823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670003185600198:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.638856Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.687993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.696896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.709327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.716475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.731324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.745178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.759230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.775360Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670003185600852:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.775398Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.775407Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670003185600857:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.776215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.778784Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670003185600859:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:08.870336Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670003185600910:3405] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 6281, MsgBus: 11505 2025-06-30T09:21:09.369712Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670010936232818:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:09.369738Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004821/r3tmp/tmpCBZrxw/pdisk_1.dat 2025-06-30T09:21:09.387490Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6281, node 2 2025-06-30T09:21:09.398717Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:09.398732Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty ... /runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:14.263629Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:14.276134Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:14.306858Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:14.353196Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:14.369029Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:14.609152Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670028546486221:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.609183Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.620049Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.635759Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.651457Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.668456Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.680641Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.695054Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.709321Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.728315Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670028546486874:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.728338Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.728503Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670028546486879:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.729438Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:14.732139Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670028546486881:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:14.820893Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670028546486932:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:15.135240Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:15.141999Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670032841454540:3584] txid# 281474976715672, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-30T09:21:15.151262Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.264006Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTransfer, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp:473) 2025-06-30T09:21:15.306263Z node 6 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:21:15.306901Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTransfer, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp:473) 2025-06-30T09:21:15.325935Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:15.333742Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTransfer, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp:473) 2025-06-30T09:21:15.333832Z node 6 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:21:15.339946Z node 6 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:21:15.340487Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 
281474976715679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:15.346392Z node 6 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:21:15.347800Z node 6 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:21:15.348143Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:15.360997Z node 6 :REPLICATION_CONTROLLER WARN: target_with_stream.cpp:27: [TableWorkerRegistar][rid 1][tid 1] Error of resolving topic '/Root/topic': NKikimr::NReplication::TEvYdbProxy::TEvDescribeTopicResponse { Result: { status: CLIENT_CANCELLED, issues: [ {
: Error: GRpc error: (1): Cancelled on the server side } {
: Error: Grpc error response on endpoint localhost:14806 } ] } }. Retry. ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateAsyncReplication [GOOD] Test command err: Trying to start YDB, gRPC: 8021, MsgBus: 7322 2025-06-30T09:21:08.117427Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670002386073110:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.117448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004834/r3tmp/tmpZ4EeT8/pdisk_1.dat 2025-06-30T09:21:08.179478Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8021, node 1 2025-06-30T09:21:08.198601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.198616Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.198618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.198667Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:08.218985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:08.219029Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:08.220056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7322 TClient is connected to server localhost:7322 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.280843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
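The CreateTransfer output above ends with the table worker scheduling a retry after the server cancelled its Describe call, and the log then moves into KqpScheme::CreateAsyncReplication. As a hedged illustration of the statement shape that test name suggests (hypothetical; the actual YQL is truncated out of this log, and the paths, endpoint, and secret name below are placeholders):

    -- Hypothetical sketch of an async replication definition; all identifiers
    -- and connection parameters here are placeholders, not values from this run.
    CREATE ASYNC REPLICATION `/Root/my_replication`
    FOR `/Root/source_table` AS `/Root/replica_table`
    WITH (
        CONNECTION_STRING = 'grpc://localhost:2135/?database=/Root',
        TOKEN_SECRET_NAME = 'my_secret'
    );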
2025-06-30T09:21:08.284210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:08.289750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.368946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:08.397003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.410475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.535955Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670002386074670:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.535990Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.595076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.606567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.618805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.632968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.647952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.706195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.716735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.734071Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670002386075324:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.734097Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.734205Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670002386075329:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.735012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.739920Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670002386075331:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:08.797522Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670002386075385:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:08.946013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.966771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) Trying to start YDB, gRPC: 29386, MsgBus: 25923 2025-06-30T09:21:09.232472Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:752167000 ... sActor;event=undelivered;self_id=[6:7521670029674899690:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:14.401044Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004834/r3tmp/tmpFCmB4B/pdisk_1.dat 2025-06-30T09:21:14.444958Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32136, node 6 2025-06-30T09:21:14.462966Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:14.462984Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:14.462987Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:14.463063Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2276 TClient is connected to server localhost:2276 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:21:14.516853Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:14.516883Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:14.521365Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:14.521976Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:14.523840Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:14.530104Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:14.559909Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:14.636001Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:14.656072Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:14.899236Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670029674901213:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.899260Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.910266Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.937090Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.956198Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.975382Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.991521Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.011526Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.039949Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.083572Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670033969869162:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:15.083604Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:15.083781Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670033969869167:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:15.084823Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:15.088682Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:15.089592Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670033969869169:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:15.178083Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670033969869220:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:15.404789Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:15.486468Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.506691Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateReplication, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp:473) >> KqpScheme::ChangefeedTopicAutoPartitioning [GOOD] >> KqpScheme::CreateAlterDropTableStore ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateTableWithDecimalColumn [GOOD] Test command err: Trying to start YDB, gRPC: 63380, MsgBus: 11979 2025-06-30T09:21:07.746407Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670001421105615:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:07.746425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004849/r3tmp/tmpzJYTYv/pdisk_1.dat TServer::EnableGrpc on GrpcPort 63380, node 1 2025-06-30T09:21:07.800373Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:07.812901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:07.812918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:07.812921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:07.812976Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11979 2025-06-30T09:21:07.848042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:07.848080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:07.849179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Connecting -> Connected TClient is connected to server localhost:11979 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:07.884729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:07.908693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.974870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.995545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.009269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.239719Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670005716074469:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.239745Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.308895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.318989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.334027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.350268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.408184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.420274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.476628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.491882Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670005716075127:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.491911Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.492079Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670005716075132:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.492741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.499475Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670005716075134:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:08.595943Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670005716075185:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:08.748422Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:08.794073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 21913, MsgBus: 13222 2025-06-30T09:21:09.066959Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670007147866159:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:09.068927Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004849/r3tmp/tmp5KMLLI/pdisk_1.dat 2025-06-30T09:21:09.097992Z node 2 :IMP ... LASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:14.299108Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:14.299145Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:14.303345Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2329 TClient is connected to server localhost:2329 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:21:14.399169Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:14.403161Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:14.415578Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:14.444050Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.484776Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:14.517786Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:14.675736Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670032080928300:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.675765Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.684708Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.698366Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.709269Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.767862Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.780891Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.800276Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.819895Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.863631Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670032080928955:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.863697Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.863944Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670032080928963:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.864994Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:14.869029Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670032080928965:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:14.960390Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670032080929016:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:15.197355Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:15.213845Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.241577Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.266453Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.305389Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.344128Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpConstraints::AlterTableAddColumnWithDefaultValue [GOOD] >> KqpConstraints::DefaultValuesForTable >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo [GOOD] >> KqpScheme::CreateTableWithTtlOnDatetime64Column [GOOD] >> KqpScheme::CreateTableWithUniformPartitionsCompat >> KqpOlapScheme::AddPgColumnWithStore [GOOD] >> KqpOlapScheme::AddColumnWithoutColumnFamily >> KqpConstraints::SerialTypeBigSerial >> KqpOlapScheme::AddColumnErrors [GOOD] >> KqpOlapScheme::AddColumnFamily >> THealthCheckTest::ShardsNoLimit [GOOD] >> KqpAcl::FailedReadAccessDenied [GOOD] >> KqpAcl::WriteSuccess >> KqpScheme::AlterTableWithPgColumn [GOOD] >> KqpScheme::AlterTableAddExplicitAsyncIndex [GOOD] >> KqpScheme::AlterTableAddExplicitSyncVectorKMeansTreeIndex >> 
KqpScheme::TwoSimilarFamiliesTest [GOOD] >> KqpOlapScheme::InsertAddInsertDrop [GOOD] >> KqpOlapScheme::InsertDropAddColumn >> KqpScheme::AlterNonExistingResourcePool [GOOD] >> KqpScheme::AlterNonExistingResourcePoolClassifier >> KqpScheme::ResourcePoolsValidation [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorSingle [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorSubProcessor >> KqpScheme::CreateAlterDropTableStore [GOOD] >> KqpScheme::CreateAlterDropColumnTableInStore >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsOnly [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxTags >> KqpOlapScheme::AddColumnWithoutColumnFamily [GOOD] >> KqpOlapScheme::AddColumnWithColumnFamily ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo [GOOD] Test command err: 2025-06-30T09:21:03.018861Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669984774435289:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:03.018893Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001b80/r3tmp/tmpXSUhXO/pdisk_1.dat 2025-06-30T09:21:03.090952Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 30766, node 1 2025-06-30T09:21:03.096858Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:03.097293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:03.097302Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:03.097304Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:03.097342Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62069 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:21:03.119356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:03.119386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:03.121193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:03.161585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:03.177130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:62069 2025-06-30T09:21:03.188617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-30T09:21:03.268090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-30T09:21:03.272161Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-30T09:21:03.272167Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-30T09:21:03.272169Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-30T09:21:03.272171Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-30T09:21:03.875317Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669982975712782:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:03.875350Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001b80/r3tmp/tmpFqtzmz/pdisk_1.dat 2025-06-30T09:21:03.892667Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26441, node 4 2025-06-30T09:21:03.906530Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:03.906544Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:03.906545Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:03.906586Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20012 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:03.975948Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:03.975978Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:03.977564Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:03.980222Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:03.996089Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:20012 2025-06-30T09:21:04.011349Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:21:04.088259Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:04.102301Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-30T09:21:04.107111Z node 4 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037889 not found 2025-06-30T09:21:04.107131Z node 4 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037891 not found 2025-06-30T09:21:04.107134Z node 4 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037890 not found 2025-06-30T09:21:04.107252Z node 4 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037888 not found 2025-06-30T09:21:04.109543Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-30T09:21:04.109555Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 
THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-30T09:21:04.109558Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-30T09:21:04.109562Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-30T09:21:04.733714Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521669986757886124:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:04.733734Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001b80/r3tmp/tmpGH58Bo/pdisk_1.dat 2025-06-30T09:21:04.751089Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10607, node 7 2025-06-30T09:21:04.766390Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:04.766405Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:04.766407Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:04.766465Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17998 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:04.834109Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:04.834149Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:04.835910Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:04.838661Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:04.851796Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:17998 2025-06-30T09:21:04.862585Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:21:04.895846Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:04.906809Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:04.919218Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-30T09:21:04.922692Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-06-30T09:21:04.922702Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found 2025-06-30T09:21:04.922704Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2025-06-30T09:21:04.923070Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-06-30T09:21:04.929199Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-30T09:21:04.929230Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-30T09:21:04.929236Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-30T09:21:04.929241Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-30T09:21:05.624162Z node 10 :METADATA_PROVIDER WARN: 
log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7521669989793491238:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:05.624194Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001b80/r3tmp/tmp9QZpfQ/pdisk_1.dat 2025-06-30T09:21:05.646648Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25154, node 10 2025-06-30T09:21:05.661277Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:05.661295Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:05.661296Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:05.661344Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9335 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:05.724429Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:05.724461Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:05.726075Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:05.733673Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:05.747330Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:9335 2025-06-30T09:21:05.758308Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-30T09:21:06.625351Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:10.624588Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7521669989793491238:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:10.624652Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::TwoSimilarFamiliesTest [GOOD] Test command err: Trying to start YDB, gRPC: 5562, MsgBus: 25892 2025-06-30T09:21:08.325176Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670006613166596:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.325232Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004820/r3tmp/tmp4mwiXQ/pdisk_1.dat 2025-06-30T09:21:08.393895Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5562, node 1 2025-06-30T09:21:08.413044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.413060Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.413062Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.413112Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:08.428068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:08.428104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:08.430211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25892 TClient is connected to server localhost:25892 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.483080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:08.485630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:08.488296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.559158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.602068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.663070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.788264Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670006613168157:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.788300Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.842343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.852690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.863695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.881338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.893299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.905881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.922012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.942493Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670006613168808:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.942529Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670006613168813:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.942529Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.943519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.946345Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670006613168815:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:09.022637Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670010908136162:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:09.201841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:09.215492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) waiting... 2025-06-30T09:21:09.327735Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvent ... node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.202181Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) Trying to start YDB, gRPC: 32754, MsgBus: 20505 2025-06-30T09:21:15.816603Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670034505704139:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:15.816633Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004820/r3tmp/tmpeNfnu1/pdisk_1.dat 2025-06-30T09:21:15.844885Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32754, node 6 2025-06-30T09:21:15.857478Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:15.857490Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:15.857492Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: 
failed to initialize from file: (empty maybe) 2025-06-30T09:21:15.857549Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20505 TClient is connected to server localhost:20505 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:15.921380Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:15.921416Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:15.921767Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:15.922621Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:21:15.923534Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:15.932559Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:15.951351Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:15.978468Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:15.993071Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:16.252484Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670038800672988:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:16.252516Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:16.263730Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.279031Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.339911Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.352675Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.365943Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.376427Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.436170Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.454798Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670038800673645:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:16.454829Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:16.454842Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670038800673650:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:16.455557Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:16.458234Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:16.458590Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670038800673652:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:16.532873Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670038800673703:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> KqpOlapScheme::AddColumnFamily [GOOD] >> KqpOlapScheme::AddColumnFamilyWithNotSupportedCodec ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/health_check/ut/unittest >> THealthCheckTest::ShardsNoLimit [GOOD] Test command err: 2025-06-30T09:21:09.394756Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:09.394830Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:09.394889Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:09.394930Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:09.394942Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:09.394968Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0010ef/r3tmp/tmpDUZD3I/pdisk_1.dat 2025-06-30T09:21:09.489684Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6550, node 1 TClient is connected to server localhost:28335 2025-06-30T09:21:09.605376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:09.605401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:09.605406Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:09.605524Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration id: "RED-70fb-1231c6b1" status: RED message: "Database has multiple issues" location { database { name: "/Root" } } reason: "RED-4ff1-1231c6b1" reason: "RED-ebec-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 id: "RED-4ff1-1231c6b1" status: RED message: "Compute quota usage" location { database { name: "/Root" } } reason: "RED-3195-1231c6b1" type: "COMPUTE" level: 2 id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-1" reason: "YELLOW-e9e2-1231c6b1-2" type: "COMPUTE" level: 2 id: "YELLOW-e9e2-1231c6b1-1" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 1 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 id: "YELLOW-e9e2-1231c6b1-2" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 2 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 id: "RED-3195-1231c6b1" status: RED message: "Shards quota exhausted" location { database { name: "/Root" } } type: "COMPUTE_QUOTA" level: 3 id: "RED-ebec-1231c6b1" status: RED message: "Storage usage over 90%" location { database { name: "/Root" } } type: "STORAGE" level: 2 2025-06-30T09:21:10.917609Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:10.917833Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:10.917888Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:10.918404Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:10.918433Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:10.918480Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0010ef/r3tmp/tmpRdZNYI/pdisk_1.dat 2025-06-30T09:21:11.029719Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24550, node 3 TClient is connected to server localhost:3683 2025-06-30T09:21:11.167341Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:11.167364Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:11.167369Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:11.167506Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration id: "RED-70fb-1231c6b1" status: RED message: "Database has multiple issues" location { database { name: "/Root" } } reason: "ORANGE-4ff1-1231c6b1" reason: "RED-ebec-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 id: "ORANGE-4ff1-1231c6b1" status: ORANGE message: "Compute quota usage" location { database { name: "/Root" } } reason: "ORANGE-3f66-1231c6b1" type: "COMPUTE" level: 2 id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" type: "COMPUTE" level: 2 id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 id: "ORANGE-3f66-1231c6b1" status: ORANGE message: "Shards quota usage is over 99%" location { database { name: "/Root" } } type: "COMPUTE_QUOTA" level: 3 id: "RED-ebec-1231c6b1" status: RED message: "Storage usage over 90%" location { database { name: "/Root" } } type: "STORAGE" level: 2 2025-06-30T09:21:12.443444Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:12.443548Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:12.443594Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:12.443974Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:622:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:12.444040Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:12.444070Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0010ef/r3tmp/tmpi6sQRZ/pdisk_1.dat 2025-06-30T09:21:12.541971Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27622, node 5 TClient is connected to server localhost:7792 2025-06-30T09:21:12.675974Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:12.675995Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:12.676000Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:12.676189Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration id: "RED-70fb-1231c6b1" status: RED message: "Database has multiple issues" location { database { name: "/Root" } } reason: "RED-ebec-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" reason: "YELLOW-4ff1-1231c6b1" type: "DATABASE" level: 1 id: "YELLOW-4ff1-1231c6b1" status: YELLOW message: "Compute quota usage" location { database { name: "/Root" } } reason: "YELLOW-d159-1231c6b1" type: "COMPUTE" level: 2 id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 id: "YELLOW-d159-1231c6b1" status: YELLOW message: "Shards quota usage is over 90%" location { database { name: "/Root" } } type: "COMPUTE_QUOTA" level: 3 id: "RED-ebec-1231c6b1" status: RED message: "Storage usage over 90%" location { database { name: "/Root" } } type: "STORAGE" level: 2 2025-06-30T09:21:13.962108Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:631:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:13.962194Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:13.962228Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:13.962550Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:628:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:13.962581Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:13.962607Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0010ef/r3tmp/tmp0nHUW8/pdisk_1.dat 2025-06-30T09:21:14.074684Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17278, node 7 TClient is connected to server localhost:23846 2025-06-30T09:21:14.280127Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:14.280147Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:14.280152Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:14.280212Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration id: "YELLOW-70fb-1231c6b1" status: YELLOW message: "Database has multiple issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" reason: "YELLOW-1c83-1231c6b1" type: "DATABASE" level: 1 id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-7" reason: "YELLOW-e9e2-1231c6b1-8" type: "COMPUTE" level: 2 id: "YELLOW-e9e2-1231c6b1-7" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 7 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 id: "YELLOW-e9e2-1231c6b1-8" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 8 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 id: "YELLOW-1c83-1231c6b1" status: YELLOW message: "Storage usage over 75%" location { database { name: "/Root" } } type: "STORAGE" level: 2 2025-06-30T09:21:15.997671Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:295:2222], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:15.997841Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:15.997928Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:15.997968Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:631:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:15.998070Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:15.998109Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0010ef/r3tmp/tmptXhF7V/pdisk_1.dat 2025-06-30T09:21:16.111762Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8377, node 9 TClient is connected to server localhost:17517 2025-06-30T09:21:16.316618Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:16.316648Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:16.316654Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:16.316818Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-10" reason: "YELLOW-e9e2-1231c6b1-9" type: "COMPUTE" level: 2 id: "YELLOW-e9e2-1231c6b1-9" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 9 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 id: "YELLOW-e9e2-1231c6b1-10" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 10 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterTableWithPgColumn [GOOD] Test command err: Trying to start YDB, gRPC: 65137, MsgBus: 9675 2025-06-30T09:21:08.338609Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670004293857757:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.338649Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00481b/r3tmp/tmp67N9s0/pdisk_1.dat TServer::EnableGrpc on GrpcPort 65137, node 1 2025-06-30T09:21:08.421108Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:08.429656Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.429677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.429680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.429723Z 
node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:08.438334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:08.438383Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:08.439293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9675 TClient is connected to server localhost:9675 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.491543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:08.495208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.514814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.548039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:08.615224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.808539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670004293859199:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.808578Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.870438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.878407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.892984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.908996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.925507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.937585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.948215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.960513Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670004293859853:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.960546Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.960553Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670004293859858:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.961351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.967959Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670004293859860:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:09.021627Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670008588827207:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:09.215251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 8027, MsgBus: 28132 2025-06-30T09:21:09.440610Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670009336261867:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:09.440631Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00481b/r3tmp/tmpmcPwF2/pdisk_1.dat 2025-06-30T09:21:09.456568Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8027, node 2 2025-06-30T09:21:09.466687Z node 2 :NET_CLASSIFIER WARN: ... , but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:15.564336Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:15.603798Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:15.796359Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670034158657003:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:15.796388Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:15.808360Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.867774Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.879381Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.892800Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.948708Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.959595Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.973738Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:15.987321Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670034158657662:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:15.987352Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:15.987397Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670034158657667:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:15.988448Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:15.993084Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670034158657669:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:16.045599Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670038453625016:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:16.262484Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.299437Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:16.306436Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:16.312223Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:16.328094Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:16.341626Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:16.364680Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:16.380125Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:16.396072Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:16.411178Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:16.430339Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:16.441569Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715682:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:16.455191Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) >> KqpScheme::BuildingUniqIndexDeniesTableModificationsPublicApi >> KqpOlapScheme::InsertDropAddColumn [GOOD] >> KqpOlapScheme::InitTtlSettingsOnShardStart >> KqpScheme::CreateTableWithUniformPartitionsCompat [GOOD] >> KqpScheme::CreateTableWithStoreExternalBlobs ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::ResourcePoolsValidation [GOOD] Test command err: Trying to start YDB, gRPC: 30036, MsgBus: 14019 2025-06-30T09:21:08.328077Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670003518030534:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.328179Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004828/r3tmp/tmpvV413Z/pdisk_1.dat 2025-06-30T09:21:08.380708Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:08.382075Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670003518030423:2080] 1751275268326367 != 1751275268326370 TServer::EnableGrpc on GrpcPort 30036, node 1 
2025-06-30T09:21:08.398670Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.398683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.398686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.398728Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14019 TClient is connected to server localhost:14019 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:21:08.462558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:08.462605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:08.463651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.468490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:08.471333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:21:08.477349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.545653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:21:08.575837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.594598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.797098Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670003518032027:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.797133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.861476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.869870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.877963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.893013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.949152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.957069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.968645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.991341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670003518032682:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.991367Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.991394Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670003518032687:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.992245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.995818Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670003518032689:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:21:09.072968Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670007813000036:3402] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:09.268791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:09.329149Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 10555, MsgBus: 32169 2025-06-30T09:21:09.666542Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670007422001172:2069];send_to=[0:73071995366581 ... d__operation_finalize_build_index.cpp:383) waiting... Trying to start YDB, gRPC: 62543, MsgBus: 10289 2025-06-30T09:21:15.471968Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670036446772846:2242];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004828/r3tmp/tmphkwFnH/pdisk_1.dat 2025-06-30T09:21:15.475629Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; TServer::EnableGrpc on GrpcPort 62543, node 6 2025-06-30T09:21:15.512585Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:15.512886Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7521670036446772628:2080] 1751275275467443 != 1751275275467446 2025-06-30T09:21:15.519009Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:15.519021Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:15.519023Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:15.519075Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:15.580200Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:15.580230Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:15.587297Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10289 TClient is connected to server localhost:10289 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:15.703603Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:15.705610Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:15.718515Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:15.784000Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:15.850139Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:15.868151Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:15.977186Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670036446774245:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:15.977211Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:15.989376Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.046645Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.056812Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.068441Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.127781Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.149183Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.163897Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.203410Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670040741742197:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:16.203442Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:16.203633Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670040741742202:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:16.204738Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:16.210113Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670040741742204:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:16.315769Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670040741742255:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:16.474207Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:16.674875Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670040741742547:3577] txid# 281474976715672, issues: { message: "Invalid resource pool settings: Invalid resource pool configuration, concurrent_query_limit is 1001, that exceeds limit in 1000" severity: 1 } 2025-06-30T09:21:16.680519Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670040741742558:3585] txid# 281474976715673, issues: { message: "Invalid resource pool settings: Invalid resource pool configuration, queue_size unsupported without concurrent_query_limit or database_load_cpu_threshold" severity: 1 } >> KqpOlapScheme::CreateTableWithTtl >> KqpConstraints::DefaultValuesForTable [GOOD] >> KqpConstraints::DefaultValuesForTableNegative2 >> KqpScheme::AlterTableAddImplicitSyncIndex >> KqpConstraints::SerialTypeBigSerial [GOOD] >> KqpConstraints::DropCreateSerial >> KqpScheme::CreateAlterDropColumnTableInStore [GOOD] >> KqpScheme::CreateTableWithReadReplicasUncompat >> KqpScheme::DoubleCreateResourcePoolClassifier+UseSink [GOOD] >> KqpScheme::DoubleCreateResourcePoolClassifier-UseSink >> KqpOlapScheme::AddColumnWithColumnFamily [GOOD] >> KqpOlapScheme::AddExsitsColumnFamily >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestReboot [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex >> KqpAcl::WriteSuccess [GOOD] >> KqpAcl::FailedWriteAccessDenied >> KqpOlapScheme::AddColumnFamilyWithNotSupportedCodec [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_5_Query [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateAlterDropColumnTableInStore [GOOD] Test command err: Trying to start YDB, gRPC: 8996, MsgBus: 2283 2025-06-30T09:21:10.827132Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670013930040332:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:10.827251Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004804/r3tmp/tmppprije/pdisk_1.dat 2025-06-30T09:21:10.882783Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8996, node 1 2025-06-30T09:21:10.906965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:10.906978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:10.906981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2025-06-30T09:21:10.907028Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:10.928273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:10.928302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:2283 2025-06-30T09:21:10.929402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2283 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:10.974270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:10.979168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:10.981855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:11.046632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:11.106457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:11.122662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:11.305882Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670018225009116:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.305907Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.366167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.378098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.391825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.405760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.420337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.435463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.449116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.473755Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670018225009770:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.473780Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.473958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670018225009775:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.475033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:11.478439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:11.478518Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670018225009777:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:11.545624Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670018225009828:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:11.747218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.828469Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:11.873480Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037922:1][1:7521670018225010306:2500] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:19:0] RequestType: ByTableId ... nks; 2025-06-30T09:21:17.745965Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[6:7521670044894024537:2306];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:17.745986Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[6:7521670044894024537:2306];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:17.746008Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[6:7521670044894024537:2306];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:17.746029Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[6:7521670044894024537:2306];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:17.746055Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[6:7521670044894024537:2306];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:17.746622Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:17.746634Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:17.746651Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:17.746656Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:17.746680Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:17.746685Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:17.746696Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:17.746702Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:17.746710Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:17.746715Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:17.752233Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:17.752254Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:17.752303Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:17.752321Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:17.752338Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:17.752346Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:17.752356Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:21:17.752387Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:21:17.752395Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:21:17.752515Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:21:17.752522Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:21:17.752542Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:21:17.752547Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:21:17.757728Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:17.757762Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:17.757862Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:17.757886Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:17.757916Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:17.757940Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:17.757963Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:17.757986Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:17.758007Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:17.758026Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:17.758058Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:17.758078Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670044894024434:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:17.764022Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[6:7521670044894024427:2298];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:17.764046Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[6:7521670044894024427:2298];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:17.764108Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[6:7521670044894024427:2298];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:17.764132Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[6:7521670044894024427:2298];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:17.764162Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[6:7521670044894024427:2298];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:17.764202Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[6:7521670044894024427:2298];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:17.764223Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[6:7521670044894024427:2298];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; >> KqpOlapScheme::InitTtlSettingsOnShardStart [GOOD] |81.2%| [TA] $(B)/ydb/core/health_check/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpScheme::DisableExternalDataSourcesOnServerless >> KqpScheme::CreateDropTableMultipleTime >> KqpOlapScheme::CreateTableWithTtl [GOOD] >> KqpOlapScheme::CreateTableWithoutTtl >> KqpScheme::AlterTableAddExplicitSyncVectorKMeansTreeIndex [GOOD] >> KqpScheme::AlterSequence >> TNodeBrokerTest::Test1001NodesSubscribers [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query >> KqpScheme::AddColumnFamilyWithCompressionLevel >> KqpOlapScheme::AddExsitsColumnFamily [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpOlapScheme::InitTtlSettingsOnShardStart [GOOD] Test command err: Trying to start YDB, gRPC: 2905, MsgBus: 10380 2025-06-30T09:21:11.467269Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670017301486664:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:11.467468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047fb/r3tmp/tmp7DzT5Q/pdisk_1.dat 2025-06-30T09:21:11.541303Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2905, node 1 2025-06-30T09:21:11.560784Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:11.560797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:11.560800Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:11.560848Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10380 2025-06-30T09:21:11.615125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:11.615159Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:11.616139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10380 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:11.647618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:11.650598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 CREATE TABLE `/Root/ColumnTableTest` (id Int32 NOT NULL, id_second Int32 NOT NULL, level Int32, created_at Timestamp NOT NULL, PRIMARY KEY (created_at, id_second)) PARTITION BY HASH(created_at) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1, TTL = Interval("PT1H") ON created_at); 2025-06-30T09:21:11.935920Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670017301487183:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.935951Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.998445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:12.010574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:12.010675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:12.010853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:12.010882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:12.010901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:12.010919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:12.010940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:12.010957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:12.010983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:12.011000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:12.011018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:12.011036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670021596454542:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:12.015181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:12.015198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:12.015213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:12.015220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:12.015245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:12.015251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:12.015264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:12.015270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:12.015279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:12.015286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:12.015320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:12.015327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:12.015349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:12.015355Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:12.015369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:12.015376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:12.01 ... 2075186224037888;self_id=[6:7521670047899343698:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:18.418239Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:18.418271Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:18.418287Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:18.418294Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:18.418321Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:18.418328Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:18.418342Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:18.418350Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:18.418368Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:18.418377Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:18.418406Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:18.418414Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:18.418442Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:18.418452Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:18.418466Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:18.418472Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:18.418480Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:21:18.418487Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:21:18.418494Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:21:18.418569Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:21:18.418578Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:21:18.418594Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:21:18.418598Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:21:18.449860Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670047899343698:2297];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=89583256405728;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275278449;max=18446744073709551615;plan=0;src=[6:7521670043604376047:2143];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:18.452037Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:18.453153Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:18.466708Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:21:18.467848Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670047899343768:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.468036Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.473142Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:21:18.478429Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670047899343795:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.478476Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.480037Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:21:18.482922Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:21:18.489239Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670047899343829:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.489306Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.490825Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:21:18.494642Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-30T09:21:18.499466Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670047899343859:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.499524Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.501156Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:21:18.512630Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-30T09:21:18.519744Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670047899343698:2297];ev=NActors::TEvents::TEvPoison;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-30T09:21:18.537005Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670047899343899:2325];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=19; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpOlapScheme::AddColumnFamilyWithNotSupportedCodec [GOOD] Test command err: Trying to start YDB, gRPC: 9183, MsgBus: 26807 2025-06-30T09:21:10.775213Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670012271808674:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:10.775234Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047fe/r3tmp/tmpJC1muk/pdisk_1.dat 2025-06-30T09:21:10.837169Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9183, node 1 2025-06-30T09:21:10.855365Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:10.855376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:10.855378Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:10.855410Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26807 2025-06-30T09:21:10.877163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:10.877192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:10.878219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26807 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:10.923285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:10.927550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:10.938649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:11.007989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:11.069954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:11.083840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:11.213614Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670016566777531:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.213644Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.263160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.271214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.280489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.295839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.307446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.321825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.335973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.351425Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670016566778183:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.351460Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.351523Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670016566778188:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:11.352585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:11.362263Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670016566778190:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: { <main>
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:11.418613Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670016566778241:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:11.639249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:11.723375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710757:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:11.745424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMai ... ON="lz4"), FAMILY family2 (COMPRESSION="lz4")) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:21:18.089083Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670046872264528:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.089117Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.109406Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:18.136485Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:18.136541Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:18.136667Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:18.136699Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:18.136727Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:18.136763Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:18.136787Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:18.136813Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:18.136835Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:18.136860Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:18.136885Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:18.136908Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:18.140900Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:18.140931Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:18.140950Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:18.140957Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:18.140984Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:18.140991Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:18.141004Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:18.141011Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:18.141024Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:18.141033Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:18.141067Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:18.141074Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:18.141098Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:18.141107Z node 7 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:18.141124Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:18.141134Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:18.141143Z node 7 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:21:18.141151Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:21:18.141158Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:21:18.141245Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:21:18.141251Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:21:18.141269Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:21:18.141274Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:21:18.144065Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670046872264571:2297];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=88361362504096;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275278143;max=18446744073709551615;plan=0;src=[7:7521670042577296921:2142];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:18.163057Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:18.164479Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:18.177323Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670046872264641:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.177500Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.202861Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670046872264647:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.203223Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } >> KqpScheme::AlterNonExistingResourcePoolClassifier [GOOD] >> KqpAcl::AclForOltpAndOlap+isOlap ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::Test1001NodesSubscribers [GOOD] Test command err: 2025-06-30T09:20:43.178694Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.186924Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.187008Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.187049Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.187099Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.187142Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.187172Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.192153Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.192248Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.192304Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.192356Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.192426Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.192466Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.192503Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.192600Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.192656Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.192698Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.194153Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.194219Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.194260Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.194295Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.194328Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.194419Z node 
1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.195827Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.195985Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.196051Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.196109Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.196164Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.196194Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.196219Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.196243Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.197042Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.197183Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.197228Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.197265Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.197297Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.197353Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.197403Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:43.197495Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.197536Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.197852Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.198318Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.198449Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.198487Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.203450Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.204207Z node 5 
:NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.204237Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.204255Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.204897Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.205426Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.205560Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.205619Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.205668Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.205825Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.205924Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.206024Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.207006Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.207350Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:43.227840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:43.227872Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:43.232828Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:43.233239Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:43.233323Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:43.233488Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:43.233926Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:43.233966Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:43.234007Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:43.234018Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:43.234022Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:43.234032Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:43.234059Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:43.234062Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:43.234065Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:43.234069Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:43.234080Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-30T09:20:43.234085Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-30T09:20:43.266300Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-30T09:20:43.266339Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.023000Z 2025-06-30T09:20:43.266349Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-30T09:20:43.266358Z ... 
025-06-30T09:21:17.692470Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:17.709576Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:563:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.709611Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.709628Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5585:7169] 2025-06-30T09:21:17.719751Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9695:11226], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:17.730888Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:17.730919Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:17.730940Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:17.744892Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:563:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.744925Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.744941Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5590:7174] 2025-06-30T09:21:17.749259Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9697:11228], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:17.749390Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:17.749399Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:17.749418Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:17.781582Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:563:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.781607Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.781621Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5595:7179] 2025-06-30T09:21:17.785077Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9699:11230], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:17.785140Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], 
Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:17.785146Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:17.785158Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:17.801703Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:563:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.801733Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.801751Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5600:7184] 2025-06-30T09:21:17.807428Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9701:11232], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:17.807572Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:17.807583Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:17.807602Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:17.821722Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:563:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.821753Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.821770Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5605:7189] 2025-06-30T09:21:17.826235Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9703:11234], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:17.826298Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:17.826333Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:17.826350Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:17.847056Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:563:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.847090Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.847106Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5610:7194] 2025-06-30T09:21:17.867588Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender 
[1:9705:11236], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:17.867685Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:17.867695Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:17.867714Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:17.891378Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:563:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.891410Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.891427Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5615:7199] 2025-06-30T09:21:17.904418Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9707:11238], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:17.904507Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:17.904515Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:17.904530Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:17.927062Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:563:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.927094Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.927112Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5620:7204] 2025-06-30T09:21:17.931560Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9709:11240], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:17.931641Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:17.931650Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:17.931666Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:17.947072Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:563:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.947106Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.947123Z node 1 
:NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5625:7209] 2025-06-30T09:21:17.951599Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9711:11242], Recipient [1:563:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:17.951694Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2213], Recipient [1:563:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:17.951702Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:17.951715Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:17.968611Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:563:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.968634Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:17.968649Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5630:7214] >> KqpConstraints::DropCreateSerial [GOOD] >> KqpConstraints::DefaultsAndDeleteAndUpdate >> KqpOlapScheme::CreateTableWithoutTtl [GOOD] >> KqpOlapScheme::CreateWithDefaultColumnFamily >> KqpConstraints::DefaultValuesForTableNegative2 [GOOD] >> KqpConstraints::DefaultValuesForTableNegative3 >> KqpScheme::CreateTableWithStoreExternalBlobs [GOOD] >> KqpScheme::DisableResourcePoolsOnServerless [GOOD] >> KqpScheme::DisableResourcePoolClassifiersOnServerless >> KqpScheme::CreateTableWithReadReplicasUncompat [GOOD] >> KqpScheme::CreateTableWithReadReplicasCompat >> KqpScheme::CreateDropTableViaApiMultipleTime [GOOD] >> KqpScheme::CreateExternalDataSource >> TConsoleTxProcessorTests::TestTxProcessorSubProcessor [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorTemporary >> KqpScheme::CreateResourcePoolClassifierOnServerless [GOOD] >> TConsoleTests::TestRemoveTenantExtSubdomain [GOOD] >> TConsoleTests::TestRemoveSharedTenantWoServerlessTenants >> TConsoleTests::TestAlterStorageUnitsOfSharedTenant [GOOD] >> TConsoleTests::TestAlterServerlessTenant >> KqpScheme::CreateAndAlterTableWithPartitionBy ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterNonExistingResourcePoolClassifier [GOOD] Test command err: Trying to start YDB, gRPC: 28642, MsgBus: 20334 2025-06-30T09:21:08.318293Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670003672485644:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.318572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004818/r3tmp/tmpJ5HKBx/pdisk_1.dat 2025-06-30T09:21:08.397750Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:08.403656Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 28642, node 1 2025-06-30T09:21:08.418966Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.418982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.418984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.419027Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:08.422727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:08.422776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:08.423795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20334 TClient is connected to server localhost:20334 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.491791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:08.507380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.539202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:08.569160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.589194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.802986Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670003672487207:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.803027Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.853942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.862203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.870310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.884712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.899201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.913305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.927776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.944054Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670003672487859:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.944088Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.944112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670003672487864:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.944924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.953721Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670003672487866:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:09.032948Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670007967455213:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:09.207983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:09.216052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:09.232303Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7521670007967455692:3709], for# user@builtin, access# DescribeSchema 2025-06-30T09:21:09.232320Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Acces ... > Connecting 2025-06-30T09:21:17.148157Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6605 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:17.166155Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-30T09:21:17.170281Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.192247Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:17.236107Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:17.264112Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:17.562858Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670044202675201:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:17.562884Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:17.572987Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.589678Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.603238Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.625668Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.659850Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.679215Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.699363Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.728416Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670044202675855:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:17.728460Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:17.728741Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670044202675860:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:17.729880Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:17.733848Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:17.733970Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670044202675862:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:17.828219Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670044202675913:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:18.059975Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:18.236335Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.493035Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:18.662075Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.830384Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.916920Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:19.021436Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) >> KqpScheme::AlterTableAddImplicitSyncIndex [GOOD] >> KqpScheme::AlterTableAlterIndex+UseQueryService >> KqpScheme::AlterSequence [GOOD] >> KqpScheme::AlterSequenceRestartWith >> KqpAcl::FailNavigate >> KqpOlapScheme::CreateWithDefaultColumnFamily [GOOD] >> KqpOlapScheme::CreateWithColumnFamily ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/kqp/ut/scheme/unittest >> KqpOlapScheme::AddExsitsColumnFamily [GOOD] Test command err: Trying to start YDB, gRPC: 16189, MsgBus: 17671 2025-06-30T09:21:08.518208Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670003455346777:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.518957Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004816/r3tmp/tmp77TIIn/pdisk_1.dat TServer::EnableGrpc on GrpcPort 16189, node 1 2025-06-30T09:21:08.586144Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:08.596861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.596878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.596882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.596931Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:08.617206Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:08.617244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:08.618480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17671 TClient is connected to server localhost:17671 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.678733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:08.681385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 CREATE TABLE `/Root/ColumnTableTest` (id Int32 NOT NULL, id_second Int32 NOT NULL, level Int32, created_at Timestamp NOT NULL, PRIMARY KEY (id, id_second)) PARTITION BY HASH(id) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:21:08.903413Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670003455347260:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.903447Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.950092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:08.960747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:08.960830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:08.960896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:08.960924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:08.960944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:08.960969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:08.960994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:08.961019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:08.961051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:08.961074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:08.961100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:08.961125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670003455347323:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:08.961652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:08.961667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:08.961681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:08.961686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:08.961706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:08.961714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:08.961727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:08.961736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:08.961743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:08.961750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:08.961783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:08.961794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:08.961826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:08.961832Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:08.961845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:08.961849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:08.961857Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tabl ... ON="lz4"), FAMILY family2 (COMPRESSION="lz4")) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:21:19.081609Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670051543493936:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.081689Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.083864Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:19.096023Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:19.096079Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:19.096149Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:19.096176Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:19.096207Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:19.096230Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:19.096254Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:19.096279Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:19.096301Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:19.096326Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:19.096350Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:19.096375Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:19.102594Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:19.102617Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:19.102633Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:19.102640Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:19.102668Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:19.102675Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:19.102687Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:19.102695Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:19.102703Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:19.102710Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:19.102757Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:19.102764Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:19.102787Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:19.102796Z node 7 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:19.102810Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:19.102818Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:19.102826Z node 7 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:21:19.102833Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:21:19.102839Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:21:19.102925Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:21:19.102932Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:21:19.102949Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:21:19.102954Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:21:19.139318Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521670051543493990:2297];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976710658;this=55863494456960;method=TTxController::StartProposeOnExecute;tx_info=281474976710658:TX_KIND_SCHEMA;min=1751275279139;max=18446744073709551615;plan=0;src=[7:7521670047248526337:2142];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:19.142700Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:19.144081Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-30T09:21:19.148721Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670051543494059:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.148742Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.162826Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670051543494065:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.162932Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateTableWithStoreExternalBlobs [GOOD] Test command err: Trying to start YDB, gRPC: 5275, MsgBus: 16481 2025-06-30T09:21:07.635668Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669999851119004:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:07.635702Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004853/r3tmp/tmpVDnqDE/pdisk_1.dat TServer::EnableGrpc on GrpcPort 5275, node 1 2025-06-30T09:21:07.698792Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:07.702311Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:07.702327Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:07.702329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:07.702371Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16481 2025-06-30T09:21:07.738099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:07.738128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:07.739243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16481 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:07.767787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:07.776053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.797225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.820974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.833626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.062639Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670004146087881:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.062680Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.128733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.138344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.150097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.163806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.180284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.193916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.206249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.223315Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670004146088536:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.223362Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.223491Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670004146088541:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.224477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.233321Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670004146088543:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:21:08.292436Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670004146088594:3401] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:08.494544Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670004146088885:3578] txid# 281474976710672, issues: { message: "Cannot enable TTL on unknown column: \'CreatedAt\'" severity: 1 } 2025-06-30T09:21:08.505578Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670004146088898:3585] txid# 281474976710673, issues: { message: "Unsupported column type" severity: 1 } 2025-06-30T09:21:08.515642Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670004146088911:3592] txid# 281474976710674, issues: { message: "To enable TTL on integral type column \'ValueSinceUnixEpochModeSettings\' should be specified" severity: 1 } 2025-06-30T09:21:08.523839Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670004146088924:3599] txid# 281474976710675, issues: { message: "To enable TTL on integral type column \'ValueSinceUnixEpochModeSettings\' should be specified" severity: 1 } 2025-06-30T09:21:08.536686Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670004146088937:3606] txid# 281474976710676, issues: { message: "To enable TTL on integral type column \'ValueSinceUnixEpochModeSettings\' should be specified" severity: 1 } 2025-06-30T09:21:08.544797Z node 1 :FLAT_TX_S ... is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 19386, MsgBus: 61157 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004853/r3tmp/tmpzNk9ny/pdisk_1.dat 2025-06-30T09:21:17.820127Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:17.837954Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7521670042178840474:2080] 1751275277786979 != 1751275277786982 2025-06-30T09:21:17.838679Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19386, node 9 2025-06-30T09:21:17.891068Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:17.891108Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:17.891584Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:17.891586Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:17.891589Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:17.891640Z node 9 :NET_CLASSIFIER 
ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:17.899182Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61157 TClient is connected to server localhost:61157 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:18.154409Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:18.163297Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:18.183801Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:18.241022Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:18.330362Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:18.401111Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:18.785402Z node 9 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:18.844526Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7521670046473809382:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.844560Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.863524Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.904235Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.923182Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.950048Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.980686Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.997097Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.033349Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.119045Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7521670050768777333:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.119089Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.119321Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7521670050768777338:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.120479Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:19.125365Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:19.125769Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [9:7521670050768777340:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:19.187879Z node 9 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [9:7521670050768777391:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:19.519513Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpScheme::AddColumnFamilyWithCompressionLevel [GOOD] >> KqpScheme::AddDropColumn ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateResourcePoolClassifierOnServerless [GOOD] Test command err: Trying to start YDB, gRPC: 61838, MsgBus: 24525 2025-06-30T09:21:08.132271Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670006520961820:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.132380Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00482e/r3tmp/tmpglI0US/pdisk_1.dat 2025-06-30T09:21:08.188622Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61838, node 1 2025-06-30T09:21:08.205906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.205919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.205921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.205969Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24525 2025-06-30T09:21:08.231580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:08.231602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:08.232721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24525 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.278192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:08.291076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.313296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.337005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:08.357194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.560069Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670006520963296:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.560089Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.630851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.640240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.700433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.710356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.724767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.738205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.752492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.768123Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670006520963950:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.768155Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.768218Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670006520963955:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.769166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.771856Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670006520963957:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:08.851781Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670006520964008:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:09.011054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient::Ls request: /Root/TableWithCompactionPolicy TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableWithCompactionPolicy" PathId: 17 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715672 CreateStep: 1751275269065 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithCompactionPolicy" Co ... 0T09:21:19.383525Z node 7 :KQP_SESSION TRACE: kqp_session_actor.cpp:856: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: ExecuteState, TraceId: 01jz0284wg3dfgvyztwxtdhnwj, read snapshot result: UNAVAILABLE, step: 0, tx id: 0 2025-06-30T09:21:19.383532Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: ExecuteState, TraceId: 01jz0284wg3dfgvyztwxtdhnwj, Create QueryResponse for error on request, msg: 2025-06-30T09:21:19.383575Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2056: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: ExecuteState, TraceId: 01jz0284wg3dfgvyztwxtdhnwj, txInfo Status: Aborted Kind: ReadOnly TotalDuration: 0 ServerDuration: 91.915 QueriesCount: 2 2025-06-30T09:21:19.383604Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1546: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: ExecuteState, TraceId: 01jz0284wg3dfgvyztwxtdhnwj, Sending to Executer TraceId: 0 8 2025-06-30T09:21:19.383626Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1604: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: ExecuteState, TraceId: 01jz0284wg3dfgvyztwxtdhnwj, Created new KQP executer: [7:7521670051358452181:2588] isRollback: 1 2025-06-30T09:21:19.383635Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: ExecuteState, TraceId: 
01jz0284wg3dfgvyztwxtdhnwj, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 1 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:19.384258Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: CleanupState, TraceId: 01jz0284wg3dfgvyztwxtdhnwj, EndCleanup, isFinal: 0 2025-06-30T09:21:19.384291Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2368: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: CleanupState, TraceId: 01jz0284wg3dfgvyztwxtdhnwj, Sent query response back to proxy, proxyRequestId: 33, proxyId: [7:7521670038473547925:2080] 2025-06-30T09:21:19.385466Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Database coordinators are unavailable" severity: 1 } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { id: "01jz0284xv3x5282580t9p136z" } } } } ; 2025-06-30T09:21:19.385559Z node 7 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Database coordinators are unavailable 2025-06-30T09:21:19.385596Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2413: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: ReadyState, Session closed due to explicit close event 2025-06-30T09:21:19.385603Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:19.385607Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-30T09:21:19.385610Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2644: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: unknown state, Cleanup temp tables: 0 2025-06-30T09:21:19.385627Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2735: SessionId: ydb://session/3?node_id=7&id=ZTViYjM0NGYtYzgyYzBkMmYtMWY1NDgyZmQtZDViYzVlNGM=, ActorId: [7:7521670051358452135:2588], ActorState: unknown state, Session actor destroyed 2025-06-30T09:21:19.423861Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM= 2025-06-30T09:21:19.423991Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: unknown state, session actor bootstrapped 2025-06-30T09:21:19.424079Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ReadyState, TraceId: 01jz028520ch173yrw955dn66c, received request, proxyRequestId: 35 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: SELECT * FROM `//Root/test-shared/.metadata/initialization/migrations`; rpcActor: [7:7521670051358452194:2597] database: /Root/test-shared databaseId: /Root/test-shared pool id: default 2025-06-30T09:21:19.424083Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ReadyState, TraceId: 01jz028520ch173yrw955dn66c, request placed into pool from cache: default 2025-06-30T09:21:19.424104Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:618: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ExecuteState, TraceId: 01jz028520ch173yrw955dn66c, Sending CompileQuery request 2025-06-30T09:21:19.439707Z node 7 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [7:7521670038473547919:2075], processor id# 72075186224038891, database# /Root/test-shared 2025-06-30T09:21:19.464926Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:818: SessionId: 
ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ExecuteState, TraceId: 01jz028520ch173yrw955dn66c, acquire mvcc snapshot 2025-06-30T09:21:19.536473Z node 7 :KQP_RESOURCE_MANAGER ERROR: kqp_snapshot_manager.cpp:129: KqpSnapshotManager: CreateSnapshot got unexpected status=UNAVAILABLE, issues:
: Error: Database coordinators are unavailable 2025-06-30T09:21:19.536525Z node 7 :KQP_SESSION TRACE: kqp_session_actor.cpp:856: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ExecuteState, TraceId: 01jz028520ch173yrw955dn66c, read snapshot result: UNAVAILABLE, step: 0, tx id: 0 2025-06-30T09:21:19.536533Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ExecuteState, TraceId: 01jz028520ch173yrw955dn66c, Create QueryResponse for error on request, msg: 2025-06-30T09:21:19.536608Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2056: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ExecuteState, TraceId: 01jz028520ch173yrw955dn66c, txInfo Status: Aborted Kind: ReadOnly TotalDuration: 0 ServerDuration: 71.675 QueriesCount: 2 2025-06-30T09:21:19.536655Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1546: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ExecuteState, TraceId: 01jz028520ch173yrw955dn66c, Sending to Executer TraceId: 0 8 2025-06-30T09:21:19.536687Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1604: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ExecuteState, TraceId: 01jz028520ch173yrw955dn66c, Created new KQP executer: [7:7521670051358452224:2596] isRollback: 1 2025-06-30T09:21:19.536707Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ExecuteState, TraceId: 01jz028520ch173yrw955dn66c, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 1 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:19.537399Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: CleanupState, TraceId: 01jz028520ch173yrw955dn66c, EndCleanup, isFinal: 0 2025-06-30T09:21:19.537443Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2368: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: CleanupState, TraceId: 01jz028520ch173yrw955dn66c, Sent query response back to proxy, proxyRequestId: 35, proxyId: [7:7521670038473547925:2080] 2025-06-30T09:21:19.537882Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Database coordinators are unavailable" severity: 1 } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { id: "01jz028538764vkg34yrvxc66v" } } } } ; 2025-06-30T09:21:19.538003Z node 7 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Database coordinators are unavailable 2025-06-30T09:21:19.538017Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2413: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ReadyState, Session closed due to explicit close event 2025-06-30T09:21:19.538024Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:19.538027Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-30T09:21:19.538030Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2644: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: unknown state, Cleanup temp tables: 0 2025-06-30T09:21:19.538048Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2735: SessionId: ydb://session/3?node_id=7&id=YmNkNjQxNmUtZWYzMTAwN2UtMmU3ZGQ5ZjUtNmUxMzFiMGM=, ActorId: [7:7521670051358452193:2596], ActorState: unknown state, Session actor destroyed >> KqpScheme::DropKeyColumn >> KqpScheme::MoveTableWithSerialTypes >> KqpScheme::DoubleCreateResourcePoolClassifier-UseSink [GOOD] |81.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |81.2%| [TA] {RESULT} $(B)/ydb/core/health_check/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpAcl::FailedWriteAccessDenied [GOOD] >> KqpAcl::RecursiveCreateTableShouldSuccess >> TConsoleTests::TestCreateSubSubDomain [GOOD] >> TConsoleTests::TestCreateSubSubDomainExtSubdomain |81.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots >> KqpOlapScheme::CreateWithColumnFamily [GOOD] >> KqpOlapScheme::CreateTableStoreWithFamily >> KqpScheme::QueryWithAlter >> KqpScheme::CreateTableWithReadReplicasCompat [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysSimpleUncompat |81.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |81.2%| [LD] {RESULT} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer >> KqpConstraints::DefaultValuesForTableNegative3 [GOOD] >> KqpConstraints::DefaultAndIndexesTestDefaultColumnNotIncludedInIndex >> KqpConstraints::DefaultsAndDeleteAndUpdate [GOOD] >> KqpConstraints::DefaultValuesForTableNegative4 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::DoubleCreateResourcePoolClassifier-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 63751, MsgBus: 26232 2025-06-30T09:21:08.321585Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670002418727745:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.321849Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004823/r3tmp/tmpNEOUio/pdisk_1.dat 2025-06-30T09:21:08.390928Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63751, node 1 2025-06-30T09:21:08.416322Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.416336Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.416337Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.416380Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26232 2025-06-30T09:21:08.464395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:08.464426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:08.465586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26232 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.489638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:08.492624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:08.494815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.514312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.551843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.572926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.792063Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670002418729301:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.792098Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.836693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.846597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.856737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.870405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.885358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.899209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.912732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.931398Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670002418729953:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.931441Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.931500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670002418729958:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.932264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.939855Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670002418729960:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:09.040536Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670006713697307:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:09.221251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:09.239303Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670006713697664:3624] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/table/feed\', error: path hasn\'t been resolved, nearest resolved path: \'/Root/table\' (id: [OwnerId: 72057594046644480, LocalPathId: 17]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp:477" issue_code: 200200 severity: 1 } Trying to start YDB, gRPC: 7251, MsgBus: 9504 2025-06-30T09:21:09.461344Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=und ... 30T09:21:18.820300Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:19.162188Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670050708443582:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.162218Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.174200Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.203761Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.231816Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.252077Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.275581Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.301172Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.321480Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.356216Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670050708444235:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.356242Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.356418Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670050708444240:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.357493Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:19.365149Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670050708444242:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:19.375737Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:19.447256Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670050708444302:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:19.690222Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.798867Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:19.890458Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.984182Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.085297Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:20.150911Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:20.680865Z node 6 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [6:7521670055003412642:2706], TxId: 281474976715706, task: 1. Ctx: { TraceId : 01jz02867q1j38a57qy7d6v666. SessionId : ydb://session/3?node_id=6&id=NzQ1NzgxNTAtZGZlYThmODUtY2I3YmRhMDItMjFjNmM3N2M=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. 
PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-30T09:21:20.681008Z node 6 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [6:7521670055003412643:2707], TxId: 281474976715706, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=6&id=NzQ1NzgxNTAtZGZlYThmODUtY2I3YmRhMDItMjFjNmM3N2M=. TraceId : 01jz02867q1j38a57qy7d6v666. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [6:7521670055003412639:2681], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-30T09:21:20.681074Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=6&id=NzQ1NzgxNTAtZGZlYThmODUtY2I3YmRhMDItMjFjNmM3N2M=, ActorId: [6:7521670055003412562:2681], ActorState: ExecuteState, TraceId: 01jz02867q1j38a57qy7d6v666, Create QueryResponse for error on request, msg: 2025-06-30T09:21:20.682159Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor.h:64;event=unexpected reply;error_message=operation { ready: true status: PRECONDITION_FAILED issues { message: "Conflict with existing key." issue_code: 2012 severity: 1 } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { id: "01jz02867d74sy6aw1zprwmzb5" } } } } ;request=session_id: "ydb://session/3?node_id=6&id=NzQ1NzgxNTAtZGZlYThmODUtY2I3YmRhMDItMjFjNmM3N2M=" tx_control { tx_id: "01jz02867d74sy6aw1zprwmzb5" } query { yql_text: "DECLARE $objects AS List>;\nINSERT INTO `//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers`\nSELECT database,name,config,rank FROM AS_TABLE($objects)\n" } parameters { key: "$objects" value { type { list_type { item { struct_type { members { name: "database" type { type_id: UTF8 } } members { name: "name" type { type_id: UTF8 } } members { name: "config" type { type_id: JSON_DOCUMENT } } members { name: "rank" type { type_id: INT64 } } } } } } value { items { items { text_value: "/Root" } items { text_value: "MyResourcePoolClassifier" } items { text_value: "{\"resource_pool\":\"test_pool\"}" } items { int64_value: 1 } } } } } ; >> KqpScheme::CreateUserWithPassword >> KqpScheme::CreateAndAlterTableWithPartitionBy [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitionSizeUncompat >> KqpScheme::CreateExternalDataSource [GOOD] >> KqpScheme::DisableExternalDataSourcesOnServerless [GOOD] >> KqpScheme::CreateExternalDataSourceWithSa >> KqpScheme::DisableDropExternalDataSource >> KqpOlapScheme::DropThenAddColumnCompaction [GOOD] >> KqpOlapScheme::DropColumnTableStoreErrors >> KqpScheme::CreateTableWithUniformPartitionsUncompat >> KqpScheme::AddDropColumn [GOOD] >> KqpScheme::AddChangefeedWhenDisabled >> KqpAcl::FailNavigate [GOOD] >> KqpAcl::FailResolve >> KqpScheme::AlterSequenceRestartWith [GOOD] >> KqpScheme::AlterResourcePoolClassifier >> KqpOlapScheme::CreateTableStoreWithFamily [GOOD] >> KqpOlapScheme::CreateTableWithDefaultFamilyWithoutSettings >> KqpScheme::AlterTableAlterIndex+UseQueryService [GOOD] >> KqpScheme::AlterTableAlterIndex-UseQueryService >> KqpScheme::DropTransfer >> KqpScheme::DropKeyColumn [GOOD] >> KqpScheme::DropIndexDataColumn >> KqpOlapScheme::WithoutDefaultColumnFamily >> KqpAcl::AclForOltpAndOlap+isOlap [GOOD] >> KqpAcl::AclForOltpAndOlap-isOlap >> KqpScheme::MoveTableWithSerialTypes [GOOD] >> KqpScheme::PathWithNoRoot >> KqpScheme::CreateTableWithPartitionAtKeysSimpleUncompat [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysSimpleCompat >> TConsoleTxProcessorTests::TestTxProcessorTemporary [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorRandom >> KqpOlapScheme::DropColumnTableStoreErrors [GOOD] >> KqpOlapScheme::DropTableAfterInsert >> KqpAcl::RecursiveCreateTableShouldSuccess [GOOD] >> KqpConstraints::AddColumnWithDefaultForbidden |81.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |81.2%| [LD] {RESULT} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |81.2%| [LD] 
{default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table >> KqpConstraints::DefaultValuesForTableNegative4 [GOOD] >> KqpConstraints::IndexedTableAndNotNullColumn |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |81.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table >> TChargeBTreeIndex::FewNodes_Groups [GOOD] >> TChargeBTreeIndex::FewNodes_History >> KqpScheme::DisableDropExternalDataSource [GOOD] >> KqpScheme::DisableCreateExternalTable >> KqpScheme::CreateAndAlterTableWithPartitionSizeUncompat [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitionSizeCompat |81.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore |81.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore >> KqpScheme::AddChangefeedWhenDisabled [GOOD] >> KqpScheme::AlterColumnTableTtl >> KqpOlapScheme::WithoutDefaultColumnFamily [GOOD] >> KqpOlapScheme::UnknownColumnFamily >> KqpScheme::CreateTableWithUniformPartitionsUncompat [GOOD] >> KqpScheme::CreateTableWithUniformPartitionsUuid >> KqpAcl::FailResolve [GOOD] >> KqpAcl::AclRevoke-UseSink-IsOlap >> KqpConstraints::DefaultAndIndexesTestDefaultColumnNotIncludedInIndex [GOOD] >> KqpConstraints::AlterTableAddNotNullWithDefault >> KqpScheme::CreateUserWithPassword [GOOD] >> KqpScheme::CreateUserWithoutPassword >> KqpOlapScheme::TtlRunInterval >> KqpOlapScheme::CreateTableWithDefaultFamilyWithoutSettings [GOOD] >> KqpOlapScheme::CreateTableWithFamilyWithOnlyCompressionLevel >> KqpOlapScheme::DropTableAfterInsert [GOOD] >> KqpScheme::PathWithNoRoot [GOOD] >> KqpScheme::ModifyUnknownPermissions >> KqpOlapScheme::UnknownColumnFamily [GOOD] >> KqpOlapScheme::TwoSimilarColumnFamilies >> KqpConstraints::AddColumnWithDefaultForbidden [GOOD] >> KqpConstraints::AddNonColumnDoesnotReturnInternalError >> KqpScheme::CreateTableWithPartitionAtKeysSimpleCompat [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysUuid >> KqpScheme::DropIndexDataColumn [GOOD] >> KqpScheme::DropExternalDataSource ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpOlapScheme::DropTableAfterInsert [GOOD] Test command err: Trying to start YDB, gRPC: 18980, MsgBus: 64100 2025-06-30T09:21:08.182047Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670003063697274:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.182087Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00482c/r3tmp/tmp8Oo6db/pdisk_1.dat 2025-06-30T09:21:08.254771Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18980, node 1 2025-06-30T09:21:08.276389Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.276403Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.276405Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.276478Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:08.284687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:08.284738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:08.285773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64100 TClient is connected to server localhost:64100 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.368939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... CREATE TABLE `/Root/ColumnTableTest` (id Int32 NOT NULL, id_second Int32 NOT NULL, level Int32, created_at Timestamp NOT NULL, PRIMARY KEY (id, id_second)) PARTITION BY HASH(id) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 16); 2025-06-30T09:21:08.687540Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670003063697865:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.687578Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.742007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:08.764278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:08.764324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:08.764354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:08.764376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:08.764394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:08.764413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:08.764431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:08.764448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:08.764469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:08.764485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:08.764506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:08.764526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670003063698047:2297];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:08.765774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:08.765846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:08.765892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:08.765920Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:08.765947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:08.765975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:08.766003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:08.766038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:08.766114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:08.766141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:08.766169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:08.766199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7521670003063698048:2298];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:08.766811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:08.766830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:08.766846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_f ... sues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:23.401942Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:23.420487Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:23.420528Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:23.420653Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:23.420680Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:23.420711Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:23.420738Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:23.420762Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:23.420790Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:23.420812Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:23.420836Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:23.420861Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:23.420886Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:23.424470Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:23.424494Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:23.424513Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:23.424522Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:23.424558Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:23.424565Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:23.424581Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:23.424589Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:23.424599Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:23.424607Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:23.424641Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:23.424649Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:23.424676Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:23.424686Z node 6 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:23.424703Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:23.424711Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:23.424720Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:21:23.424729Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:21:23.424737Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:21:23.424821Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:21:23.424828Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:21:23.424846Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:21:23.424851Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:21:23.428576Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=55006622500096;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275283428;max=18446744073709551615;plan=0;src=[6:7521670065624805178:2144];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.436067Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.437384Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=1392;columns=2; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=1392;columns=2; 2025-06-30T09:21:23.460802Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[6:7521670069919772908:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:23.460847Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:23.461719Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/drop_table.cpp:401) 2025-06-30T09:21:23.468016Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:21:23.476232Z node 6 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037888 not found 2025-06-30T09:21:23.476434Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670069919772828:2297];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; >> KqpScheme::AlterTableAlterIndex-UseQueryService [GOOD] >> KqpScheme::AlterTableAlterMissedIndex >> KqpScheme::DropTransfer [GOOD] >> KqpScheme::DropTransfer_QueryService >> KqpScheme::DisableCreateExternalTable [GOOD] >> KqpScheme::DisableDropExternalTable >> KqpAcl::AclForOltpAndOlap-isOlap [GOOD] >> KqpAcl::AclDml-UseSink-IsOlap >> KqpOlapScheme::CreateTableWithFamilyWithOnlyCompressionLevel [GOOD] >> KqpScheme::BuildingUniqIndexDeniesTableModificationsPublicApi [GOOD] >> KqpScheme::BuildingUniqIndexDeniesTableModificationsSql >> KqpScheme::AlterColumnTableTtl [GOOD] >> KqpScheme::AlterColumnTableTiering >> KqpScheme::CreateAndAlterTableWithPartitionSizeCompat [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitioningByLoadUncompat >> KqpScheme::CreateTableWithUniformPartitionsUuid [GOOD] >> KqpScheme::CreateTableWithUniqConstraint >> TConsoleTests::TestRemoveSharedTenantWoServerlessTenants [GOOD] >> TConsoleTests::TestRemoveSharedTenantWithServerlessTenants >> KqpOlapScheme::TwoSimilarColumnFamilies [GOOD] >> KqpOlapTypes::Decimal >> KqpScheme::CreateUserWithoutPassword [GOOD] >> KqpScheme::DescribeIndexTable >> KqpScheme::QueryWithAlter [GOOD] >> KqpScheme::RenameTable+СolumnTable >> KqpAcl::AclRevoke-UseSink-IsOlap [GOOD] >> KqpAcl::AclRevoke+UseSink-IsOlap ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpOlapScheme::CreateTableWithFamilyWithOnlyCompressionLevel [GOOD] Test command err: Trying to start YDB, gRPC: 20954, MsgBus: 2766 2025-06-30T09:21:17.956534Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670044863818924:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:17.958540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047de/r3tmp/tmpnbZQFF/pdisk_1.dat 2025-06-30T09:21:18.109300Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:18.109328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:18.109740Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table 
profiles were not loaded 2025-06-30T09:21:18.117998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20954, node 1 2025-06-30T09:21:18.169839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:18.169853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:18.169856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:18.169905Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2766 TClient is connected to server localhost:2766 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:18.308770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:18.315216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 CREATE TABLE `/Root/ColumnTableTest` (id Int32 NOT NULL, id_second Int32 NOT NULL, level Int32, created_at Timestamp NOT NULL, PRIMARY KEY (created_at, id_second)) PARTITION BY HASH(created_at) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1, TTL = Interval("PT1H") ON created_at); 2025-06-30T09:21:18.765241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670049158786802:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.765274Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.827470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:18.839687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:18.839784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:18.839856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:18.839881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:18.839909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:18.839934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:18.839963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:18.839989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:18.840011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:18.840036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:18.840056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:18.840080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670049158786864:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:18.840608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:18.840618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:18.840631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:18.840635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:18.840654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:18.840658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:18.840669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:18.840673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:18.840679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:18.840684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:18.840706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:18.840711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:18.840730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:18.840738Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:18.840751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:18.840755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:18.840 ... .253170Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.253287Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.253468Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.253560Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.254138Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.254252Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.254409Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.254706Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.256016Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.256178Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.257031Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.257185Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037921;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.258064Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037921;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.258216Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.258553Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.258921Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.259750Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.259939Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.260802Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.260934Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.261818Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.261935Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.263175Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.263325Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.263675Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.263960Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.264133Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.264282Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.264763Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.264856Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:23.265080Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:23.265583Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; Trying to start YDB, gRPC: 10096, MsgBus: 6829 2025-06-30T09:21:23.700984Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670067179794736:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:23.702364Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047de/r3tmp/tmpzUd3aX/pdisk_1.dat 2025-06-30T09:21:23.730412Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10096, node 7 2025-06-30T09:21:23.757919Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:23.757932Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:23.757935Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:23.757995Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6829 2025-06-30T09:21:23.804319Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:23.804347Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:23.805844Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6829 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:21:23.831233Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:23.834798Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:24.078123Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670071474762609:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:24.078162Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } >> KqpScheme::CreateExternalDataSourceWithSa [GOOD] >> KqpScheme::CreateExternalDataSourceValidationAuthMethod >> KqpScheme::AlterResourcePoolClassifier [GOOD] >> KqpScheme::DropExternalDataSource [GOOD] >> KqpScheme::DropExternalTable >> KqpOlapScheme::TtlRunInterval [GOOD] >> KqpOlapScheme::TenThousandColumns >> KqpConstraints::IndexedTableAndNotNullColumn [GOOD] >> KqpConstraints::IndexAutoChooseAndNonReadyIndex >> KqpScheme::ModifyUnknownPermissions [GOOD] >> KqpScheme::ModifyPermissionsByRelativePathQueryClient >> TConsoleTests::TestAlterServerlessTenant [GOOD] >> TConsoleTests::TestAttributes >> KqpScheme::DisableDropExternalTable [GOOD] >> KqpScheme::DisableResourcePoolClassifiers |81.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |81.2%| [LD] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut >> KqpScheme::UseUnauthorizedTable >> KqpScheme::DropTransfer_QueryService [GOOD] >> KqpScheme::DropResourcePool >> KqpScheme::AlterTableAlterMissedIndex [GOOD] >> KqpScheme::AlterTableAddUniqIndexSqlFeatureOff >> TRestoreWithRebootsTests::CancelShouldSucceed[Raw] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterResourcePoolClassifier [GOOD] Test command err: Trying to start YDB, gRPC: 28253, MsgBus: 27788 2025-06-30T09:21:12.719173Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670020486802151:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:12.719249Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047fa/r3tmp/tmp5BjmOs/pdisk_1.dat 2025-06-30T09:21:12.780030Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28253, node 1 2025-06-30T09:21:12.801520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:12.801534Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:12.801536Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:12.801589Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27788 TClient is connected to server localhost:27788 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:21:12.863095Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:12.863127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:12.867215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:12.870213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:21:12.878568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:12.909411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:12.937742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:12.951973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:13.200202Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670024781770939:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:13.200241Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:13.262694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.274071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.294236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.313197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.330832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.352740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.366211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.386433Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670024781771591:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:13.386458Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:13.386540Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670024781771596:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:13.387368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:13.393117Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670024781771598:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:13.481831Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670024781771649:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:13.702058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.720080Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:13.727435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.747215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, a ... e itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:22.266441Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:22.289638Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.611431Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670063702139661:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.611466Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.621027Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.648840Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.661213Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.676403Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.689584Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.707148Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.748772Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.778975Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670063702140311:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.778999Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.779143Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670063702140316:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.780078Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:22.787806Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670063702140318:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:22.867729Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670063702140369:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:23.019026Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:23.129996Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.238612Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:23.325706Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.415263Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.496472Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:23.579430Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:23.925000Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670067997108676:2675], DatabaseId: /Root, PoolId: test_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-30T09:21:23.925036Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool test_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-30T09:21:24.096191Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670072292076173:2742], DatabaseId: /Root, PoolId: test_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-30T09:21:24.096238Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool test_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-30T09:21:24.249050Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670072292076312:2786], DatabaseId: /Root, PoolId: test_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-30T09:21:24.249093Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool test_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-30T09:21:24.484478Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670072292076470:2842], DatabaseId: /Root, PoolId: test_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-30T09:21:24.484505Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool test_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } >> KqpScheme::CreateTableWithUniqConstraint [GOOD] >> KqpScheme::CreateTableWithUniqConstraintPublicApi >> KqpScheme::CreateAndAlterTableWithPartitioningByLoadUncompat [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitioningByLoadCompat >> TConsoleTxProcessorTests::TestTxProcessorRandom [GOOD] >> TImmediateControlsConfiguratorTests::TestControlsInitialization >> TConsoleTests::TestCreateSubSubDomainExtSubdomain [GOOD] >> TConsoleTests::TestAuthorization >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysUuid [GOOD] >> KqpScheme::CreateTableWithPgColumn >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query [GOOD] >> RetryPolicy::TWriteSession_SeqNoShift >> KqpOlapTypes::Decimal [GOOD] >> KqpOlapTypes::Decimal35 >> KqpScheme::DropExternalTable [GOOD] >> KqpScheme::DropDependentExternalDataSource >> KqpScheme::DescribeIndexTable [GOOD] >> KqpScheme::CreatedAt >> KqpScheme::InvalidationAfterDropCreate >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table >> KqpScheme::AlterTransfer [GOOD] >> KqpScheme::AlterTransfer_QueryService >> KqpAcl::AclRevoke+UseSink-IsOlap [GOOD] >> KqpAcl::AclRevoke-UseSink+IsOlap >> TImmediateControlsConfiguratorTests::TestControlsInitialization [GOOD] >> TImmediateControlsConfiguratorTests::TestModifiedControls |81.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |81.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots >> KqpScheme::CreateExternalDataSourceValidationAuthMethod [GOOD] >> KqpScheme::CreateExternalDataSourceValidationLocation >> KqpAcl::AclDml-UseSink-IsOlap [GOOD] >> KqpAcl::AclDml+UseSink-IsOlap >> KqpScheme::AlterTableAddUniqIndexSqlFeatureOff [GOOD] >> KqpScheme::AlterTableAddUniqIndexPublicApiFeatureOff >> KqpScheme::UseUnauthorizedTable [GOOD] >> KqpScheme::UseNonexistentTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_restore/unittest >> TRestoreWithRebootsTests::CancelShouldSucceed[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:04.446471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:04.446491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.446496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:04.446500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:04.446504Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:04.446507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:04.446513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.446525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:04.446610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:04.446668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:04.458500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:04.458519Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:04.462059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:04.462113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:04.462148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:04.463691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:04.463757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:04.463873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.463935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:04.464547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.464582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:04.464872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.464883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.464908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:04.464916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:04.464923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:04.464943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.466284Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.488797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:04.488877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.488953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:04.488962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:04.489020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:04.489035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:04.489762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.489821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:04.489878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.489889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:04.489895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:04.489901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:04.490288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.490297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:04.490305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:04.490626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.490635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.490642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.490649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:04.491388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:04.491800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:04.491837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:04.492017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.492042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:04.492050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.492107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:04.492115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.492147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:04.492159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:04.492581Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.492589Z node 1 :FLAT_TX_SCHEMESHARD ... andle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:25.852014Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:25.852019Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:21:25.852025Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-30T09:21:25.852033Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:21:25.852051Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-30T09:21:25.853071Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:21:25.853089Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:324: TRestore TAborting, opId: 1003:0 ProgressState at tablet72057594046678944 2025-06-30T09:21:25.853100Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:351: TRestore Abort, on datashard: 72075186233409546, opId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:21:25.853175Z node 67 :DATASHARD_RESTORE DEBUG: import_s3.cpp:605: [Import] [s3:1003] Handle NKikimr::TEvDataShard::TEvS3DownloadInfo { Info: { DataETag: a3ed28bfb53c9214f635c51ed6b618c4 ProcessedBytes: 0 WrittenBytes: 0 WrittenRows: 0 ChecksumState: DownloadState: } } 2025-06-30T09:21:25.853185Z node 67 :DATASHARD_RESTORE NOTICE: import_s3.cpp:620: [Import] [s3:1003] Process download info at 'DownloadInfo': info# { DataETag: a3ed28bfb53c9214f635c51ed6b618c4 ProcessedBytes: 0 WrittenBytes: 0 WrittenRows: 0 ChecksumState: DownloadState: } 2025-06-30T09:21:25.853201Z node 67 :DATASHARD_RESTORE DEBUG: import_s3.cpp:516: [Import] [s3:1003] GetObject: key# /data_00.csv, range# 0-13 2025-06-30T09:21:25.853306Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:21:25.853694Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1003:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 269551625 REQUEST: GET /data_00.csv HTTP/1.1 HEADERS: Host: localhost:26749 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 09F0D619-5F01-4CDC-9726-6BF43ECEC9B6 amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-13 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 
x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /data_00.csv / 14 2025-06-30T09:21:25.865074Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6376: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: ERROR Error { Kind: WRONG_SHARD_STATE Reason: "Interrupted Restore operation [5000004:1003] while waiting to finish at 72075186233409546" } TxId: 1003 ExecLatency: 5 ProposeLatency: 6 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 276 } } 2025-06-30T09:21:25.865102Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-30T09:21:25.865135Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: ERROR Error { Kind: WRONG_SHARD_STATE Reason: "Interrupted Restore operation [5000004:1003] while waiting to finish at 72075186233409546" } TxId: 1003 ExecLatency: 5 ProposeLatency: 6 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 276 } } 2025-06-30T09:21:25.865152Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: ERROR Error { Kind: WRONG_SHARD_STATE Reason: "Interrupted Restore operation [5000004:1003] while waiting to finish at 72075186233409546" } TxId: 1003 ExecLatency: 5 ProposeLatency: 6 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 276 } } 2025-06-30T09:21:25.865372Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 337 RawX2: 287762811154 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 OpResult { Success: false Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-30T09:21:25.865381Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-30T09:21:25.865397Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Source { RawX1: 337 RawX2: 287762811154 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 OpResult { Success: false Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-30T09:21:25.865411Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TRestore TAborting, opId: 1003:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 337 RawX2: 287762811154 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 OpResult { Success: false Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-30T09:21:25.865428Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1003:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: Aborting, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 
2025-06-30T09:21:25.865432Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:21:25.865438Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:21:25.865446Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 133 -> 240 2025-06-30T09:21:25.865488Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 1003:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:25.866219Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:21:25.866320Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:21:25.866403Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:21:25.866413Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:21:25.866431Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:21:25.866437Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:21:25.866443Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:21:25.866446Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:21:25.866455Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-30T09:21:25.866462Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:21:25.866468Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:21:25.866473Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:21:25.866501Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 TestWaitNotification wait txId: 1003 2025-06-30T09:21:25.867278Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:21:25.867291Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 TestWaitNotification wait txId: 1004 2025-06-30T09:21:25.867309Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-30T09:21:25.867313Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-30T09:21:25.867430Z node 67 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:21:25.867457Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:21:25.867463Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [67:490:2460] 2025-06-30T09:21:25.867491Z node 67 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-30T09:21:25.867501Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:21:25.867505Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [67:490:2460] TestWaitNotification: OK eventTxId 1003 TestWaitNotification: OK eventTxId 1004 >> KqpScheme::ModifyPermissionsByRelativePathQueryClient [GOOD] >> KqpScheme::NEG_CreateTableWithUnsupportedStoreType >> TNodeBrokerTest::Test1000NodesSubscribers [GOOD] >> KqpScheme::CreateTableWithUniqConstraintPublicApi [GOOD] >> KqpScheme::CreateTableWithVectorIndex >> KqpScheme::RenameTable+СolumnTable [GOOD] >> KqpScheme::RenameTable-СolumnTable |81.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots |81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots |81.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots >> KqpScheme::CreateAndAlterTableWithPartitioningByLoadCompat [GOOD] >> KqpScheme::CreateAndAlterTableWithMinMaxPartitionsUncompat >> TImmediateControlsConfiguratorTests::TestModifiedControls [GOOD] >> TImmediateControlsConfiguratorTests::TestResetToDefault >> KqpScheme::CreateTableWithPgColumn [GOOD] >> KqpScheme::DisableResourcePoolClassifiers [GOOD] >> KqpScheme::DisableMetadataObjectsOnServerless >> KqpScheme::DropResourcePool [GOOD] >> KqpScheme::DropNonExistingResourcePool >> KqpScheme::DisableResourcePoolClassifiersOnServerless [GOOD] >> KqpScheme::DropDependentExternalDataSource [GOOD] >> KqpScheme::DropNonExistingExternalDataSource ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::Test1000NodesSubscribers [GOOD] Test command err: 2025-06-30T09:20:49.627823Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.631319Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.631357Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.631379Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:49.631408Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.631432Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.631449Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.634976Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.635027Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.635062Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.635091Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.635130Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.635154Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.635175Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.635234Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.635264Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.635282Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.636118Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.636143Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.636160Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.636174Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.636187Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.636228Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.637126Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.637224Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.637266Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.637297Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.637328Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.637343Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.637356Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 
2025-06-30T09:20:49.637369Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.637884Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.637963Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.637986Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.638007Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.638023Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.638054Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.638085Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:49.638178Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.638200Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.638579Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.638612Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.638631Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.638674Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.638692Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.641619Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.642135Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.642189Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.642394Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.642489Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.642562Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:49.642672Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.642812Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.642881Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.642932Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.642965Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.643377Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.643599Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.643775Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.643891Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.644058Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.644162Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:49.655173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:49.655194Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:49.658529Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:49.658861Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:49.658923Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:49.659056Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:49.659415Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:49.659438Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:49.659465Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:49.659474Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.659478Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:49.659487Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:49.659509Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:49.659512Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:49.659515Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:49.659519Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:49.659530Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:134 ... 025-06-30T09:21:26.020368Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:26.033973Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.034008Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.034031Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5584:7164] 2025-06-30T09:21:26.040734Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9690:11217], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:26.040834Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:26.040843Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:26.040861Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:26.054204Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.054235Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.054252Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5589:7169] 2025-06-30T09:21:26.059242Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9692:11219], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:26.059331Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:26.059340Z node 1 
:NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:26.059358Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:26.075154Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.075191Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.075209Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5594:7174] 2025-06-30T09:21:26.079820Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9694:11221], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:26.079907Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:26.079916Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:26.079933Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:26.093027Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.093064Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.093080Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5599:7179] 2025-06-30T09:21:26.105039Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9696:11223], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:26.105165Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:26.105174Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:26.105194Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:26.120375Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.120407Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.120429Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5604:7184] 2025-06-30T09:21:26.125379Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9698:11225], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:26.125471Z node 1 
:NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:26.125480Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:26.125498Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:26.138638Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.138667Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.138697Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5609:7189] 2025-06-30T09:21:26.144358Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9700:11227], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:26.144452Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:26.144460Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:26.144478Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:26.159160Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.159195Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.159213Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5614:7194] 2025-06-30T09:21:26.164720Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9702:11229], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:26.164811Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:26.164820Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:26.164838Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:26.179611Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.179666Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.179690Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5619:7199] 2025-06-30T09:21:26.190470Z 
node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9704:11231], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:26.190561Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:26.190570Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:26.190589Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:26.209699Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.209743Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.209762Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5624:7204] 2025-06-30T09:21:26.215569Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9706:11233], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:26.215683Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:26.215692Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:26.215713Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:26.229794Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.229831Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:26.229846Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5629:7209] >> KqpScheme::CreatedAt [GOOD] >> KqpScheme::DisableCreateExternalDataSource >> KqpConstraints::AlterTableAddNotNullWithDefault [GOOD] >> TImmediateControlsConfiguratorTests::TestResetToDefault [GOOD] >> TImmediateControlsConfiguratorTests::TestMaxLimit >> KqpScheme::InvalidationAfterDropCreate [GOOD] >> KqpScheme::InvalidationAfterDropCreateCompatSchema >> KqpScheme::CreateAndAlterTableWithMinMaxPartitionsCompat ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateTableWithPgColumn [GOOD] Test command err: Trying to start YDB, gRPC: 9269, MsgBus: 26479 2025-06-30T09:21:18.335189Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670048073560545:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:18.438378Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047d9/r3tmp/tmp3coA57/pdisk_1.dat 2025-06-30T09:21:18.479543Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9269, node 1 2025-06-30T09:21:18.535029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:18.535041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:18.535043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:18.535092Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26479 TClient is connected to server localhost:26479 2025-06-30T09:21:18.647047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:18.647083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:21:18.652701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:18.673378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:18.679264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:21:18.683801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:18.733517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:18.762233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:18.779094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:18.895824Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670048073561934:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.895860Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.944969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.958402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.973120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.985748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.001215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.015730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.031677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.051887Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670052368529882:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.051908Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.051908Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.052840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:19.054957Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670052368529889:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:21:19.152183Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670052368529940:3400] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:19.323703Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:19.413535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.467768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:19.468780Z node 1 :HIV ... s undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:26.546374Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:26.579231Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.593149Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:26.847377Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670079929013342:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:26.847401Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:26.854528Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.865050Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.896941Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.908996Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.917639Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.935302Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.945571Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.964979Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670079929013996:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:26.965008Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:26.965205Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670079929014001:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:26.966265Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:26.972663Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670079929014003:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:27.058068Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670084223981350:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:27.239689Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.266105Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.286710Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.314139Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.331254Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.350968Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.371345Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.387119Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected 
event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:27.393041Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.412458Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.478669Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.496013Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715682:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpScheme::CreateExternalDataSourceValidationLocation [GOOD] >> KqpScheme::UseNonexistentTable [GOOD] >> KqpScheme::UseDroppedTable >> KqpScheme::NEG_CreateTableWithUnsupportedStoreType [GOOD] >> KqpScheme::OlapSharding_KeyOnly >> KqpScheme::AlterColumnTableTiering [GOOD] >> KqpScheme::AlterAsyncReplication >> KqpScheme::AlterTableAddUniqIndexPublicApiFeatureOff [GOOD] >> KqpScheme::AlterTransfer_QueryService [GOOD] >> KqpScheme::InvalidationAfterDropCreateTable2MultiStageTx ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::DisableResourcePoolClassifiersOnServerless [GOOD] Test command err: Trying to start YDB, gRPC: 19142, MsgBus: 3091 2025-06-30T09:21:08.928191Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670006025097856:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.928264Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004811/r3tmp/tmpR2BfxR/pdisk_1.dat 2025-06-30T09:21:08.979784Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19142, node 1 2025-06-30T09:21:08.999337Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.999352Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.999354Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-30T09:21:08.999399Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3091 2025-06-30T09:21:09.026053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:09.026091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:09.026764Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3091 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:09.084332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:09.096245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:09.164603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:09.185022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:09.242401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:09.314196Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670010320066532:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:09.314235Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:09.369901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:09.381831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:09.437489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:09.445884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:09.459675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:09.473510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:09.487983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:09.506272Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670010320067189:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:09.506321Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:09.506341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670010320067194:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:09.507194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:09.513714Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670010320067196:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:09.601716Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670010320067247:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:09.779019Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670010320067518:3573] txid# 281474976715672, issues: { message: "(NKikimr::NExternalSource::TExternalSourceException) External source with type ObjectStorage is disabled. Please contact your system administrator to enable it" severity: 1 } 2025-06-30T09:21:09.927206Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:09.928870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:10.033339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/ac ... /test-shared databaseId: /Root/test-shared pool id: default 2025-06-30T09:21:27.122579Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=9&id=NzA4MzA1MDUtZjQ0YTBjMzMtOTBkNWJmM2ItM2Q1MmZiYTg=, ActorId: [9:7521670086917693008:2771], ActorState: ReadyState, TraceId: 01jz028cjj7y5mfe6mtkmhak92, request placed into pool from cache: default 2025-06-30T09:21:27.122598Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:618: SessionId: ydb://session/3?node_id=9&id=NzA4MzA1MDUtZjQ0YTBjMzMtOTBkNWJmM2ItM2Q1MmZiYTg=, ActorId: [9:7521670086917693008:2771], ActorState: ExecuteState, TraceId: 01jz028cjj7y5mfe6mtkmhak92, Sending CompileQuery request 2025-06-30T09:21:27.125125Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][9:7521670065442854738:2547][/Root/test-shared/.metadata/initialization/migrations] Sync is incomplete in one of the ring groups: cookie# 34 2025-06-30T09:21:27.125140Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][9:7521670065442854738:2547][/Root/test-shared/.metadata/initialization/migrations] Sync is incomplete in one of the ring groups: cookie# 35 2025-06-30T09:21:27.125426Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7521670086917693011:2773], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]
: Error: LookupError, code: 2005 2025-06-30T09:21:27.126114Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=9&id=NzA4MzA1MDUtZjQ0YTBjMzMtOTBkNWJmM2ItM2Q1MmZiYTg=, ActorId: [9:7521670086917693008:2771], ActorState: ExecuteState, TraceId: 01jz028cjj7y5mfe6mtkmhak92, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-06-30T09:21:27.126124Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=9&id=NzA4MzA1MDUtZjQ0YTBjMzMtOTBkNWJmM2ItM2Q1MmZiYTg=, ActorId: [9:7521670086917693008:2771], ActorState: ExecuteState, TraceId: 01jz028cjj7y5mfe6mtkmhak92, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:27.126127Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=9&id=NzA4MzA1MDUtZjQ0YTBjMzMtOTBkNWJmM2ItM2Q1MmZiYTg=, ActorId: [9:7521670086917693008:2771], ActorState: ExecuteState, TraceId: 01jz028cjj7y5mfe6mtkmhak92, EndCleanup, isFinal: 0 2025-06-30T09:21:27.126169Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2368: SessionId: ydb://session/3?node_id=9&id=NzA4MzA1MDUtZjQ0YTBjMzMtOTBkNWJmM2ItM2Q1MmZiYTg=, ActorId: [9:7521670086917693008:2771], ActorState: ExecuteState, TraceId: 01jz028cjj7y5mfe6mtkmhak92, Sent query response back to proxy, proxyRequestId: 68, proxyId: [9:7521670061147886494:2147] 2025-06-30T09:21:27.126405Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-06-30T09:21:27.126477Z node 9 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]
: Error: LookupError, code: 2005 2025-06-30T09:21:27.126489Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2413: SessionId: ydb://session/3?node_id=9&id=NzA4MzA1MDUtZjQ0YTBjMzMtOTBkNWJmM2ItM2Q1MmZiYTg=, ActorId: [9:7521670086917693008:2771], ActorState: ReadyState, Session closed due to explicit close event 2025-06-30T09:21:27.126495Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=9&id=NzA4MzA1MDUtZjQ0YTBjMzMtOTBkNWJmM2ItM2Q1MmZiYTg=, ActorId: [9:7521670086917693008:2771], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:27.126497Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=9&id=NzA4MzA1MDUtZjQ0YTBjMzMtOTBkNWJmM2ItM2Q1MmZiYTg=, ActorId: [9:7521670086917693008:2771], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-30T09:21:27.126501Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2644: SessionId: ydb://session/3?node_id=9&id=NzA4MzA1MDUtZjQ0YTBjMzMtOTBkNWJmM2ItM2Q1MmZiYTg=, ActorId: [9:7521670086917693008:2771], ActorState: unknown state, Cleanup temp tables: 0 2025-06-30T09:21:27.126518Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2735: SessionId: ydb://session/3?node_id=9&id=NzA4MzA1MDUtZjQ0YTBjMzMtOTBkNWJmM2ItM2Q1MmZiYTg=, ActorId: [9:7521670086917693008:2771], ActorState: unknown state, Session actor destroyed 2025-06-30T09:21:27.365680Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM= 2025-06-30T09:21:27.365876Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: unknown state, session actor bootstrapped 2025-06-30T09:21:27.365912Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: ReadyState, TraceId: 01jz028ct5cha28tfbnmqyc1bg, received request, proxyRequestId: 70 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: SELECT * FROM `//Root/test-shared/.metadata/initialization/migrations`; rpcActor: [9:7521670086917693019:2778] database: /Root/test-shared databaseId: /Root/test-shared pool id: default 2025-06-30T09:21:27.365916Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: ReadyState, TraceId: 01jz028ct5cha28tfbnmqyc1bg, request placed into pool from cache: default 2025-06-30T09:21:27.365936Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:618: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: ExecuteState, TraceId: 01jz028ct5cha28tfbnmqyc1bg, Sending CompileQuery request 2025-06-30T09:21:27.369143Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][9:7521670065442854738:2547][/Root/test-shared/.metadata/initialization/migrations] Sync is incomplete in one of the ring groups: cookie# 36 2025-06-30T09:21:27.369180Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: 
[main][9:7521670065442854738:2547][/Root/test-shared/.metadata/initialization/migrations] Sync is incomplete in one of the ring groups: cookie# 37 2025-06-30T09:21:27.369895Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7521670086917693021:2779], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]
: Error: LookupError, code: 2005 2025-06-30T09:21:27.370804Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: ExecuteState, TraceId: 01jz028ct5cha28tfbnmqyc1bg, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-06-30T09:21:27.370817Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: ExecuteState, TraceId: 01jz028ct5cha28tfbnmqyc1bg, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:27.370820Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: ExecuteState, TraceId: 01jz028ct5cha28tfbnmqyc1bg, EndCleanup, isFinal: 0 2025-06-30T09:21:27.370861Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2368: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: ExecuteState, TraceId: 01jz028ct5cha28tfbnmqyc1bg, Sent query response back to proxy, proxyRequestId: 70, proxyId: [9:7521670061147886494:2147] 2025-06-30T09:21:27.371080Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-06-30T09:21:27.371150Z node 9 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]
: Error: LookupError, code: 2005 2025-06-30T09:21:27.371181Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2413: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: ReadyState, Session closed due to explicit close event 2025-06-30T09:21:27.371186Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:27.371189Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-30T09:21:27.371191Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2644: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: unknown state, Cleanup temp tables: 0 2025-06-30T09:21:27.371209Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2735: SessionId: ydb://session/3?node_id=9&id=MmRjNDUzMzctMjJiZDE1YTItNTFlYjMyOTgtZDE0YzQ4MmM=, ActorId: [9:7521670086917693018:2777], ActorState: unknown state, Session actor destroyed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpConstraints::AlterTableAddNotNullWithDefault [GOOD] Test command err: Trying to start YDB, gRPC: 2311, MsgBus: 26889 2025-06-30T09:21:13.378347Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670026823995612:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:13.378419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047f6/r3tmp/tmpN2TV8W/pdisk_1.dat 2025-06-30T09:21:13.509096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:13.509151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:13.510290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:13.513406Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2311, node 1 2025-06-30T09:21:13.562926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:13.562938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:13.562939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:13.562979Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26889 TClient is connected to server localhost:26889 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:13.731856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:13.734984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:13.747195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:13.818716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:13.855133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:13.925382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:14.159312Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670031118964395:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.159345Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.291403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.326418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.361181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.378805Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:14.379302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.399498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.423565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.439131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.470502Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670031118965060:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.470535Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.470674Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670031118965065:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.471761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:14.477282Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670031118965067:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:14.557182Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670031118965119:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:14.755623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 23486, MsgBus: 16334 2025-06-30T09:21:15.276997Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670033184763766:2154];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047f6/r3tmp/tmpI5ZMC7/pdisk_1.dat 2025-06-30T09:21:15.284609Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=access ... existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047f6/r3tmp/tmpeHzsQm/pdisk_1.dat 2025-06-30T09:21:23.507755Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7211, node 7 2025-06-30T09:21:23.522541Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:23.522555Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:23.522558Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:23.522618Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19568 TClient is connected to server localhost:19568 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:21:23.591777Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:23.591816Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:23.592288Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:23.592758Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:23.595002Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:23.603618Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:23.615826Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:23.645308Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:23.661250Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.953866Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670070603547857:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:23.953924Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:23.960116Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.970891Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.984167Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.001989Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.018095Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.028409Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.041517Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.067615Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670074898515806:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:24.067650Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:24.067753Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670074898515811:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:24.068685Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:24.071024Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670074898515813:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:24.151966Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670074898515864:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:24.415933Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.494922Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:24.521094Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710757:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:24.567040Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) >> KqpOlapTypes::Decimal35 [GOOD] >> KqpOlapTypes::AttributeNegative >> KqpAcl::AclDml+UseSink-IsOlap [GOOD] >> KqpAcl::AclDml-UseSink+IsOlap >> KqpScheme::RenameTable-СolumnTable [GOOD] >> KqpScheme::RenameTableWithVectorIndex >> KqpScheme::CreateTableWithVectorIndex [GOOD] >> KqpScheme::CreateTableWithVectorIndexCaseIncentive ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterTableAddUniqIndexPublicApiFeatureOff [GOOD] Test command err: Trying to start YDB, gRPC: 16947, MsgBus: 29085 2025-06-30T09:21:18.093357Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670049519774282:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:18.094819Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047e1/r3tmp/tmpGjQpgW/pdisk_1.dat 2025-06-30T09:21:18.182633Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 16947, node 1 2025-06-30T09:21:18.199340Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:18.214799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:18.218987Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:18.227187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:18.234110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:18.234123Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:18.234126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:18.234170Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29085 TClient is connected to server localhost:29085 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:18.368619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:18.371794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:21:18.383808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.439330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:18.528044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:18.568000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:18.748942Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670049519775832:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.748970Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.802633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.815108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.830164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.888543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.899725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.961533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.972599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:18.992852Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670049519776488:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.992896Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.993244Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670049519776493:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:18.994340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:19.001302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670049519776495:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:19.054502Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670053814743842:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:19.102244Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:19.244001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:19.307505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/r ... d=5&id=ZmU0ODY3LTgzODU3YjYwLWY0ZTVlZTJiLTg5MGM0NTU1, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-30T09:21:26.907620Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 27031, MsgBus: 11792 2025-06-30T09:21:27.142888Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670086883115548:2245];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047e1/r3tmp/tmpc0qLyh/pdisk_1.dat 2025-06-30T09:21:27.148954Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:27.163531Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:27.165848Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7521670086883115318:2080] 1751275287137572 != 1751275287137575 TServer::EnableGrpc on GrpcPort 27031, node 6 2025-06-30T09:21:27.174904Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:27.174919Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:27.174922Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:27.174971Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11792 TClient is connected to server localhost:11792 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:21:27.247260Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:27.247290Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:27.247656Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:27.249185Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:27.251400Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:21:27.259666Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:27.286343Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.325778Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:21:27.345776Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.530117Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670086883116908:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.530142Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.539182Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.548615Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.605152Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.617926Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.631446Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.645919Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.659574Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.674893Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670086883117564:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.674914Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670086883117569:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.674915Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.675745Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:27.679028Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670086883117571:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:27.765367Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670086883117622:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:27.966899Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) |81.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |81.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots >> TImmediateControlsConfiguratorTests::TestMaxLimit [GOOD] >> TImmediateControlsConfiguratorTests::TestDynamicMap >> KqpScheme::CreateAndAlterTableWithMinMaxPartitionsUncompat [GOOD] >> KqpAcl::AclRevoke-UseSink+IsOlap [GOOD] >> KqpAcl::AlterDatabasePrivilegesRequiredToChangeSchemeLimits+AsClusterAdmin ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateExternalDataSourceValidationLocation [GOOD] Test command err: Trying to start YDB, gRPC: 10340, MsgBus: 23747 2025-06-30T09:21:07.871355Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669999727829910:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:07.872252Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004846/r3tmp/tmpnX0i3z/pdisk_1.dat TServer::EnableGrpc on GrpcPort 10340, node 1 2025-06-30T09:21:07.928633Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:07.940621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:07.940639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:07.940641Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:07.940706Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23747 2025-06-30T09:21:07.972971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:07.973002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:07.974100Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23747 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.010891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:08.029562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.049984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.076712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.091430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.279714Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670004022798769:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.279745Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.336962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.347615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.360190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.375553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.388925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.403550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.416889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.440766Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670004022799421:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.440807Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.440874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670004022799426:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.441966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.444911Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670004022799428:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:08.509173Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670004022799479:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:08.699854Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037911 not found 2025-06-30T09:21:08.703952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 7105, MsgBus: 9979 2025-06-30T09:21:08.992575Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670005650597734:2200];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004846/r3tmp/tmpeMKk5E/pdisk_1.dat 2025-06-30T09:21:08.994471Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:09.007161Z node 2 ... Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:26.046955Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:26.135769Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521670081436675587:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 12384, MsgBus: 28636 2025-06-30T09:21:27.113262Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670087777901450:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:27.114418Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004846/r3tmp/tmp1uZLhi/pdisk_1.dat 2025-06-30T09:21:27.160281Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12384, node 6 2025-06-30T09:21:27.171726Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:27.171739Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:27.171741Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:27.171795Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28636 2025-06-30T09:21:27.226998Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:27.227034Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:27.231081Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28636 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
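
[editor's note] The most repeated sequence in this section is the default resource pool race: a fetch reports NOT_FOUND, a create is proposed, a concurrent creator wins, and the loser's TX_PROXY error says "path exist, request accepts it" while the pool creator schedules a retry and rechecks ("doublechecking"). The control flow amounts to an idempotent ensure-exists loop; the sketch below illustrates that pattern in plain C++. All names here (FetchPool, CreatePool, Status) are illustrative stand-ins, not YDB APIs, and the stubs are wired so one pass through the race is exercised.

    // Sketch only: create-if-missing with "already exists counts as success",
    // mirroring the pool-creation race visible in the log above.
    #include <chrono>
    #include <iostream>
    #include <thread>

    enum class Status { Ok, NotFound, AlreadyExists, Retryable };

    // Hypothetical stand-ins for "fetch pool" and "create pool" calls.
    Status FetchPool()  { return Status::NotFound; }      // first lookup misses
    Status CreatePool() { return Status::AlreadyExists; } // a concurrent creator won

    bool EnsureDefaultPool(int maxAttempts = 5) {
        auto delay = std::chrono::milliseconds(50);
        for (int attempt = 1; attempt <= maxAttempts; ++attempt) {
            switch (FetchPool()) {
                case Status::Ok:       return true;   // pool already present
                case Status::NotFound: break;         // fall through to create
                default:               return false;  // unexpected failure
            }
            Status st = CreatePool();
            if (st == Status::Ok || st == Status::AlreadyExists) {
                // "path exist, request accepts it": losing the race is still
                // success. (YDB additionally re-reads the pool afterwards,
                // the "doublechecking" step; omitted here.)
                return true;
            }
            // "Scheduled retry for error": back off before the next attempt.
            std::this_thread::sleep_for(delay);
            delay *= 2;
        }
        return false;
    }

    int main() { std::cout << std::boolalpha << EnsureDefaultPool() << "\n"; }

Under these stubs the warnings are expected noise: every fresh test cluster hits NOT_FOUND once, and at most one of the racing sessions actually creates the pool.
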
2025-06-30T09:21:27.341408Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:27.347308Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:27.364037Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:27.398111Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:27.424263Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:27.492349Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:27.642155Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670087777903026:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.642185Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.653064Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.661804Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.672979Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.689347Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.701317Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.715403Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.729246Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.744981Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670087777903682:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.745024Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.745041Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670087777903687:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.745798Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:27.748291Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670087777903689:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:27.842241Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670087777903740:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:28.007921Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670092072871307:3572] txid# 281474976715672, issues: { message: "(NKikimr::NExternalSource::TExternalSourceException) It is not allowed to access hostname \'my-bucket\'" severity: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterTransfer_QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 64645, MsgBus: 8762 2025-06-30T09:21:07.778122Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669998406149386:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:07.778162Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004842/r3tmp/tmpVhkSSk/pdisk_1.dat TServer::EnableGrpc on GrpcPort 64645, node 1 2025-06-30T09:21:07.834468Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:07.841054Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:07.841067Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:07.841069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:07.841117Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8762 2025-06-30T09:21:07.879885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:07.879916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:07.880914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8762 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:07.911466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:07.925909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:07.952198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:07.976730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:07.989104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:08.205389Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670002701118246:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.205414Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.273724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.283194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.298518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.311205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.325704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.341633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.354372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:08.371840Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670002701118900:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.371879Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.371955Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670002701118905:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.373016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:08.381398Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670002701118907:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:08.435469Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670002701118958:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 65213, MsgBus: 3397 2025-06-30T09:21:08.906610Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670003851768554:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.907805Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004842/r3tmp/tmpQjSQJ1/pdisk_1.dat 2025-06-30T09:21:08.925525Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65213, node 2 2025-06-30T09:21:08.936667Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.936683Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.936686Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.936742Z node 2 :NET_CLASSIFIER ERROR ... s { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { TokenSecretName: "mysecret" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> { RETURN <| id:$x._offset |> };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.056287Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976715687 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { TokenSecretName: "mysecret" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> { RETURN <| id:$x._offset |> };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.056482Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-30T09:21:28.057699Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715687, at schemeshard: 72057594046644480 2025-06-30T09:21:28.066979Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670091666475573:3934] txid# 281474976715688, issues: { message: "User is not set" severity: 1 } 2025-06-30T09:21:28.067179Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=6&id=YWE0ZTJmOS05NDZhYjU5Yi05ZTIyOWE3LWM5ODA0NDJj, ActorId: [6:7521670087371507682:2470], ActorState: ExecuteState, TraceId: 01jz028dfy5bcgvswv1499bdc1, Create QueryResponse for 
error on request, msg: 2025-06-30T09:21:28.069458Z node 6 :REPLICATION_CONTROLLER DEBUG: controller.cpp:589: [controller 72075186224037925] Process queues: boot# 1: stop# 0 2025-06-30T09:21:28.072392Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670091666475587:3941] txid# 281474976715690, issues: { message: "User is not set" severity: 1 } 2025-06-30T09:21:28.072504Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=6&id=YWE0ZTJmOS05NDZhYjU5Yi05ZTIyOWE3LWM5ODA0NDJj, ActorId: [6:7521670087371507682:2470], ActorState: ExecuteState, TraceId: 01jz028dg4fmd040s7wya921ac, Create QueryResponse for error on request, msg: 2025-06-30T09:21:28.077472Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTransfer, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:28.077983Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976715692 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> { RETURN <| id:$x._offset |> };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.078038Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976715692 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> { RETURN <| id:$x._offset |> };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.078192Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-30T09:21:28.083530Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTransfer, opId: 281474976715693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:28.084025Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976715693 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" Password: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> { RETURN <| id:$x._offset |> };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.084084Z node 6 
:REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976715693 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" Password: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> { RETURN <| id:$x._offset |> };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.084241Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-30T09:21:28.097039Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTransfer, opId: 281474976715694:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:28.097735Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976715694 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" PasswordSecretName: "password_secret_name" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> { RETURN <| id:$x._offset |> };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.097807Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976715694 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" PasswordSecretName: "password_secret_name" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> { RETURN <| id:$x._offset |> };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.098921Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-30T09:21:28.110921Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTransfer, opId: 281474976715695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:28.111538Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976715695 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: 
"/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> { RETURN <| id:$x._offset |> };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.111600Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976715695 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> { RETURN <| id:$x._offset |> };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.111794Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-30T09:21:28.128910Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTransfer, opId: 281474976715696:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:28.129465Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976715696 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> {\n RETURN CAST($x as String);\n };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.129523Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976715696 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> {\n RETURN CAST($x as String);\n };\n" } RunAsUser: "root@builtin" } } 2025-06-30T09:21:28.129571Z node 6 :REPLICATION_CONTROLLER NOTICE: tx_alter_replication.cpp:110: [controller 72075186224037925][TxAlterReplication] Alter replication: rid# 1, pathId# [OwnerId: 72057594046644480, LocalPathId: 19] 2025-06-30T09:21:28.129727Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete >> KqpScheme::DropNonExistingExternalDataSource [GOOD] >> KqpScheme::OlapSharding_KeyOnly [GOOD] >> KqpScheme::DropNonExistingResourcePool [GOOD] >> KqpScheme::DropResourcePoolClassifier >> KqpScheme::InvalidationAfterDropCreateCompatSchema [GOOD] >> KqpScheme::InvalidationAfterDropCreateTable2 >> KqpScheme::CreateAndAlterTableWithMinMaxPartitionsCompat [GOOD] >> KqpScheme::CreateAndAlterTableWithBloomFilterUncompat >> KqpScheme::DisableCreateExternalDataSource [GOOD] >> 
KqpScheme::CreateTransfer_QueryService >> KqpConstraints::SerialTypeNegative1 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateAndAlterTableWithMinMaxPartitionsUncompat [GOOD] Test command err: Trying to start YDB, gRPC: 14610, MsgBus: 29597 2025-06-30T09:21:20.386467Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670057639682532:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:20.386496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047b6/r3tmp/tmpx2cm3C/pdisk_1.dat 2025-06-30T09:21:20.491373Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:20.496396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:20.496423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14610, node 1 2025-06-30T09:21:20.500655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:20.523889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:20.523902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:20.523904Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:20.523953Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29597 TClient is connected to server localhost:29597 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:20.621366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:20.625684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:20.630042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:20.660823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:20.695518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:20.711808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.871049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670057639684098:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.871086Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.931535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.943019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.954538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.971920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.993144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.016222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.036595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.059547Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670061934652045:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.059584Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.059682Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670061934652050:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.060614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:21.065132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:21.065198Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670061934652052:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:21.143826Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670061934652103:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:21.360355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.392164Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 22324, MsgBus: 2788 2025-06-30T09:21:21.782288Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670061219762811:2075];send_to=[0:7307199536658146131:7762515 ... ersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:27.741222Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:27.741254Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:27.742283Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:27.747772Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:27.755360Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:27.765854Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:27.793808Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:27.807419Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:28.133232Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670091449933846:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.133253Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.144101Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.154420Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.212748Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.279238Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.291428Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.302891Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.317042Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.337001Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670091449934501:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.337025Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.337068Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670091449934506:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.337883Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:28.343768Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670091449934508:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:28.415572Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670091449934559:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:28.598796Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient::Ls request: /Root/TableWithMinMaxPartitions TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableWithMinMaxPartitions" PathId: 17 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715672 CreateStep: 1751275288651 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithMinMaxPartitions" Columns { Name: "Key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "String" TypeId: 4097 Id: 2 NotNull: false IsBuildInProgress: fa... (TRUNCATED) 2025-06-30T09:21:28.623538Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) TClient::Ls request: /Root/TableWithMinMaxPartitions TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableWithMinMaxPartitions" PathId: 17 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715672 CreateStep: 1751275288651 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithMinMaxPartitions" Columns { Name: "Key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "String" TypeId: 4097 Id: 2 NotNull: false IsBuildInProgress: fa... 
(TRUNCATED) 2025-06-30T09:21:28.641040Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::DropNonExistingExternalDataSource [GOOD] Test command err: Trying to start YDB, gRPC: 28381, MsgBus: 27568 2025-06-30T09:21:20.985421Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670056497586687:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:20.986927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047b0/r3tmp/tmpk9hW3V/pdisk_1.dat 2025-06-30T09:21:21.070841Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28381, node 1 2025-06-30T09:21:21.086985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:21.087012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:21.089698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:21.093248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:21.093259Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:21.093261Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:21.093301Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27568 TClient is connected to server localhost:27568 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:21.208265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:21.217908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:21.236231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.272403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.319724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.340035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.508739Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670060792555541:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.508765Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.565178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.587886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.609551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.631746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.650301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.673957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.688774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.708085Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670060792556196:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.708144Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.708280Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670060792556204:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.709492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:21.716298Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670060792556206:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:21.804253Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670060792556257:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:21.987622Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
: Error: Type annotation, code: 1030
:3:47: Error: At function: KiAlterTable!
:3:47: Error: AlterTable : db.[/Root/KeyValue] Column: "Key" is a key column. Key column drop is not supported Trying to start YDB, gRPC: 13121, MsgBus: 26533 2025-06-30T09:21:22.388904Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670065491280167:2088];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:22.390060Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047b0/r3tmp/tmpotHrwe/pdisk_1.dat 2025-06-30T09:21:22.421609Z node 2 :IMPORT WARN: ... 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:352) 2025-06-30T09:21:27.525587Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521670088121678756:3611] txid# 281474976715674, issues: { message: "Other entities depend on this data source, please remove them at the beginning: /Root/ExternalTable" severity: 1 } 2025-06-30T09:21:27.693186Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 4477, MsgBus: 25783 2025-06-30T09:21:27.907510Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670085915843164:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:27.907858Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047b0/r3tmp/tmpyVuzTF/pdisk_1.dat 2025-06-30T09:21:27.941531Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4477, node 6 2025-06-30T09:21:27.971893Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:27.971908Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:27.971909Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:27.971951Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25783 2025-06-30T09:21:28.016512Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:28.016545Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:28.017533Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25783 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:28.067902Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:28.069776Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:28.078065Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:28.103395Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:28.166342Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:28.179256Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:28.365403Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670090210812024:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.365426Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.378774Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.390169Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.400725Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.415061Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.473584Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.486458Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.543045Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.565497Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670090210812681:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.565534Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.565674Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670090210812686:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.567028Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:28.570170Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670090210812688:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:28.659641Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670090210812739:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:28.856301Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670090210813009:3572] txid# 281474976715672, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } >> KqpScheme::UseDroppedTable [GOOD] >> KqpScheme::UnknownFamilyTest >> TConsoleTests::TestRemoveSharedTenantWithServerlessTenants [GOOD] >> TConsoleTests::TestRemoveSharedTenantAfterRemoveServerlessTenant |81.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |81.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test >> KqpScheme::CreateFamilyWithCompressionLevel >> TImmediateControlsConfiguratorTests::TestDynamicMap [GOOD] |81.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |81.3%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::OlapSharding_KeyOnly [GOOD] Test command err: Trying to start YDB, gRPC: 4085, MsgBus: 28129 2025-06-30T09:21:21.061174Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670061483882306:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:21.061341Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047a7/r3tmp/tmpJeWxHu/pdisk_1.dat 2025-06-30T09:21:21.130306Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:21.132542Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670061483882205:2080] 1751275281058689 != 1751275281058692 TServer::EnableGrpc on GrpcPort 4085, node 1 2025-06-30T09:21:21.160697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:21.160713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:21.160716Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:21.160762Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28129 2025-06-30T09:21:21.211165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-30T09:21:21.211201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:21.212352Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28129 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:21.265482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:21.269015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:21:21.272837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.344520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.380967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:21.401346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.492928Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670061483883805:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.492950Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.558072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.579976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.593545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.609205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.620602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.636242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.653142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.720044Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670061483884464:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.720076Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.720151Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670061483884469:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.721025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:21.724379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:21.724460Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670061483884471:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:21.801534Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670061483884522:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:22.061937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.063669Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 8751, MsgBus: 7046 test_client.cpp: ... -06-30T09:21:26.931660Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:26.931668Z node 4 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:21:26.931675Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:21:26.931681Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:21:26.931767Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:21:26.931774Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:21:26.931788Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:21:26.931793Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:21:26.968688Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[4:7521670081959240698:2480];ev=NActors::IEventHandle;tablet_id=72075186224037922;tx_id=281474976715672;this=19406704896992;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275286967;max=18446744073709551615;plan=0;src=[4:7521670077664271067:2140];cookie=352:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=18;result=not_found; 2025-06-30T09:21:26.982154Z node 4 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=18;result=not_found; 2025-06-30T09:21:26.985927Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:21:27.015475Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:27.027613Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Trying to start YDB, gRPC: 65228, MsgBus: 4873 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047a7/r3tmp/tmpD41LyT/pdisk_1.dat 2025-06-30T09:21:27.392390Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:27.402260Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:27.403099Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7521670085976684570:2080] 1751275287363131 != 1751275287363134 TServer::EnableGrpc on GrpcPort 65228, node 5 2025-06-30T09:21:27.412973Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:27.412986Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:27.412989Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:27.413047Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4873 2025-06-30T09:21:27.483336Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:27.483375Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:27.487275Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4873 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:27.572665Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:27.575674Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:27.939180Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521670085976685192:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.939212Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Trying to start YDB, gRPC: 18584, MsgBus: 16535 2025-06-30T09:21:28.344982Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670090504681064:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:28.345013Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047a7/r3tmp/tmp5vTll4/pdisk_1.dat 2025-06-30T09:21:28.361329Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18584, node 6 2025-06-30T09:21:28.371168Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:28.371182Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:28.371184Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:28.371230Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16535 TClient is connected to server localhost:16535 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:21:28.450131Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:28.450347Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:28.450362Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:28.451944Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:28.811171Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670090504681651:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.811195Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.816340Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670090504681671:2296] txid# 281474976715658, issues: { message: "sharding column name have to been primary key column: Value1" severity: 1 } |81.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator >> KqpOlapTypes::AttributeNegative [GOOD] >> KqpScheme::InvalidationAfterDropCreateTable2MultiStageTx [GOOD] >> KqpScheme::InvalidationAfterDropCreateTable2NoEffects |81.3%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator >> KqpSysColV1::InnerJoinTables >> KqpOlapScheme::DropColumn >> KqpScheme::CreateTableWithVectorIndexCaseIncentive [GOOD] >> KqpScheme::RenameTableWithVectorIndex [GOOD] >> KqpScheme::ResourcePoolClassifiersValidation >> TImportTests::CompletedImportEndTime [GOOD] >> TImportTests::CorruptedPermissions |81.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/sysview/unittest >> KqpAcl::AlterDatabasePrivilegesRequiredToChangeSchemeLimits+AsClusterAdmin [GOOD] >> KqpAcl::AlterDatabasePrivilegesRequiredToChangeSchemeLimits-AsClusterAdmin >> KqpScheme::BuildingUniqIndexDeniesTableModificationsSql [GOOD] >> KqpScheme::ChangefeedAwsRegion ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpOlapTypes::AttributeNegative [GOOD] Test command err: Trying to start YDB, gRPC: 2638, MsgBus: 21694 2025-06-30T09:21:22.435629Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670064735874892:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:22.439770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00478a/r3tmp/tmpDmq8Zg/pdisk_1.dat 2025-06-30T09:21:22.508532Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2638, node 1 2025-06-30T09:21:22.537592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:22.537607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:22.537609Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:22.537653Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21694 2025-06-30T09:21:22.587118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:22.587152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:22.591085Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21694 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:22.661476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:22.675280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 CREATE TABLE `/Root/TableWithFamily` (Key Uint64 NOT NULL, Value1 String FAMILY family1, Value2 Uint32 FAMILY family1, PRIMARY KEY (Key), FAMILY family1 (COMPRESSION="lz4")) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:21:22.959362Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670064735875473:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.959390Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:23.008820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:23.023355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:23.023412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:23.023477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:23.023497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:23.023514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:23.023536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:23.023554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:23.023573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:23.023594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:23.023613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:23.023637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:23.023655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670069030842831:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:23.025094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:23.025108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:23.025124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:23.025130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:23.025154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:23.025160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:23.025175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:23.025180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:23.025189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:23.025194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:23.025222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:23.025227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:23.025249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:23.025260Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:23.025279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:23.025285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:23.025293Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id= ... 7859Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275288000, txId: 18446744073709551615] shutting down 2025-06-30T09:21:28.106940Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275288056, txId: 18446744073709551615] shutting down 2025-06-30T09:21:28.162994Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275288133, txId: 18446744073709551615] shutting down 2025-06-30T09:21:28.207860Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275288189, txId: 18446744073709551615] shutting down 2025-06-30T09:21:28.264376Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275288245, txId: 18446744073709551615] shutting down Trying to start YDB, gRPC: 6283, MsgBus: 21735 2025-06-30T09:21:28.611428Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670092339814784:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:28.611590Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00478a/r3tmp/tmpD2Yy0n/pdisk_1.dat 2025-06-30T09:21:28.628055Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6283, node 6 2025-06-30T09:21:28.650976Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:28.650994Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:28.650998Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:28.651056Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21735 TClient is connected to server localhost:21735 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:21:28.715096Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:28.715129Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:21:28.719664Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:28.719688Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:28.720961Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:28.726382Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:28.799423Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:28.829643Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:28.842974Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:29.092104Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670096634783549:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.092127Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.106654Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.118288Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.175626Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.186883Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.200717Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.213271Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.227513Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.244908Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670096634784204:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.244940Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.244965Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670096634784209:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.245680Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:29.254840Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670096634784211:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:29.326246Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670096634784262:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/console/ut/unittest >> TImmediateControlsConfiguratorTests::TestDynamicMap [GOOD] Test command err: 2025-06-30T09:20:40.954521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:40.954549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:40.954554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:40.954560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:40.954576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:40.954582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:40.954594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:40.954611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:40.954759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:40.954841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:40.959960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:40.959988Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:40.963314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:40.963378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:40.963418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046578944 2025-06-30T09:20:40.965437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:40.965588Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:40.965702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:40.965821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: dc-1, pathId: [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:40.966639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:40.966688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:40.967011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:40.967024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:40.967095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:40.967133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046578944, domainId: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:40.967140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:40.967165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.017033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "hdd" } StoragePools { Name: "" Kind: "hdd-3" } StoragePools { Name: "" Kind: "hdd-1" } StoragePools { Name: "" Kind: "hdd-2" } } } TxId: 1 TabletId: 72057594046578944 , at schemeshard: 72057594046578944 2025-06-30T09:20:41.017116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //dc-1, opId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.017206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 0 2025-06-30T09:20:41.017213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046578944, LocalPathId: 1] source path: 2025-06-30T09:20:41.017277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046578944 2025-06-30T09:20:41.017298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
2025-06-30T09:20:41.018353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046578944 PathId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.018419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //dc-1 2025-06-30T09:20:41.018493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.018508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046578944 2025-06-30T09:20:41.018515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:41.018522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:41.019160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.019178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046578944 2025-06-30T09:20:41.019185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:41.019661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.019672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.019679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.019688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:41.020401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046578944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:41.021019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046578944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:41.021071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:41.021335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.021344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:20:41.021348Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.704728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.704808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046578944, at schemeshard: 72057594046578944 2025-06-30T09:20:41.704824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.704933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:41.704949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.705006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 1 2025-06-30T09:20:41.705024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:41.706165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:41.706189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046578944, txId: 1, path id: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:41.70624 ... ediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinInplacedSizeHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxInplacedSizeHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinInplacedSizeSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxInplacedSizeSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinOccupancyPerMille was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxOccupancyPerMille was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinLogChunkCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. 
WARNING: immediate control VDiskControls.ThrottlingMaxLogChunkCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.MaxInProgressSyncCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TabletControls.MaxCommitRedoMB was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.SlowDiskThreshold was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.PredictedDelayMultiplier was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.LongRequestThresholdMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.MaxNumOfSlowDisks was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.SlowDiskThresholdHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.PredictedDelayMultiplierHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.MaxNumOfSlowDisksHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.SlowDiskThresholdSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.PredictedDelayMultiplierSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.MaxNumOfSlowDisksSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.RequestReportingSettings.BucketSize was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.RequestReportingSettings.LeakDurationMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.RequestReportingSettings.LeakRate was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control PDiskControls.MaxCommonLogChunksHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control PDiskControls.MaxCommonLogChunksSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. 
WARNING: immediate control PDiskControls.UseNoopSchedulerHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control PDiskControls.UseNoopSchedulerSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control BlobStorageControllerControls.EnableSelfHealWithDegraded was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TableServiceControls.EnableMergeDatashardReads was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TestShardControls.DisableWrites was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. 2025-06-30T09:21:29.400311Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:193: StateInit, received event# 273481728, Sender [131:463:2400], Recipient [131:404:2359]: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest 2025-06-30T09:21:29.400333Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:202: StateInit, processing event TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest 2025-06-30T09:21:29.414168Z node 131 :BS_CONTROLLER DEBUG: {BSC19@console_interaction.cpp:74} Console proposed config response Response# {Status: ReverseCommit ConsoleConfigVersion: 0 YAML: "" } 2025-06-30T09:21:29.424782Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:193: StateInit, received event# 273285146, Sender [131:410:2359], Recipient [131:404:2359]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Generation: 1 Config { FeatureFlags { EnableExternalHive: false EnableColumnStatistics: false EnableScaleRecommender: true } } RawConsoleConfig { } } 2025-06-30T09:21:29.424807Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:199: StateInit, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-30T09:21:29.424845Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: AllowEditYamlInUiItem 2025-06-30T09:21:29.424857Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [131:440:2373]: Config { } ItemKinds: 75 Local: true 2025-06-30T09:21:29.424875Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:21:29.424880Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [131:405:2360]: Config { } ItemKinds: 10 Local: true 2025-06-30T09:21:29.424883Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: MonitoringConfigItem 2025-06-30T09:21:29.424894Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [131:408:2358]: Config { } ItemKinds: 10 Local: true 2025-06-30T09:21:29.424910Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: ImmediateControlsConfigItem 2025-06-30T09:21:29.424917Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [131:463:2400]: Config { } ItemKinds: 39 Local: true 2025-06-30T09:21:29.425668Z node 131 :TENANT_POOL DEBUG: tenant_pool.cpp:486: 
TDomainTenantPool(dc-1) Got new monitoring config: 2025-06-30T09:21:29.425690Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [131:405:2360], Recipient [131:404:2359]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:21:29.425695Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:21:29.425724Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [131:463:2400], Recipient [131:404:2359]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:21:29.425727Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:21:29.425734Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [131:408:2358], Recipient [131:404:2359]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:21:29.425740Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:21:29.425747Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [131:440:2373], Recipient [131:404:2359]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:21:29.425750Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event TEvConsole::TEvConfigNotificationResponse 2025-06-30T09:21:29.449394Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273285146, Sender [131:410:2359], Recipient [131:404:2359]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Generation: 1 Config { FeatureFlags { EnableExternalHive: false EnableColumnStatistics: false EnableScaleRecommender: true } ImmediateControlsConfig { GRpcControls { RequestConfigs { key: "FooBar" value { MaxInFlight: 10 } } } } Version { Items { Kind: 39 Id: 1 Generation: 1 } } } AffectedKinds: 39 RawConsoleConfig { ImmediateControlsConfig { GRpcControls { RequestConfigs { key: "FooBar" value { MaxInFlight: 10 } } } } Version { Items { Kind: 39 Id: 1 Generation: 1 } } } } 2025-06-30T09:21:29.449424Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:221: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-30T09:21:29.449469Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:1036: Sending for kinds: ImmediateControlsConfigItem 2025-06-30T09:21:29.449491Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:361: Send TEvConsole::TEvConfigNotificationRequest to [131:463:2400]: Config { ImmediateControlsConfig { GRpcControls { RequestConfigs { key: "FooBar" value { MaxInFlight: 10 } } } } } ItemKinds: 39 Local: true 2025-06-30T09:21:29.449689Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:215: StateWork, received event# 273286162, Sender [131:463:2400], Recipient [131:404:2359]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationResponse { SubscriptionId: 0 ConfigId { } } 2025-06-30T09:21:29.449698Z node 131 :CONFIGS_DISPATCHER TRACE: configs_dispatcher.cpp:227: StateWork, processing event 
TEvConsole::TEvConfigNotificationResponse >> TImportTests::CorruptedPermissions [GOOD] >> TImportTests::ChangefeedsWithTablePermissions >> KqpScheme::CreateAndAlterTableWithBloomFilterUncompat [GOOD] >> KqpScheme::CreateAndAlterTableWithBloomFilterCompat |81.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_table_writer/unittest >> KqpScheme::InvalidationAfterDropCreateTable2 [GOOD] >> KqpScheme::FamilyColumnTest >> KqpAcl::AclDml-UseSink+IsOlap [GOOD] >> KqpAcl::AclDml+UseSink+IsOlap >> KqpConstraints::SerialTypeNegative1 [GOOD] >> KqpConstraints::SerialTypeForNonKeyColumn |81.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/sysview/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateTableWithVectorIndexCaseIncentive [GOOD] Test command err: Trying to start YDB, gRPC: 7905, MsgBus: 16506 2025-06-30T09:21:22.012081Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670063095060760:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:22.012592Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004799/r3tmp/tmpd9UPkW/pdisk_1.dat 2025-06-30T09:21:22.144987Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670063095060736:2080] 1751275282010300 != 1751275282010303 2025-06-30T09:21:22.146535Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7905, node 1 2025-06-30T09:21:22.174804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:22.174819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:22.174821Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:22.174872Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:22.195148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:22.195182Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:22.199111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16506 TClient is connected to server localhost:16506 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:22.280259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:22.290936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:22.303061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:22.383712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:22.412139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:22.436277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:22.598445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670063095062340:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.598473Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.658947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.678182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.694321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.738087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.804668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.875489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.890502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.901925Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670063095062999:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.901959Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.901971Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670063095063004:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.902693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:22.912129Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670063095063006:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:22.973665Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670063095063057:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:23.011256Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:23.131296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 30973, MsgBus: 19481 2025-06-30T09:21:23.346177Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670069328059610:2154];send_to=[0:73071995366581 ... esource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:28.363470Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 18348, MsgBus: 10051 2025-06-30T09:21:28.759071Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670090251402147:2152];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:28.760438Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004799/r3tmp/tmpWZjbVZ/pdisk_1.dat 2025-06-30T09:21:28.788341Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18348, node 6 2025-06-30T09:21:28.798988Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:28.799001Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:28.799004Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:28.799052Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10051 2025-06-30T09:21:28.860801Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:28.860840Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:28.862973Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10051 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:21:28.877922Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:28.934223Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:28.968654Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:29.032004Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:29.046075Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:29.327574Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670094546370929:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.327603Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.344575Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.359574Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.380389Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.393900Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.410452Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.427020Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.441390Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.460111Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670094546371586:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.460137Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.460296Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670094546371591:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.461325Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:29.466275Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:29.466414Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670094546371593:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:29.524167Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670094546371644:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:29.756620Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.762801Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> SelfHeal::ReassignThrottling [GOOD] >> SelfHeal::TestOneMaintenanceRequestMirror3dc >> KqpScheme::AddChangefeed [GOOD] >> KqpScheme::AddChangefeedNegative >> KqpScheme::UnknownFamilyTest [GOOD] >> KqpSecrets::RequireDbPrefixInSecretWithInvalidName >> KqpScheme::CreateTransfer_QueryService [GOOD] >> KqpScheme::CreateFamilyWithCompressionLevel [GOOD] >> KqpScheme::CreateExternalTable |81.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore/unittest >> KqpScheme::InvalidationAfterDropCreateTable2NoEffects [GOOD] >> KqpScheme::InvalidationAfterDropCreateTable2MultiStageTxNoEffects >> TConsoleTests::TestAuthorization [GOOD] >> TConsoleTests::TestAuthorizationExtSubdomain >> TConsoleTests::TestAttributes [GOOD] >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunning >> TChargeBTreeIndex::FewNodes_History [GOOD] >> TChargeBTreeIndex::FewNodes_Sticky |81.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore/unittest >> TNodeBrokerTest::Test999NodesSubscribers [GOOD] >> KqpScheme::ResourcePoolClassifiersValidation [GOOD] >> KqpScheme::ResourcePoolClassifiersRankValidation >> KqpOlapScheme::DropColumn [GOOD] >> KqpOlapScheme::DropColumnAfterAdd ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateTransfer_QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 22847, MsgBus: 1120 2025-06-30T09:21:21.753629Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670058855309736:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:21.755434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00479e/r3tmp/tmpStefBb/pdisk_1.dat 2025-06-30T09:21:21.835207Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:21.844563Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 22847, node 1 2025-06-30T09:21:21.859004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-06-30T09:21:21.859042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:21.864133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:21.874881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:21.874896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:21.874899Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:21.874944Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1120 TClient is connected to server localhost:1120 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:21.991730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:22.007126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:21:22.015248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.061568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:21:22.128420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:22.174599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.499817Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670063150278575:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.499847Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.607660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.625517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.644334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.656871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.669584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.685379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.700615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.722780Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670063150279227:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.722800Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.722980Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670063150279232:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.724002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:22.727873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:22.728137Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670063150279234:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:22.755945Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:22.823861Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670063150279294:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:23.086634Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670067445246874:3579] txid# 281474976715673, issues: { message: "User already exists" severity: 1 } Trying to start YDB, gRPC: 4582, MsgBus: 11264 2025-06-30T09:21:23.459182Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670068521400308:2243];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00479e/r3tmp/tmpKt7UmG/pdis ... 5936Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:30.015998Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670099332113563:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:30.097685Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670099332113614:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:30.253703Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521670099332113887:2475], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:4:87: Error: CONNECTION_STRING and ENDPOINT/DATABASE are mutually exclusive 2025-06-30T09:21:30.254498Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fm50prw64ppxq8dzqrb, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:30.264901Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521670099332113891:2477], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:4:87: Error: Neither CONNECTION_STRING nor ENDPOINT/DATABASE are provided 2025-06-30T09:21:30.265565Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fmkbsy2bq6t4btcwts8, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:30.272276Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521670099332113895:2479], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:4:87: Error: Neither CONNECTION_STRING nor ENDPOINT/DATABASE are provided 2025-06-30T09:21:30.273060Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fmvdtmfez0041jpzhtn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:30.279335Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521670099332113899:2481], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:4:87: Error: TOKEN and USER/PASSWORD are mutually exclusive 2025-06-30T09:21:30.279425Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fn309vw2spata8fb1wz, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:30.285439Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521670099332113903:2483], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:4:87: Error: TOKEN and TOKEN_SECRET_NAME are mutually exclusive 2025-06-30T09:21:30.285527Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fn9aa9eakgkw8mhwh80, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:30.289245Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521670099332113907:2485], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:4:87: Error: PASSWORD and PASSWORD_SECRET_NAME are mutually exclusive 2025-06-30T09:21:30.289353Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fneapaaa07s6fa011v6, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:30.294068Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521670099332113911:2487], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:4:87: Error: USER is not provided 2025-06-30T09:21:30.294227Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fnjbzhxzfj9r03pkm7s, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:30.300014Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521670099332113915:2489], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:4:87: Error: PASSWORD or PASSWORD_SECRET_NAME are not provided 2025-06-30T09:21:30.300181Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fnq7rcf6c55615ywg67, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:30.305904Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521670099332113919:2491], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:4:87: Error: batch_size_bytes must be greater than 0 but 0 2025-06-30T09:21:30.306527Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fnxd0scw6d4wy9hcdrn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:30.311788Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521670099332113923:2493], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:4:87: Error: flush_interval must be Interval 2025-06-30T09:21:30.311978Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fp379evq2trm23dqnt3, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:30.316903Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521670099332113927:2495], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:4:87: Error: consumer must be not empty 2025-06-30T09:21:30.317021Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fp9517asx5h58rqf85b, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:21:30.322521Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670099332113935:3597] txid# 281474976715672, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-30T09:21:30.322618Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=6&id=NTU1MDdjMjktZjVhOGY1ZWUtZjM2MDZhZDctZmEyYTFiM2Q=, ActorId: [6:7521670099332113878:2469], ActorState: ExecuteState, TraceId: 01jz028fpe38pphahva5xc63kk, Create QueryResponse for error on request, msg: 2025-06-30T09:21:30.328324Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.414138Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTransfer, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp:473) 2025-06-30T09:21:30.470703Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:30.475154Z node 6 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:21:30.478890Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:30.482369Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTransfer, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp:473) 2025-06-30T09:21:30.484044Z node 6 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:21:30.494303Z node 6 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:21:30.496179Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:21:30.498889Z node 6 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTransfer, opId: 281474976715680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp:473) 2025-06-30T09:21:30.504019Z node 6 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:21:30.511451Z node 6 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:21:30.517836Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) |81.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_cluster_discovery/ut/unittest |81.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |81.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity >> TImportTests::ChangefeedsWithTablePermissions [GOOD] >> KqpAcl::AlterDatabasePrivilegesRequiredToChangeSchemeLimits-AsClusterAdmin [GOOD] >> KqpScheme::DropResourcePoolClassifier [GOOD] >> KqpScheme::DropNonExistingResourcePoolClassifier >> KqpScheme::ChangefeedAwsRegion [GOOD] >> KqpScheme::ChangefeedRetentionPeriod >> TCdcStreamWithRebootsTests::CreateStreamWithInitialScan[TabletReboots] >> KqpSysColV1::InnerJoinTables [GOOD] >> TOlapReboots::CreateMultipleStandaloneTables >> TOlapReboots::DropMultipleTables >> KqpScheme::CreateAndAlterTableWithBloomFilterCompat [GOOD] >> KqpScheme::CreateAndAlterTableComplex >> KqpConstraints::IndexAutoChooseAndNonReadyIndex [GOOD] >> KqpConstraints::IndexedTableAndNotNullColumnAddNotNullColumn ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::Test999NodesSubscribers [GOOD] Test command err: 2025-06-30T09:20:52.993496Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.998041Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.998097Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.998129Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.998163Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.998194Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:52.998217Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.002844Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.002974Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.003033Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.003093Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.003160Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.003205Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.003244Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.003340Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.003394Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.003435Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.004899Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.004974Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.005013Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.005050Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.005086Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.005177Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.006660Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.006846Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.006913Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.006964Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.007022Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.007050Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.007075Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.007096Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.007966Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.008109Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 
18446744073709.551615s } 2025-06-30T09:20:53.008156Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.008195Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.008225Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.008288Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.008341Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:53.008430Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.008474Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.009607Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.009659Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.009694Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.009833Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.010000Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.015468Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.015543Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.015559Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.015723Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.015924Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.016010Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.016415Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.016634Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.016701Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle 
NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.017030Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.017224Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.017635Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.018220Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.020643Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.020966Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.022329Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.022627Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:53.040619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:53.040644Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:53.046208Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:53.046709Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:53.046837Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:53.047092Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:53.047675Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:53.047715Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:53.047765Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-30T09:20:53.047782Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:53.047788Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:53.047805Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:53.047843Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:53.047850Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:53.047855Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-30T09:20:53.047863Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:20:53.047888Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:134 ... 025-06-30T09:21:29.655947Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:29.671129Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.671169Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.671186Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5579:7159] 2025-06-30T09:21:29.676140Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9681:11208], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:29.676240Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:29.676249Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:29.676267Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:29.697134Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.697168Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.697187Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5584:7164] 2025-06-30T09:21:29.702158Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9683:11210], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:29.702272Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:29.702282Z node 1 
:NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:29.702301Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:29.715602Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.715629Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.715643Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5589:7169] 2025-06-30T09:21:29.719126Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9685:11212], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:29.719195Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:29.719200Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:29.719214Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:29.731956Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.731983Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.731997Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5594:7174] 2025-06-30T09:21:29.735244Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9687:11214], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:29.735328Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:29.735333Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:29.735349Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:29.750283Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.750320Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.750352Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5599:7179] 2025-06-30T09:21:29.756257Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9689:11216], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:29.756364Z node 1 
:NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:29.756374Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:29.756392Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:29.769977Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.770013Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.770030Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5604:7184] 2025-06-30T09:21:29.779327Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9691:11218], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:29.779478Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:29.779491Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:29.779509Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:29.795116Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.795153Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.795170Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5609:7189] 2025-06-30T09:21:29.800426Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9693:11220], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:29.800505Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:29.800513Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:29.800530Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:29.813519Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.813552Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.813581Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5614:7194] 2025-06-30T09:21:29.818099Z 
node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9695:11222], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:29.818296Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:29.818304Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:29.818320Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:29.834510Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.834545Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.834566Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5619:7199] 2025-06-30T09:21:29.842428Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:9697:11224], Recipient [1:567:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:21:29.842529Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:633:2213], Recipient [1:567:2185]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-30T09:21:29.842537Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:21:29.842555Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.3 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-30T09:21:29.857549Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [1:567:2185]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.857585Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-30T09:21:29.857605Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v2 -> v3 to [1:5624:7204] >> TOlapReboots::DropTableThenStore ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpAcl::AlterDatabasePrivilegesRequiredToChangeSchemeLimits-AsClusterAdmin [GOOD] Test command err: Trying to start YDB, gRPC: 14702, MsgBus: 17478 2025-06-30T09:21:20.633610Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670054339774934:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:20.633633Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047b5/r3tmp/tmpybvMlJ/pdisk_1.dat 2025-06-30T09:21:20.818200Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:20.820277Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7521670054339774903:2080] 1751275280633439 != 1751275280633442 TServer::EnableGrpc on GrpcPort 14702, node 1 2025-06-30T09:21:20.863001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:20.863016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:20.863019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:20.863069Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:20.871196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:20.871229Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:20.871762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17478 TClient is connected to server localhost:17478 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:20.952158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:20.959590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:20.966238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:20.968250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.016203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.067975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.085192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.275823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670058634743806:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.275846Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.337946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.353292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.366810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.424632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.436545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.451310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.466300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.487964Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670058634744461:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.488023Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.488215Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670058634744466:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.489293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:21.493462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715670, at schemeshard: 72057594046644480 2025-06-30T09:21:21.493532Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670058634744468:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715670 completed, doublechecking } 2025-06-30T09:21:21.564909Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670058634744519:3408] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:21.642797Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:21.772546Z node 1 :TX_PROXY_SCHEME_CACHE WARN: ... 8 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [8:7521670100069656450:2844] txid# 281474976715662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-30T09:21:30.880582Z node 8 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [8:7521670100069656450:2844] txid# 281474976715662 HANDLE EvClientConnected 2025-06-30T09:21:30.889056Z node 8 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [8:7521670100069656450:2844] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-30T09:21:30.889081Z node 8 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [8:7521670100069656450:2844] txid# 281474976715662 SEND to# [8:7521670100069656449:2291] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-30T09:21:30.928187Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:30.926557Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [8:7521670100069655331:2139] Handle TEvProposeTransaction 2025-06-30T09:21:30.926571Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [8:7521670100069655331:2139] TxId# 281474976715663 ProcessProposeTransaction 2025-06-30T09:21:30.926581Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [8:7521670100069655331:2139] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [8:7521670100069656491:2866] 2025-06-30T09:21:30.927490Z node 8 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [8:7521670100069656491:2866] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpModifyACL ModifyACL { Name: "Test" NewOwner: "databaseadmin" } ApplyIf { PathTypes: EPathTypeSubDomain PathTypes: EPathTypeExtSubDomain } } } UserToken: "\n\025cluster_admin@builtin\022\030\022\026\n\024all-users@well-known\032\025cluster_admin@builtin\"\007Builtin*\027clus****ltin (2AB0E265)" DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:54898" 2025-06-30T09:21:30.927507Z node 8 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [8:7521670100069656491:2866] txid# 281474976715663 Bootstrap, UserSID: cluster_admin@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:21:30.927512Z node 8 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [8:7521670100069656491:2866] txid# 281474976715663 Bootstrap, UserSID: cluster_admin@builtin IsClusterAdministrator: 1
2025-06-30T09:21:30.927525Z node 8 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [8:7521670100069656491:2866] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:21:30.927621Z node 8 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [8:7521670100069656491:2866] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:21:30.927654Z node 8 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [8:7521670100069656491:2866] HANDLE EvNavigateKeySetResult, txid# 281474976715663 shardToRequest# 72075186224037897 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 2] DomainInfo.Params# Version: 3 PlanResolution: 10 Coordinators: 72075186224037895 Coordinators: 72075186224037893 Coordinators: 72075186224037896 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037892 Mediators: 72075186224037890 Mediators: 72075186224037889 SchemeShard: 72075186224037897 Hive: 72075186224037888 SysViewProcessor: 72075186224037891 StatisticsAggregator: 72075186224037894 RedirectRequired# true 2025-06-30T09:21:30.927666Z node 8 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [8:7521670100069656491:2866] txid# 281474976715663 SEND to# 72075186224037897 shardToRequest {TEvModifySchemeTransaction txid# 281474976715663 TabletId# 72075186224037897} 2025-06-30T09:21:30.927894Z node 8 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [8:7521670100069656491:2866] txid# 281474976715663 HANDLE EvClientConnected 2025-06-30T09:21:30.929602Z node 8 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [8:7521670100069656491:2866] txid# 281474976715663 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715663} 2025-06-30T09:21:30.929617Z node 8 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [8:7521670100069656491:2866] txid# 281474976715663 SEND to# [8:7521670100069656490:2291] Source {TEvProposeTransactionStatus txid# 281474976715663 Status# 48} 2025-06-30T09:21:30.944518Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [8:7521670100069655331:2139] Handle TEvNavigate describe path /Root/Test 2025-06-30T09:21:30.946424Z node 8 :TX_PROXY DEBUG: describe.cpp:272: Actor# [8:7521670100069656504:2873] HANDLE EvNavigateScheme /Root/Test 2025-06-30T09:21:30.946527Z node 8 :TX_PROXY DEBUG: describe.cpp:356: Actor# [8:7521670100069656504:2873] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-30T09:21:30.946567Z node 8 :TX_PROXY DEBUG: describe.cpp:435: Actor# [8:7521670100069656504:2873] SEND to# 72075186224037897 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/Test" Options { ShowPrivateTable: false } 2025-06-30T09:21:30.947252Z node 8 :TX_PROXY DEBUG: describe.cpp:448: Actor# [8:7521670100069656504:2873] Handle TEvDescribeSchemeResult Forward to# [8:7521670100069656502:2320] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 0 Record# Status: StatusSuccess Path: "/Root/Test" PathDescription { Self { Name: "Root/Test" PathId: 1 SchemeshardId: 72075186224037897 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "databaseadmin" ACL: "" EffectiveACL: "\n \010\001\020\200\010\032\025cluster_admin@builtin \003(\001" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 1 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 2 
ProcessingParams { Version: 3 PlanResolution: 10 Coordinators: 72075186224037895 Coordinators: 72075186224037893 Coordinators: 72075186224037896 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037892 Mediators: 72075186224037890 Mediators: 72075186224037889 SchemeShard: 72075186224037897 Hive: 72075186224037888 SysViewProcessor: 72075186224037891 StatisticsAggregator: 72075186224037894 } DomainKey { SchemeShard: 72057594046644480 PathId: 2 } StoragePools { Name: "/Root/Test:hdd1" Kind: "hdd1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 10 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root/Test" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186224037897 2025-06-30T09:21:30.952690Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [8:7521670100069655331:2139] Handle TEvProposeTransaction 2025-06-30T09:21:30.952706Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [8:7521670100069655331:2139] TxId# 281474976715664 ProcessProposeTransaction 2025-06-30T09:21:30.952717Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [8:7521670100069655331:2139] Cookie# 0 userReqId# "" txid# 281474976715664 SEND to# [8:7521670100069656516:2878] 2025-06-30T09:21:30.953524Z node 8 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [8:7521670100069656516:2878] txid# 281474976715664 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "Test" SchemeLimits { MaxPaths: 10 MaxShards: 20 } } } } UserToken: "\n\rdatabaseadmin\022\030\022\026\n\024all-users@well-known\032\335\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9Sb290Il0sImV4cCI6MTc1MTMxODQ5MCwiaWF0IjoxNzUxMjc1MjkwLCJzdWIiOiJkYXRhYmFzZWFkbWluIn0.JHgjPpOgc9TZJe4QilzPcPhtd3nriXzeYJ23w4fECg2BWHwcrEjnLMnoJ6pCmlxtIjVw5q9z-a5lt_C6GPPiYAPPghKmZnzck3pzQzRifWIr7rn4Nm7sp0plDoOcAnEBwE1lKgBjK619XOnqrvLggGT37_Jk1HnS3LWFdcEHGapylTNsiBh4OgzrO6T8jkPeieS9bOchkjGQXOYKCqXxvbewwLgNUnuXc9zZFDIIzGqHpu26Oo3iKijGzvJoJURbwkVKY8y5Dh7b3CAyZEvk3Gvly-cUHFBjqEX_17P08FQCIpZxcB23_bCmig7IR6wGqlXpN-DaJ9JqMVTGsIqLHw\"\005Login*\211\001eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9Sb290Il0sImV4cCI6MTc1MTMxODQ5MCwiaWF0IjoxNzUxMjc1MjkwLCJzdWIiOiJkYXRhYmFzZWFkbWluIn0.**" DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:54898" 2025-06-30T09:21:30.953540Z node 8 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [8:7521670100069656516:2878] txid# 281474976715664 Bootstrap, UserSID: databaseadmin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:21:30.953547Z node 8 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [8:7521670100069656516:2878] txid# 281474976715664 Bootstrap, UserSID: databaseadmin IsClusterAdministrator: 0 2025-06-30T09:21:30.953561Z node 8 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [8:7521670100069656516:2878] txid# 281474976715664 TEvNavigateKeySet requested from SchemeCache 
2025-06-30T09:21:30.953818Z node 8 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [8:7521670100069656516:2878] txid# 281474976715664 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:21:30.953826Z node 8 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [8:7521670100069656516:2878] txid# 281474976715664, Access denied for databaseadmin on path /Root/Test, only cluster admins can alter databases 2025-06-30T09:21:30.953853Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7521670100069656516:2878] txid# 281474976715664, issues: { message: "Access denied for databaseadmin on path /Root/Test" issue_code: 200000 severity: 1 } 2025-06-30T09:21:30.953859Z node 8 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [8:7521670100069656516:2878] txid# 281474976715664 SEND to# [8:7521670100069656515:2309] Source {TEvProposeTransactionStatus Status# 5} 2025-06-30T09:21:30.954956Z node 8 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=8&id=ODE1ZmQ5Ny1iZWJmNTczYS01Mjg3YjY3YS00YThkNjkzYQ==, ActorId: [8:7521670100069656476:2309], ActorState: ExecuteState, TraceId: 01jz028ga58z7e3s4830862n56, Create QueryResponse for error on request, msg: 2025-06-30T09:21:30.955336Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [8:7521670100069655331:2139] Handle TEvExecuteKqpTransaction 2025-06-30T09:21:30.955348Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [8:7521670100069655331:2139] TxId# 281474976715665 ProcessProposeKqpTransaction 2025-06-30T09:21:30.955496Z node 8 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz028ga58z7e3s4830862n56, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=8&id=ODE1ZmQ5Ny1iZWJmNTczYS01Mjg3YjY3YS00YThkNjkzYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:21:30.964895Z node 8 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 9 2025-06-30T09:21:30.965029Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connected -> Disconnected >> KqpConstraints::SerialTypeForNonKeyColumn [GOOD] >> KqpConstraints::SerialTypeSmallSerial >> TOlapReboots::DropMultipleStandaloneTables >> VectorIndexBuildTestReboots::BaseCase+Prefixed[TabletReboots] [GOOD] >> KqpScheme::CreateExternalTable [GOOD] >> KqpScheme::FamilyColumnTest [GOOD] >> KqpScheme::Int8Int16 >> KqpScheme::CreateExternalTableCheckPrimaryKey ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::InnerJoinTables [GOOD] Test command err: Trying to start YDB, gRPC: 8129, MsgBus: 7393 2025-06-30T09:21:29.937417Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670096224046024:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:29.938818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004683/r3tmp/tmpVVHCOZ/pdisk_1.dat 2025-06-30T09:21:30.016890Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:30.016978Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670096224045887:2080] 1751275289931620 != 1751275289931623 TServer::EnableGrpc on GrpcPort 8129, node 1 2025-06-30T09:21:30.030582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:30.030618Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:30.031629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:30.033708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:30.033717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:30.033720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:30.033775Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7393 TClient is connected to server localhost:7393 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:30.123013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:21:30.131365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.164362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:30.198785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:30.218232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:30.460303Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670100519014786:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.460341Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.509103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.519466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.580599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.595247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.607020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.621382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.635849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.678155Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670100519015440:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.678194Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.678406Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670100519015445:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.679753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:30.683892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:30.683996Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670100519015447:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:30.779339Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670100519015498:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:30.930491Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpScheme::AddChangefeedNegative [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_restore/unittest >> TImportTests::ChangefeedsWithTablePermissions [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:04.378044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:04.378069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.378074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:04.378077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:04.378082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:04.378085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:04.378100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.378113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:04.378214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:04.378279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:04.390392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:04.390421Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:04.393638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20:
TxInitSchema.Complete 2025-06-30T09:21:04.393700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:04.393754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:04.395446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:04.395507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:04.395612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.395665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:04.396253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.396288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:04.396513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.396521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.396542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:04.396548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:04.396554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:04.396569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.397725Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.413935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:04.414009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.414091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:04.414097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: 
[OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:04.414144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:04.414154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:04.414806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.414846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:04.414908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.414918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:04.414924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:04.414929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:04.415291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.415301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:04.415309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:04.415596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.415605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.415611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.415619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:04.416083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:04.416407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from 
tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:04.416437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:04.416577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.416595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:04.416600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.416644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:04.416649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.416672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:04.416680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:04.416972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.416978Z node 1 :FLAT_TX_SCHEMESHARD ... 
2393Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/updates_feed2" PathDescription { Self { Name: "updates_feed2" PathId: 7 SchemeshardId: 72057594046678944 PathType: EPathTypeCdcStream CreateFinished: true CreateTxId: 281474976710763 CreateStep: 5000008 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "\n\020\010\001\020\211\004\032\005alice \003(\001\n\021\010\001\020\366\213\001\032\005alice \003(\001\n\016\010\001\020\211\004\032\003bob \003(\001" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 CdcStreamVersion: 1 } ChildrenExist: true } Children { Name: "streamImpl" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976710763 CreateStep: 5000008 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409552 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } CdcStreamDescription { Name: "updates_feed2" Mode: ECdcStreamModeUpdate PathId { OwnerId: 72057594046678944 LocalId: 7 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatJson VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } } PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:31.202438Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/updates_feed2/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:21:31.202454Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/updates_feed2/streamImpl" took 17us result status StatusSuccess 2025-06-30T09:21:31.202524Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/updates_feed2/streamImpl" PathDescription { Self { Name: "streamImpl" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976710763 CreateStep: 5000008 ParentPathId: 7 PathState: 
EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "\n\020\010\001\020\211\004\032\005alice \003(\001\n\021\010\001\020\366\213\001\032\005alice \003(\001\n\016\010\001\020\211\004\032\003bob \003(\001" PathVersion: 3 PathSubType: EPathSubTypeStreamImpl Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "streamImpl" PathId: 8 TotalGroupCount: 1 PartitionPerTablet: 2 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 } TopicName: "updates_feed2" TopicPath: "/MyRoot/Table/updates_feed2/streamImpl" YdbDatabasePath: "/MyRoot" PartitionKeySchema { Name: "key" TypeId: 4608 } MeteringMode: METERING_MODE_REQUEST_UNITS Consumers { Name: "my_consumer" } } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:31.202579Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/updates_feed3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:21:31.202602Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/updates_feed3" took 24us result status StatusSuccess 2025-06-30T09:21:31.202657Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/updates_feed3" PathDescription { Self { Name: "updates_feed3" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeCdcStream CreateFinished: true CreateTxId: 281474976710759 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "\n\020\010\001\020\211\004\032\005alice \003(\001\n\021\010\001\020\366\213\001\032\005alice \003(\001\n\016\010\001\020\211\004\032\003bob \003(\001" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 CdcStreamVersion: 1 } ChildrenExist: true } Children { 
Name: "streamImpl" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976710759 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409548 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } CdcStreamDescription { Name: "updates_feed3" Mode: ECdcStreamModeUpdate PathId { OwnerId: 72057594046678944 LocalId: 3 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatJson VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:31.202705Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/updates_feed3/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:21:31.202719Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/updates_feed3/streamImpl" took 16us result status StatusSuccess 2025-06-30T09:21:31.202811Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/updates_feed3/streamImpl" PathDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976710759 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "\n\020\010\001\020\211\004\032\005alice \003(\001\n\021\010\001\020\366\213\001\032\005alice \003(\001\n\016\010\001\020\211\004\032\003bob \003(\001" PathVersion: 3 PathSubType: EPathSubTypeStreamImpl Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409548 } PersQueueGroup { Name: "streamImpl" PathId: 4 TotalGroupCount: 1 PartitionPerTablet: 2 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 } TopicName: "updates_feed3" TopicPath: "/MyRoot/Table/updates_feed3/streamImpl" YdbDatabasePath: "/MyRoot" PartitionKeySchema { Name: "key" 
TypeId: 4608 } MeteringMode: METERING_MODE_REQUEST_UNITS Consumers { Name: "my_consumer" } } Partitions { PartitionId: 0 TabletId: 72075186233409547 Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409548 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TOlapReboots::CreateDropTable >> KqpScheme::InvalidationAfterDropCreateTable2MultiStageTxNoEffects [GOOD] >> KqpScheme::ModifyPermissions |81.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> KqpOlapScheme::DropColumnAfterAdd [GOOD] >> KqpOlapScheme::DropColumnErrors ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AddChangefeedNegative [GOOD] Test command err: Trying to start YDB, gRPC: 22592, MsgBus: 20424 2025-06-30T09:21:08.113493Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670005522641556:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:08.113524Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004833/r3tmp/tmpCiTEUa/pdisk_1.dat TServer::EnableGrpc on GrpcPort 22592, node 1 2025-06-30T09:21:08.171573Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:08.183306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:08.183327Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:08.183329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:08.183377Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20424 2025-06-30T09:21:08.215571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:08.215606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:08.216720Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20424 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:08.254387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... CREATE TABLE `/Root/ColumnTableTest` (id Int64 NOT NULL, timestamp Timestamp NOT NULL, ui64_type Uint64 NOT NULL, PRIMARY KEY (id)) PARTITION BY HASH(id) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:21:08.590317Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670005522642153:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.590344Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:08.628929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:08.641767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:08.641854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:08.641915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:08.641941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:08.641963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:08.641993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:08.642017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:08.642040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:08.642067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:08.642091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:08.642117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:08.642142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670005522642216:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:08.642753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:08.642763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:08.642778Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:08.642784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:08.642807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:08.642812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:08.642826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:08.642831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:08.642840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:08.642845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:08.642872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:08.642877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:08.642901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:08.642909Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:08.642925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:08.642930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:08.642938Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:21:08.642946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=7207518622403788 ... cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:30.963447Z node 18 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:30.964748Z node 18 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [18:7521670099827174401:2080] 1751275290919328 != 1751275290919331 TServer::EnableGrpc on GrpcPort 18951, node 18 2025-06-30T09:21:30.983381Z node 18 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:30.983394Z node 18 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:30.983396Z node 18 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:30.983437Z node 18 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3676 2025-06-30T09:21:31.039154Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:31.039184Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:31.040219Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3676 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:31.058329Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:31.065280Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:31.082323Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:31.112694Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:31.170725Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:31.343657Z node 18 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [18:7521670104122143299:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:31.343688Z node 18 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:31.350113Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.358921Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.369519Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.386293Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.400134Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.411738Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.427617Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.444074Z node 18 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [18:7521670104122143953:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:31.444107Z node 18 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:31.444210Z node 18 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [18:7521670104122143958:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:31.445160Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:31.453018Z node 18 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [18:7521670104122143960:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:31.523394Z node 18 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [18:7521670104122144011:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:31.739518Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.768492Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.780849Z node 18 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [18:7521670104122144455:3679] txid# 281474976715674, issues: { message: "DYNAMODB_STREAMS_JSON format incompatible with specified stream mode" severity: 1 } 2025-06-30T09:21:31.786067Z node 18 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [18:7521670104122144472:3687] txid# 281474976715675, issues: { message: "DEBEZIUM_JSON format incompatible with specified stream mode" severity: 1 } 2025-06-30T09:21:31.923737Z node 18 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |81.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest |81.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> KqpScheme::ChangefeedRetentionPeriod [GOOD] >> KqpSecrets::RequireDbPrefixInSecretWithInvalidName [GOOD] >> KqpScheme::ChangefeedAttributes >> KqpSecrets::RequireDbPrefixInSecretWithValidName >> KqpAcl::AclDml+UseSink+IsOlap [GOOD] >> KqpAcl::AclRevoke+UseSink+IsOlap |81.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> KqpScheme::CreateAndAlterTableComplex [GOOD] >> KqpScheme::CreateAlterUserWithHash >> KqpConstraints::SerialTypeSmallSerial [GOOD] >> KqpConstraints::SerialTypeSerial2 >> LocalTableWriter::WaitTxIds >> DstCreator::ExistingDst >> DstCreator::ColumnsSizeMismatch >> TNetClassifierUpdaterTest::TestFiltrationByNetboxTags [GOOD] >> KqpScheme::CreateExternalTableCheckPrimaryKey [GOOD] >> KqpScheme::CreateExternalTableValidation >> KqpOlapScheme::DropColumnErrors [GOOD] >> KqpOlapScheme::DropColumnAfterInsert >> KqpScheme::DropNonExistingResourcePoolClassifier [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query [GOOD] >> KqpScheme::Int8Int16 [GOOD] >> KqpScheme::Int8Int16Olap >> DstCreator::GlobalConsistency >> DstCreator::SameOwner >> KqpScheme::ChangefeedAttributes [GOOD] >> KqpScheme::ChangefeedOnIndexTable >> KqpScheme::ResourcePoolClassifiersRankValidation [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/cms/console/ut/unittest >> TNetClassifierUpdaterTest::TestFiltrationByNetboxTags [GOOD] Test command err: 2025-06-30T09:20:40.662118Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669882904223761:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:40.662144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpKstwAW/pdisk_1.dat 2025-06-30T09:20:40.708364Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:40.709677Z node 1 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:15835) connection closed with error: Connection refused 2025-06-30T09:20:40.709798Z node 1 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:20:40.763742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:40.763782Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:40.764843Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:41.664627Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:42.915707Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669890814374491:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:42.915724Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpLBZSTd/pdisk_1.dat 2025-06-30T09:20:42.930160Z node 2 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#28,[::1]:4320) connection closed with error: Connection refused 2025-06-30T09:20:42.930244Z node 2 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:20:42.931191Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:43.019015Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:43.019057Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:43.020177Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:43.917430Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:45.206222Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521669904822524091:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:45.206252Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpp1kJcO/pdisk_1.dat 2025-06-30T09:20:45.222122Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:45.225184Z node 3 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:23438) connection closed with error: Connection refused 2025-06-30T09:20:45.225276Z node 3 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:20:45.310866Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:45.310914Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:45.311862Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:46.208591Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:47.474651Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521669914130827519:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:47.474663Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmp97YP1S/pdisk_1.dat 2025-06-30T09:20:47.488403Z node 4 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#28,[::1]:13119) connection closed with error: Connection refused 2025-06-30T09:20:47.488542Z node 4 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:20:47.488906Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:47.489109Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521669914130827498:2080] 1751275247474553 != 1751275247474556 2025-06-30T09:20:47.578951Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:47.578974Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:47.579919Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:48.476761Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:49.740481Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521669922934527252:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:49.740506Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpu25wgy/pdisk_1.dat 2025-06-30T09:20:49.752487Z node 5 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#30,[::1]:32336) connection closed with error: Connection refused 2025-06-30T09:20:49.752581Z node 5 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:20:49.754469Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:49.754668Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7521669922934527233:2080] 1751275249740376 != 1751275249740379 2025-06-30T09:20:49.844014Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:49.844047Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:49.845005Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:50.741854Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:52.012039Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521669935683457963:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:52.012059Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpEFk2ST/pdisk_1.dat 2025-06-30T09:20:52.022872Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:52.022992Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7521669935683457941:2080] 1751275252011938 != 1751275252011941 2025-06-30T09:20:52.025055Z node 6 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#32,[::1]:2966) connection closed with error: Connection refused 2025-06-30T09:20:52.025168Z node 6 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:20:52.115781Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:52.115827Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:52.116793Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:53.013581Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:54.274234Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521669945100791789:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:54.274251Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpg3SUKW/pdisk_1.dat 2025-06-30T09:20:54.286089Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:54.286244Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7521669945100791770:2080] 1751275254274148 != 1751275254274151 2025-06-30T09:20:54.288294Z node 7 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#34,[::1]:1469) connection closed with error: Connection refused 2025-06-30T09:20:54.288366Z node 7 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:20:54.377717Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:20:54.377742Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:20:54.378827Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:20:55.275625Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:20:56.542126Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7521669952025975145:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:20:56.5421 ... :14.983306Z node 16 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#38,[::1]:23110) connection closed with error: Connection refused 2025-06-30T09:21:14.983412Z node 16 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:21:14.987915Z node 16 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [16:7521670029035546777:2080] 1751275274921527 != 1751275274921530 2025-06-30T09:21:14.989361Z node 16 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:15.051214Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:15.051254Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:15.051864Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:15.923727Z node 16 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpiwXYPt/pdisk_1.dat 2025-06-30T09:21:17.300263Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:17.311477Z node 17 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:28078) connection closed with error: Connection refused 2025-06-30T09:21:17.311571Z node 17 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:21:17.315557Z node 17 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:17.400152Z node 17 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:17.400196Z node 17 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:17.401510Z node 17 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:18.301231Z node 17 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpHI7Siu/pdisk_1.dat 2025-06-30T09:21:19.650866Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:19.651485Z node 18 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#28,[::1]:22294) connection closed with error: Connection refused 2025-06-30T09:21:19.655096Z node 18 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:21:19.655334Z node 18 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:19.727579Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:19.727616Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:19.729509Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:20.627142Z node 18 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpJ8KKUd/pdisk_1.dat 2025-06-30T09:21:22.012201Z node 19 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#30,[::1]:31010) connection closed with error: Connection refused 2025-06-30T09:21:22.014553Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:22.014988Z node 19 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:21:22.022153Z node 19 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:22.024316Z node 19 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [19:7521670059315032710:2080] 1751275281955535 != 1751275281955538 2025-06-30T09:21:22.079328Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:22.079360Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:22.081816Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:22.954948Z node 19 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:24.250252Z node 20 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[20:7521670073393315538:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:24.250270Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpOnWjiv/pdisk_1.dat 2025-06-30T09:21:24.285034Z node 20 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#32,[::1]:5992) connection closed with error: Connection refused 2025-06-30T09:21:24.285736Z node 20 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:21:24.285874Z node 20 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:24.353987Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:24.354018Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:24.354949Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:25.250644Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:26.614958Z node 21 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[21:7521670082437569265:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:26.615108Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpl4MnNK/pdisk_1.dat 2025-06-30T09:21:26.632789Z node 21 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:26.636141Z node 21 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#34,[::1]:25754) connection closed with error: Connection refused 2025-06-30T09:21:26.636716Z node 21 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:21:26.718363Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:26.718400Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:26.723236Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:27.615596Z node 21 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:28.944451Z node 22 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[22:7521670088499394228:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:28.945505Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: 
SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpCVA4bx/pdisk_1.dat 2025-06-30T09:21:28.960418Z node 22 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#36,[::1]:10818) connection closed with error: Connection refused 2025-06-30T09:21:28.960557Z node 22 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:21:28.962811Z node 22 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [22:7521670088499394198:2080] 1751275288944203 != 1751275288944206 2025-06-30T09:21:28.963728Z node 22 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:29.044844Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:29.044873Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:29.046115Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:29.947203Z node 22 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:31.281524Z node 23 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[23:7521670105292252555:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:31.281544Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0043b0/r3tmp/tmpsfNnO2/pdisk_1.dat 2025-06-30T09:21:31.297125Z node 23 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:31.300120Z node 23 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#38,[::1]:12565) connection closed with error: Connection refused 2025-06-30T09:21:31.300281Z node 23 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-30T09:21:31.386786Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:31.386821Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:31.388002Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:32.283408Z node 23 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::DropNonExistingResourcePoolClassifier [GOOD] Test command err: Trying to start YDB, gRPC: 31028, MsgBus: 8359 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00479a/r3tmp/tmpqjAwNU/pdisk_1.dat 2025-06-30T09:21:22.406728Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670065085079291:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:22.409385Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:22.452308Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:22.457837Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670065085079080:2080] 1751275282312896 != 1751275282312899 TServer::EnableGrpc on GrpcPort 31028, node 1 2025-06-30T09:21:22.503283Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:22.503297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:22.503299Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:22.503340Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:22.518971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:22.519001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:22.523090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8359 TClient is connected to server localhost:8359 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:22.634432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:22.639684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:22.651837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:22.694268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:22.735857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:22.769977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:23.177089Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670069380047978:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:23.177126Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:23.242524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.260601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.293843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.310987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.322905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.325522Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:23.337738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.352423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.378847Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670069380048630:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:23.378873Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:23.378988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670069380048635:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:23.380110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:23.390955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:23.393083Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670069380048637:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:23.468786Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670069380048697:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:23.778877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:23.880533Z node 1 :FLAT_TX_SCHEMESHARD WARN: s ... localhost:62078 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:31.666757Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:31.671155Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:31.711571Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:31.738946Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:31.771131Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:31.788262Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:32.231122Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670106196241609:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:32.231150Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:32.239640Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.298900Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.309145Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.323105Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.336421Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.351552Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.368132Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.395969Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670106196242268:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:32.396003Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:32.396114Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670106196242273:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:32.397233Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:32.400506Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:32.400598Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670106196242275:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:32.499749Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670106196242326:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:32.504563Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:32.667621Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.764361Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:32.868166Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.963888Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:33.066907Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:33.178526Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) >> KqpScheme::ModifyPermissions [GOOD] >> KqpScheme::ModifyPermissionsByRelativePath >> DstCreator::ExistingDst [GOOD] >> DstCreator::EmptyReplicationConfig >> DstCreator::WithSyncIndexAndIntermediateDir >> DstCreator::NonExistentSrc >> KqpScheme::CreateAlterUserWithHash [GOOD] >> KqpScheme::CreateAlterUserLoginNoLogin >> 
KqpConstraints::IndexedTableAndNotNullColumnAddNotNullColumn [GOOD] >> DstCreator::ReplicationModeMismatch >> TRestoreWithRebootsTests::ShouldSucceedOnMultiShardTable[Zstd] [GOOD] >> TRestoreWithRebootsTests::ShouldSucceedOnMultipleFrames >> DstCreator::ColumnsSizeMismatch [GOOD] >> DstCreator::ColumnTypeMismatch >> KqpConstraints::SerialTypeSerial2 [GOOD] >> KqpConstraints::SerialTypeSerial >> DstCreator::WithSyncIndex >> KqpScheme::DisableMetadataObjectsOnServerless [GOOD] >> DstCreator::GlobalConsistency [GOOD] >> DstCreator::KeyColumnNameMismatch >> DstCreator::SameOwner [GOOD] >> DstCreator::SamePartitionCount >> KqpOlapScheme::DropColumnAfterInsert [GOOD] >> KqpOlapScheme::CreateWithoutColumnFamily ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::ResourcePoolClassifiersRankValidation [GOOD] Test command err: Trying to start YDB, gRPC: 1563, MsgBus: 28615 2025-06-30T09:21:21.385193Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670060423630779:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:21.386194Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047a3/r3tmp/tmpY10C2A/pdisk_1.dat 2025-06-30T09:21:21.473752Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1563, node 1 2025-06-30T09:21:21.484182Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:21.484219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:21.485194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:21.492518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:21.492532Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:21.492535Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:21.492581Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28615 TClient is connected to server localhost:28615 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:21.571891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:21.575401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:21.587884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:21.672306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.753321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.828044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:21.897445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670060423632241:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.897480Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:21.960816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.973776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.992348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.008509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.021888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.042664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.057642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.090834Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670064718600189:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.090887Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.091070Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670064718600194:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.092133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:22.095981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:22.096120Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670064718600196:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:22.169808Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670064718600247:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:22.391097Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:23.474397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715917:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:23.492116Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [1:7521670069013570866:3105], TxId: 281474976715908, task: 1. Ctx: { TraceId : 01jz0288yc5vg453da5ghp5a7a. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=O ... eshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:31.397492Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:31.400124Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:31.414019Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:31.443378Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:31.460457Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:31.718766Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670103887398852:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:31.718811Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:31.727816Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.746072Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.775998Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.807106Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.823650Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.856568Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.881296Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:31.918994Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670103887399503:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:31.919038Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:31.919154Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670103887399508:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:31.920301Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:31.923647Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:31.923757Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670103887399510:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:32.008512Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670108182366857:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:32.295454Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:32.298095Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.433491Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:32.502149Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.579475Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.662038Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:32.749523Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715689:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:33.193817Z node 6 :KQP_GATEWAY WARN: query_actor.cpp:372: [TQueryBase] [TRanksCheckerActor] TraceId: /Root, Finish with ALREADY_EXISTS, Issues: {
: Error: Classifier with rank 42 already exists, its name ClassifierRank42 }, SessionId: ydb://session/3?node_id=6&id=ZWI3NmFkZDktNzI1MjJiZTQtMjk4MzA4MDQtNmYyYWFiOGE=, TxId: 01jz028jg2dhr9b48wq14ehv1q 2025-06-30T09:21:33.679533Z node 6 :KQP_GATEWAY WARN: query_actor.cpp:372: [TQueryBase] [TRanksCheckerActor] TraceId: /Root, Finish with ALREADY_EXISTS, Issues: {
: Error: Classifier with rank 42 already exists, its name ClassifierRank42 }, SessionId: ydb://session/3?node_id=6&id=NDJkYTU1ZTktNGIxNWRkYTQtMmEyYzYzN2YtNDZkNWYwYzg=, TxId: 01jz028jyq7afw387xbk0bvk8q 2025-06-30T09:21:33.695480Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670112477335516:2798], DatabaseId: /Root, PoolId: test_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-30T09:21:33.695507Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool test_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } >> KqpScheme::CreateExternalTableValidation [GOOD] >> KqpScheme::CreateExternalTableWithSettings ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpConstraints::IndexedTableAndNotNullColumnAddNotNullColumn [GOOD] Test command err: Trying to start YDB, gRPC: 6764, MsgBus: 28122 2025-06-30T09:21:16.577681Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670040478422752:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:16.577725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047ea/r3tmp/tmp6yqaWs/pdisk_1.dat 2025-06-30T09:21:16.651262Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:16.655999Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 6764, node 1 2025-06-30T09:21:16.675090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:16.675116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:16.676119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:16.682948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:16.682959Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:16.682961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:16.683005Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28122 TClient is connected to server localhost:28122 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:16.761355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:21:16.763566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:16.764960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:16.807955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:16.841862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:16.911745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:17.016184Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670044773391427:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:17.016216Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:17.077398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.088267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.100820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.111379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.123644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.138618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.153839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:17.176015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670044773392080:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:17.176060Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:17.176230Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670044773392085:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:17.177036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:17.179044Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670044773392087:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:17.244531Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670044773392138:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:17.577214Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:17.604795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 17777, MsgBus: 13582 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047ea/r3tmp/tmp4S0IQd/pdisk_1.dat 2025-06-30T09:21:17.998132Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:75216700418688 ... rt proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.574322Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.592789Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.604220Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:32.624792Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670108989328582:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:32.624822Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:32.625174Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670108989328588:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:32.625970Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:32.628418Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670108989328590:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:32.712608Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670108989328641:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:32.929881Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:32.981603Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:33.357611Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710757:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:33.385557Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:21:33.431761Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710762:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:33.457528Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710765:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:21:33.522553Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710767:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:33.542730Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710770:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first 
called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:21:33.603415Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710772:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:33.627924Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710775:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:21:33.686042Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710777:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:33.716490Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710780:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:21:33.804883Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710782:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:33.851786Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710785:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:21:33.967765Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710787:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:33.989115Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710790:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 
2025-06-30T09:21:34.070066Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710792:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:34.102686Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710795:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:21:34.169011Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710797:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:34.194251Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710800:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query [GOOD] Test command err: 2025-06-30T09:19:14.025341Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669516782167701:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:14.029734Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a53/r3tmp/tmpgAUUMy/pdisk_1.dat 2025-06-30T09:19:14.063401Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:19:14.106009Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:14.106377Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669516782167481:2080] 1751275154016758 != 1751275154016761 TServer::EnableGrpc on GrpcPort 11595, node 1 2025-06-30T09:19:14.150478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004a53/r3tmp/yandexqtLkQO.tmp 2025-06-30T09:19:14.150494Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004a53/r3tmp/yandexqtLkQO.tmp 2025-06-30T09:19:14.150582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004a53/r3tmp/yandexqtLkQO.tmp 2025-06-30T09:19:14.150627Z node 1 :NET_CLASSIFIER 
ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:14.165044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:14.165079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:14.168724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:14.176000Z INFO: TTestServer started on Port 64007 GrpcPort 11595 TClient is connected to server localhost:64007 PQClient connected to localhost:11595 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:14.275812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:19:14.283397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:14.303283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:19:14.309631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:19:14.400782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:14.422888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:19:14.685696Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669516782168256:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:14.685781Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:14.686966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669516782168268:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:14.688114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:14.691064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-30T09:19:14.691258Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669516782168270:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:19:14.748969Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669516782168334:2435] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:14.758623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:14.787142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:14.795949Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669516782168342:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:19:14.796589Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NmEzZjdkZmItNzdiMmFiNmMtNmIxMDI2MTQtYTg4ZTA2ODg=, ActorId: [1:7521669516782168253:2296], ActorState: ExecuteState, TraceId: 01jz024b7ff3f9gcrx37dvc61k, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:19:14.797258Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:19:14.843895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669516782168626:2609] 2025-06-30T09:19:15.026838Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:19.029861Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669516782167701:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:19.029935Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:19:20.123335Z :Sinks_Oltp_WriteToTopic_1_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:19:20.130048Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:19:20.137213Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521669542551972629:2704] connected; active server actors: 1 2025-06-30T09:19:20.137300Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-30T09:19:20.137474Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:19:20.137516Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_ ... losed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:21:33.770912Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer test-consumer session test-consumer_5_3_16260020861641811786_v1 grpc read done: success# 0, data# { } 2025-06-30T09:21:33.770932Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer test-consumer session test-consumer_5_3_16260020861641811786_v1 grpc read failed 2025-06-30T09:21:33.770941Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer test-consumer session test-consumer_5_3_16260020861641811786_v1 grpc closed 2025-06-30T09:21:33.770950Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer test-consumer session test-consumer_5_3_16260020861641811786_v1 is DEAD 2025-06-30T09:21:33.771236Z node 5 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [5:7521670084681858739:2555]: session cookie 4 consumer test-consumer session test-consumer_5_3_16260020861641811786_v1 grpc read done: success# 0, data# { } 2025-06-30T09:21:33.771250Z node 5 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [5:7521670084681858739:2555]: session cookie 4 consumer test-consumer session test-consumer_5_3_16260020861641811786_v1grpc read failed 2025-06-30T09:21:33.771255Z node 5 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [5:7521670084681858739:2555]: session cookie 4 consumer test-consumer session test-consumer_5_3_16260020861641811786_v1 grpc closed 2025-06-30T09:21:33.771258Z node 5 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [5:7521670084681858739:2555]: session cookie 4 consumer test-consumer session test-consumer_5_3_16260020861641811786_v1 proxy is DEAD 2025-06-30T09:21:33.771800Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037897][topic_B] pipe [5:7521670084681858729:2548] disconnected; active server actors: 1 2025-06-30T09:21:33.771816Z node 5 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037897][topic_B] pipe [5:7521670084681858729:2548] client test-consumer disconnected session test-consumer_5_3_16260020861641811786_v1 2025-06-30T09:21:33.771847Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037896] Destroy direct read session test-consumer_5_3_16260020861641811786_v1 2025-06-30T09:21:33.771855Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [5:7521670084681858732:2551] destroyed 2025-06-30T09:21:33.771866Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_5_3_16260020861641811786_v1 2025-06-30T09:21:33.774799Z :INFO: [/Root] [/Root] [c0530a85-31f79927-74efcd52-933cdbf2] Closing read session. Close timeout: 0.000000s 2025-06-30T09:21:33.774824Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:3:4 2025-06-30T09:21:33.774834Z :INFO: [/Root] [/Root] [c0530a85-31f79927-74efcd52-933cdbf2] Counters: { Errors: 0 CurrentSessionLifetimeMs: 8038 BytesRead: 40 MessagesRead: 4 BytesReadCompressed: 40 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:21:33.774856Z :NOTICE: [/Root] [/Root] [c0530a85-31f79927-74efcd52-933cdbf2] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:21:33.774867Z :DEBUG: [/Root] [/Root] [c0530a85-31f79927-74efcd52-933cdbf2] [] Abort session to cluster 2025-06-30T09:21:33.775014Z :DEBUG: [/Root] 0x00007169F9A9DB10 TDirectReadSessionManager ServerSessionId=test-consumer_5_1_15003441557129310709_v1 Close 2025-06-30T09:21:33.775060Z :DEBUG: [/Root] 0x00007169F9A9DB10 TDirectReadSessionManager ServerSessionId=test-consumer_5_1_15003441557129310709_v1 Close 2025-06-30T09:21:33.775075Z :NOTICE: [/Root] [/Root] [c0530a85-31f79927-74efcd52-933cdbf2] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:21:33.775622Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|24bfff40-dc3b2493-fcd058ff-d4f985c4_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2025-06-30T09:21:33.775631Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|24bfff40-dc3b2493-fcd058ff-d4f985c4_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:21:33.775637Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|24bfff40-dc3b2493-fcd058ff-d4f985c4_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:21:33.775379Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_5_1_15003441557129310709_v1 grpc read done: success# 0, data# { } 2025-06-30T09:21:33.775398Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_5_1_15003441557129310709_v1 grpc read failed 2025-06-30T09:21:33.775405Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_5_1_15003441557129310709_v1 grpc closed 2025-06-30T09:21:33.775415Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_5_1_15003441557129310709_v1 is DEAD 2025-06-30T09:21:33.775723Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|24bfff40-dc3b2493-fcd058ff-d4f985c4_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:21:33.775536Z node 5 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [5:7521670076091924054:2514]: session cookie 2 consumer test-consumer session test-consumer_5_1_15003441557129310709_v1 grpc read done: success# 0, data# { } 2025-06-30T09:21:33.775728Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|24bfff40-dc3b2493-fcd058ff-d4f985c4_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:21:33.775542Z node 5 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [5:7521670076091924054:2514]: session cookie 2 consumer test-consumer session test-consumer_5_1_15003441557129310709_v1grpc read failed 2025-06-30T09:21:33.775551Z node 5 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [5:7521670076091924054:2514]: session cookie 2 consumer test-consumer session test-consumer_5_1_15003441557129310709_v1 grpc closed 2025-06-30T09:21:33.775555Z node 5 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [5:7521670076091924054:2514]: session cookie 2 consumer test-consumer session test-consumer_5_1_15003441557129310709_v1 proxy is DEAD 2025-06-30T09:21:33.776071Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [5:7521670076091924033:2507] disconnected; active server actors: 1 2025-06-30T09:21:33.776082Z node 5 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [5:7521670076091924033:2507] client test-consumer disconnected session test-consumer_5_1_15003441557129310709_v1 2025-06-30T09:21:33.776110Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_5_1_15003441557129310709_v1 2025-06-30T09:21:33.776116Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [5:7521670076091924043:2512] destroyed 2025-06-30T09:21:33.776125Z node 5 :PQ_WRITE_PROXY DEBUG: 
write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message_group_id|24bfff40-dc3b2493-fcd058ff-d4f985c4_0 grpc read done: success: 0 data: 2025-06-30T09:21:33.776127Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message_group_id|24bfff40-dc3b2493-fcd058ff-d4f985c4_0 grpc read failed 2025-06-30T09:21:33.776133Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message_group_id|24bfff40-dc3b2493-fcd058ff-d4f985c4_0 grpc closed 2025-06-30T09:21:33.776136Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message_group_id|24bfff40-dc3b2493-fcd058ff-d4f985c4_0 is DEAD 2025-06-30T09:21:33.776299Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:21:33.776306Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:21:33.776334Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_5_1_15003441557129310709_v1 2025-06-30T09:21:33.776432Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [5:7521670076091924019:2504] destroyed 2025-06-30T09:21:33.776444Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [5:7521670076091924024:2504] destroyed 2025-06-30T09:21:33.776455Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:21:33.783072Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|67f588f8-1b6f11d3-b0fb5e54-77c3cf6b_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-30T09:21:33.783086Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|67f588f8-1b6f11d3-b0fb5e54-77c3cf6b_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:21:33.783096Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|67f588f8-1b6f11d3-b0fb5e54-77c3cf6b_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:21:33.783270Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|67f588f8-1b6f11d3-b0fb5e54-77c3cf6b_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:21:33.783276Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|67f588f8-1b6f11d3-b0fb5e54-77c3cf6b_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:21:33.785420Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|67f588f8-1b6f11d3-b0fb5e54-77c3cf6b_0 grpc read done: success: 0 data: 2025-06-30T09:21:33.785433Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|67f588f8-1b6f11d3-b0fb5e54-77c3cf6b_0 grpc read failed 2025-06-30T09:21:33.785444Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|67f588f8-1b6f11d3-b0fb5e54-77c3cf6b_0 grpc closed 2025-06-30T09:21:33.785448Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|67f588f8-1b6f11d3-b0fb5e54-77c3cf6b_0 is DEAD 2025-06-30T09:21:33.785796Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:21:33.785804Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:21:33.786090Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [5:7521670076091923967:2493] destroyed 2025-06-30T09:21:33.786099Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [5:7521670076091923970:2493] destroyed 2025-06-30T09:21:33.786111Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> DstCreator::EmptyReplicationConfig [GOOD] >> DstCreator::WithSyncIndexAndIntermediateDir [GOOD] >> DstCreator::NonExistentSrc [GOOD] >> DstCreator::KeyColumnsSizeMismatch >> KqpScheme::Int8Int16Olap [GOOD] >> LocalTableWriter::WaitTxIds [GOOD] >> KqpScheme::ChangefeedOnIndexTable [GOOD] >> TChargeBTreeIndex::FewNodes_Sticky [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History >> DstCreator::ReplicationModeMismatch [GOOD] >> DstCreator::ReplicationConsistencyLevelMismatch >> DstCreator::ColumnTypeMismatch [GOOD] >> DstCreator::KeyColumnNameMismatch [GOOD] >> DstCreator::WithSyncIndex [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::DisableMetadataObjectsOnServerless [GOOD] Test command err: 2025-06-30T09:21:19.234687Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670052338736287:2155];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047cf/r3tmp/tmpFkyaHE/pdisk_1.dat 2025-06-30T09:21:19.286486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:19.347164Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:19.359525Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:19.359556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:19.375041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28647, node 1 2025-06-30T09:21:19.426974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:19.426986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:19.426989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:19.427043Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3360 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:19.519571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:19.535095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:19.712452Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-30T09:21:19.712607Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670052338736921:2291], Start check tables existence, number paths: 2 2025-06-30T09:21:19.720021Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-30T09:21:19.720044Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-30T09:21:19.720059Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 3 2025-06-30T09:21:19.721983Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670052338736921:2291], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-30T09:21:19.721992Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670052338736921:2291], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-30T09:21:19.721996Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670052338736921:2291], Successfully finished 2025-06-30T09:21:19.723052Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-30T09:21:19.723510Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZjlhMzNkNDYtYWYxMjE2MjYtZDJiNDA2ZDktMjE1ZDcwZjI=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZjlhMzNkNDYtYWYxMjE2MjYtZDJiNDA2ZDktMjE1ZDcwZjI= 2025-06-30T09:21:19.723699Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZjlhMzNkNDYtYWYxMjE2MjYtZDJiNDA2ZDktMjE1ZDcwZjI=, ActorId: [1:7521670052338736939:2293], ActorState: unknown state, session actor bootstrapped 2025-06-30T09:21:19.751849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:21:19.762461Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670050645434994:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:19.762985Z node 3 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-dedicated/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:19.767799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:19.767830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:19.770408Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-30T09:21:19.771005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:19.820931Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:19.820995Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:19.821007Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:19.821022Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:19.821031Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:19.821056Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:19.821066Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:19.821077Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:19.821086Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:19.882875Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:19.882905Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:19.891162Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:19.936412Z node 3 :STATISTICS WARN: tx_init.cpp:287: [72075186224037894] TTxInit::Complete. 
EnableColumnStatistics=false 2025-06-30T09:21:19.936557Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:20.100951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:21:20.123613Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670057155438314:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:20.123714Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-shared/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:20.131388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:20.131415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:20.134335Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:21:20.139361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:20.180289Z node 3 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-30T09:21:20.180597Z node 3 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7521670054940402833:2305], Start check tables existence, number paths: 2 2025-06-30T09:21:20.180614Z node 3 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-30T09:21:20.180618Z node 3 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-30T09:21:20.181450Z node 3 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 3 2025-06-30T09:21:20.181466Z node 3 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7521670054940402833:2305], Describe table /Root/test-dedicated/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-30T09:21:20.181476Z node 3 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7521670054940402 ... 
ext: SELECT * FROM `//Root/test-shared/.metadata/secrets/values`; rpcActor: [9:7521670118252657906:3018] database: /Root/test-shared databaseId: /Root/test-shared pool id: default 2025-06-30T09:21:34.191846Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=9&id=MTA0MWE1ZGYtYTUwMGQ0MjAtZjg0OTUzOTMtMTFlNTIxN2I=, ActorId: [9:7521670118252657905:3017], ActorState: ReadyState, TraceId: 01jz028kff7vrw30tnj9b21678, request placed into pool from cache: default 2025-06-30T09:21:34.191866Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:618: SessionId: ydb://session/3?node_id=9&id=MTA0MWE1ZGYtYTUwMGQ0MjAtZjg0OTUzOTMtMTFlNTIxN2I=, ActorId: [9:7521670118252657905:3017], ActorState: ExecuteState, TraceId: 01jz028kff7vrw30tnj9b21678, Sending CompileQuery request 2025-06-30T09:21:34.194208Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][9:7521670101072786777:2984][/Root/test-shared/.metadata/secrets/values] Sync is incomplete in one of the ring groups: cookie# 50 2025-06-30T09:21:34.194233Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][9:7521670101072786777:2984][/Root/test-shared/.metadata/secrets/values] Sync is incomplete in one of the ring groups: cookie# 51 2025-06-30T09:21:34.194470Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7521670118252657908:3019], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]
: Error: LookupError, code: 2005 2025-06-30T09:21:34.194555Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=9&id=MTA0MWE1ZGYtYTUwMGQ0MjAtZjg0OTUzOTMtMTFlNTIxN2I=, ActorId: [9:7521670118252657905:3017], ActorState: ExecuteState, TraceId: 01jz028kff7vrw30tnj9b21678, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-06-30T09:21:34.194574Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=9&id=MTA0MWE1ZGYtYTUwMGQ0MjAtZjg0OTUzOTMtMTFlNTIxN2I=, ActorId: [9:7521670118252657905:3017], ActorState: ExecuteState, TraceId: 01jz028kff7vrw30tnj9b21678, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:34.194578Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=9&id=MTA0MWE1ZGYtYTUwMGQ0MjAtZjg0OTUzOTMtMTFlNTIxN2I=, ActorId: [9:7521670118252657905:3017], ActorState: ExecuteState, TraceId: 01jz028kff7vrw30tnj9b21678, EndCleanup, isFinal: 0 2025-06-30T09:21:34.194616Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2368: SessionId: ydb://session/3?node_id=9&id=MTA0MWE1ZGYtYTUwMGQ0MjAtZjg0OTUzOTMtMTFlNTIxN2I=, ActorId: [9:7521670118252657905:3017], ActorState: ExecuteState, TraceId: 01jz028kff7vrw30tnj9b21678, Sent query response back to proxy, proxyRequestId: 82, proxyId: [9:7521670092482850519:2147] 2025-06-30T09:21:34.194854Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-06-30T09:21:34.194940Z node 9 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]
: Error: LookupError, code: 2005 2025-06-30T09:21:34.194956Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2413: SessionId: ydb://session/3?node_id=9&id=MTA0MWE1ZGYtYTUwMGQ0MjAtZjg0OTUzOTMtMTFlNTIxN2I=, ActorId: [9:7521670118252657905:3017], ActorState: ReadyState, Session closed due to explicit close event 2025-06-30T09:21:34.194962Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=9&id=MTA0MWE1ZGYtYTUwMGQ0MjAtZjg0OTUzOTMtMTFlNTIxN2I=, ActorId: [9:7521670118252657905:3017], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:34.194964Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=9&id=MTA0MWE1ZGYtYTUwMGQ0MjAtZjg0OTUzOTMtMTFlNTIxN2I=, ActorId: [9:7521670118252657905:3017], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-30T09:21:34.194967Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2644: SessionId: ydb://session/3?node_id=9&id=MTA0MWE1ZGYtYTUwMGQ0MjAtZjg0OTUzOTMtMTFlNTIxN2I=, ActorId: [9:7521670118252657905:3017], ActorState: unknown state, Cleanup temp tables: 0 2025-06-30T09:21:34.194984Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2735: SessionId: ydb://session/3?node_id=9&id=MTA0MWE1ZGYtYTUwMGQ0MjAtZjg0OTUzOTMtMTFlNTIxN2I=, ActorId: [9:7521670118252657905:3017], ActorState: unknown state, Session actor destroyed 2025-06-30T09:21:34.456104Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg== 2025-06-30T09:21:34.456286Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: unknown state, session actor bootstrapped 2025-06-30T09:21:34.456392Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: ReadyState, TraceId: 01jz028kqratk2eb9qeqzsr2tv, received request, proxyRequestId: 84 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: SELECT * FROM `//Root/test-shared/.metadata/secrets/values`; rpcActor: [9:7521670118252657919:3025] database: /Root/test-shared databaseId: /Root/test-shared pool id: default 2025-06-30T09:21:34.456407Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: ReadyState, TraceId: 01jz028kqratk2eb9qeqzsr2tv, request placed into pool from cache: default 2025-06-30T09:21:34.456427Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:618: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: ExecuteState, TraceId: 01jz028kqratk2eb9qeqzsr2tv, Sending CompileQuery request 2025-06-30T09:21:34.458876Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][9:7521670101072786777:2984][/Root/test-shared/.metadata/secrets/values] Sync is incomplete in one of the ring groups: cookie# 52 2025-06-30T09:21:34.458899Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: 
[main][9:7521670101072786777:2984][/Root/test-shared/.metadata/secrets/values] Sync is incomplete in one of the ring groups: cookie# 53 2025-06-30T09:21:34.461343Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7521670118252657921:3026], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]
: Error: LookupError, code: 2005 2025-06-30T09:21:34.461503Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: ExecuteState, TraceId: 01jz028kqratk2eb9qeqzsr2tv, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-06-30T09:21:34.461520Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: ExecuteState, TraceId: 01jz028kqratk2eb9qeqzsr2tv, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:34.461525Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: ExecuteState, TraceId: 01jz028kqratk2eb9qeqzsr2tv, EndCleanup, isFinal: 0 2025-06-30T09:21:34.461576Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2368: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: ExecuteState, TraceId: 01jz028kqratk2eb9qeqzsr2tv, Sent query response back to proxy, proxyRequestId: 84, proxyId: [9:7521670092482850519:2147] 2025-06-30T09:21:34.461860Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-06-30T09:21:34.461952Z node 9 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]
: Error: LookupError, code: 2005 2025-06-30T09:21:34.461967Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2413: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: ReadyState, Session closed due to explicit close event 2025-06-30T09:21:34.461973Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:21:34.461976Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-30T09:21:34.461979Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2644: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: unknown state, Cleanup temp tables: 0 2025-06-30T09:21:34.461998Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2735: SessionId: ydb://session/3?node_id=9&id=ZmEzZjZhNC1mNjhkNDVlNy01ZjIyZWM2MC02YjJiMDdlYg==, ActorId: [9:7521670118252657918:3024], ActorState: unknown state, Session actor destroyed |81.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |81.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |81.4%| [LD] {RESULT} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut >> TConsoleTests::TestRemoveSharedTenantAfterRemoveServerlessTenant [GOOD] >> TConsoleTests::TestRemoveServerlessTenant >> KqpAcl::AclRevoke+UseSink+IsOlap [GOOD] >> DstCreator::SamePartitionCount [GOOD] >> KqpOlapScheme::CreateWithoutColumnFamily [GOOD] >> KqpOlapScheme::DropColumnAndResetTtl >> DstCreator::WithIntermediateDir >> DstCreator::Basic ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithSyncIndexAndIntermediateDir [GOOD] Test command err: 2025-06-30T09:21:34.411221Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670116869819450:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:34.448386Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00406b/r3tmp/tmpySYvrr/pdisk_1.dat 2025-06-30T09:21:34.483240Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:65504 TServer::EnableGrpc on GrpcPort 26153, node 1 2025-06-30T09:21:34.523455Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:34.523471Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:34.523474Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:34.523533Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65504 2025-06-30T09:21:34.563433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.563475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.571261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:34.605433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:34.608929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275294713 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294657 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275294713 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... (TRUNCATED) 2025-06-30T09:21:34.689484Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.689528Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.689530Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:34.689721Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:34.925505Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275294713, tx_id: 281474976715658 } } } 2025-06-30T09:21:34.925658Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:34.926364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.927115Z node 
1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-30T09:21:34.927119Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-30T09:21:34.942458Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 TClient::Ls request: /Root/Dir/Replicated 2025-06-30T09:21:34.942888Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dir/Replicated" PathDescription { Self { Name: "Replicated" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294986 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InM ... 
pshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 8 PathOwnerId: 72057594046644480 } 2025-06-30T09:21:34.954269Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 2] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 8] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "index_by_value" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294986 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294986 ParentPathId: 7 PathState: EPathStateCreate 
Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { ... (TRUNCATED) TClient::Ls request: /Root/Dir/Replicated/index_by_value/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294986 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } ... (TRUNCATED) Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294986 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 
ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } Path: "/Root/Dir/Replicated/index_by_value/indexImplTable" ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::EmptyReplicationConfig [GOOD] Test command err: 2025-06-30T09:21:33.508726Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670113651389577:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:33.508872Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0040b7/r3tmp/tmpWuvtw9/pdisk_1.dat 2025-06-30T09:21:33.605156Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles 
were not loaded 2025-06-30T09:21:33.608513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:33.608545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:33.612839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:33.615030Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TClient is connected to server localhost:24835 TServer::EnableGrpc on GrpcPort 31011, node 1 2025-06-30T09:21:33.658990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:33.659007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:33.659009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:33.659069Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24835 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:33.747813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:33.763287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:33.767557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:33.835506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275293796 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275293908 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275293796 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275293908 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-30T09:21:33.883230Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:33.883262Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:33.883264Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:33.883446Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:34.180791Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275293873, tx_id: 281474976715658 } } } 2025-06-30T09:21:34.180880Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:34.181384Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:481} 2025-06-30T09:21:34.181860Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275293908 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: 
"value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8 ... athId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:34.452497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:34.454703Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:34.476697Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.476730Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.478106Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:21:34.510316Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294503 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1751275294573 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294503 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1751275294573 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-30T09:21:34.536283Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.536312Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.536315Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:34.536522Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:34.834157Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275294552, tx_id: 281474976710658 } } } 2025-06-30T09:21:34.834241Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:34.834677Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:481} 2025-06-30T09:21:34.835008Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1751275294573 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: 
"value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { 
MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-30T09:21:34.835048Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Empty replication config >> TConsoleTests::TestAuthorizationExtSubdomain [GOOD] >> TConsoleTests::TestAttributesExtSubdomain >> KqpScheme::ModifyPermissionsByRelativePath [GOOD] >> KqpScheme::ModifyPermissionsByIncorrectPaths ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::ColumnTypeMismatch [GOOD] Test command err: 2025-06-30T09:21:33.691473Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670113069175689:2238];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00408d/r3tmp/tmpjun7E4/pdisk_1.dat 2025-06-30T09:21:33.811108Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:33.841108Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:22072 TServer::EnableGrpc on GrpcPort 26094, node 1 2025-06-30T09:21:33.879374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:33.879402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:33.879882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:33.903023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:33.903037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:33.903039Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:33.903088Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22072 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:33.984206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:33.988813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:33.989998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:34.068112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294034 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294125 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294034 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294125 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-30T09:21:34.089993Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.090023Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.090026Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:34.090227Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:34.332677Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275294104, tx_id: 281474976715658 } } } 2025-06-30T09:21:34.332789Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:34.333296Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: 
ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:481} 2025-06-30T09:21:34.334007Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294125 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "extra" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } Backup ... 615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:21:34.635482Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.635520Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.636308Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:34.637797Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:21:34.640059Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.653554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294685 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294713 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294685 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294713 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-30T09:21:34.681093Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.681126Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.681127Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:34.681288Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:35.027309Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275294699, tx_id: 281474976715658 } } } 2025-06-30T09:21:35.027425Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:35.027953Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:481} 2025-06-30T09:21:35.028249Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294713 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: 
"value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 
DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-30T09:21:35.028289Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Column type mismatch: name: value, expected: Utf8, got: Uint32 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::Int8Int16Olap [GOOD] Test command err: Trying to start YDB, gRPC: 31632, MsgBus: 19991 2025-06-30T09:21:26.830946Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670081208007229:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:26.831142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004780/r3tmp/tmpV1nhaU/pdisk_1.dat TServer::EnableGrpc on GrpcPort 31632, node 1 2025-06-30T09:21:26.947374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:26.947400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:26.959763Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:26.966104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:26.978964Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:26.978980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:26.978982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:26.979030Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19991 TClient is connected to server localhost:19991 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:27.085601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:27.088164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:27.094416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:27.133196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:27.166983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:27.204058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:27.419525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670085502976068:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.419559Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.463822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.473081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.484250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.499963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.555878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.567966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.583284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.602926Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670085502976723:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.602947Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.603056Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670085502976728:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.604016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:27.609271Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670085502976730:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:27.668061Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670085502976781:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:27.832802Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:27.839240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.840480Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037911 not found Trying to start YDB, gRPC: 10643, MsgBus: 20477 2025-06-30T09:21:28.166928Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670090404508840:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T0 ... atabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:34.516777Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:34.519811Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670116249384110:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:34.571610Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670116249384161:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:34.753545Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:34.769947Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:34.770062Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:34.770141Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:34.770167Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:34.770192Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:34.770217Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:34.770244Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:34.770275Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:34.770298Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:34.770321Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:34.770345Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:34.770371Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:34.775401Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:34.775429Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:34.775449Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:34.775456Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:34.775485Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:34.775492Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:34.775505Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:34.775513Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:34.775523Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:34.775530Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:34.775569Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:34.775578Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:34.775608Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:34.775624Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:34.775642Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:34.775649Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:34.775660Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:21:34.775669Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:21:34.775676Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:21:34.775787Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:21:34.775796Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:21:34.775814Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:21:34.775819Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:21:34.781721Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[6:7521670116249384474:2475];ev=NActors::IEventHandle;tablet_id=72075186224037922;tx_id=281474976715672;this=18221295986240;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275294780;max=18446744073709551615;plan=0;src=[6:7521670111954414881:2149];cookie=352:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:21:34.788875Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:21:34.791952Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=368;columns=4; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=368;columns=4; 2025-06-30T09:21:34.846897Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:34.881867Z node 6 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275294839, txId: 18446744073709551615] shutting down >> DstCreator::KeyColumnsSizeMismatch [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::WaitTxIds [GOOD] Test command err: 2025-06-30T09:21:33.489598Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670113237956227:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:33.507112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00475c/r3tmp/tmp08qhY5/pdisk_1.dat 2025-06-30T09:21:33.630550Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:27220 TServer::EnableGrpc on GrpcPort 32248, node 1 2025-06-30T09:21:33.682559Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:33.682579Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:33.682582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:33.682646Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27220 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:33.743888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:33.751122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:33.752368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:33.827156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:33.827194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:33.835275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275293964 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-30T09:21:33.932031Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handshake: worker# [1:7521670113237956741:2349] 2025-06-30T09:21:33.932141Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:21:33.932249Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-30T09:21:33.932286Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Send handshake: worker# [1:7521670113237956741:2349] 2025-06-30T09:21:33.932424Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-30T09:21:33.933420Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-30T09:21:33.933469Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-30T09:21:33.933527Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670113237956744:2348] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-30T09:21:33.933539Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: 
[LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:33.933554Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670113237956744:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-30T09:21:33.934785Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670113237956744:2348] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:33.934808Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:33.934817Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } 2025-06-30T09:21:34.489834Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:34.932780Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 } 2025-06-30T09:21:34.932847Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 49 }] } 2025-06-30T09:21:34.932884Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670113237956744:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 11 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] } 2025-06-30T09:21:34.936413Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670113237956744:2348] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:34.936450Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:34.936463Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670113237956740:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2] } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithSyncIndex [GOOD] Test command err: 2025-06-30T09:21:34.645579Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670116538680759:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:34.645832Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004054/r3tmp/tmpltFc9p/pdisk_1.dat 2025-06-30T09:21:34.732560Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:34.734676Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:21:34.748339Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.748367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.749424Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10111 TServer::EnableGrpc on GrpcPort 7788, node 1 2025-06-30T09:21:34.785343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:34.785355Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:34.785357Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:34.785406Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10111 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:21:34.867436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:34.871155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1751275294958 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294916 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1751275294958 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... 
(TRUNCATED) 2025-06-30T09:21:34.936529Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.936570Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.936572Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:34.936742Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:35.146991Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275294958, tx_id: 281474976710658 } } } 2025-06-30T09:21:35.147124Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:35.147665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:35.148378Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-30T09:21:35.148382Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-30T09:21:35.167268Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-30T09:21:35.167777Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Replicated" PathDescription { Self { Name: "Replicated" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 
281474976710659 CreateStep: 1751275295210 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Thre ... 
0 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 7 PathOwnerId: 72057594046644480 } 2025-06-30T09:21:35.177464Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 2] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 7] TClient::Ls request: /Root/Replicated/index_by_value TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "index_by_value" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1751275295210 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1751275295210 ParentPathId: 6 PathState: 
EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { ... (TRUNCATED) TClient::Ls request: /Root/Replicated/index_by_value/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1751275295210 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } ... (TRUNCATED) Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1751275295210 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 
ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } Path: "/Root/Replicated/index_by_value/indexImplTable" >> KqpConstraints::SerialTypeSerial [GOOD] >> KqpConstraints::SerialTypeSerial4 >> KqpScheme::CreateAlterUserLoginNoLogin [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::ChangefeedOnIndexTable [GOOD] Test command err: Trying to start YDB, gRPC: 15110, MsgBus: 15130 2025-06-30T09:21:18.783962Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:18.784048Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:21:18.784060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047e8/r3tmp/tmpaJfRjD/pdisk_1.dat 2025-06-30T09:21:18.882548Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 15110, node 1 TClient is connected to server localhost:15130 TClient is connected to server localhost:15130 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:18.974186Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:18.974208Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:18.974212Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:18.974284Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:18.974372Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275277652172 != 1751275277652176 2025-06-30T09:21:19.058841Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:19.058876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:19.059656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:19.062370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:19.158791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:19.387905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:19.531382Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:19.718926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:19.957363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:20.342410Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1686:3281], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.342464Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.354298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.539625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.775988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.023873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.273166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.495599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:21.798115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.075752Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2357:3776], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.075799Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.075895Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2362:3781], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:22.077371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:22.242562Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2364:3783], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:22.290871Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2422:3822] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:22.488071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:22.671872Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__create.cpp:23: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: DoExecute TxId: 281474976715673 DatabaseName: "/Root" S ... p; Trying to start YDB, gRPC: 31367, MsgBus: 25379 2025-06-30T09:21:34.134684Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670117348423867:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:34.135677Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047e8/r3tmp/tmpNgUTYl/pdisk_1.dat 2025-06-30T09:21:34.152160Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31367, node 6 2025-06-30T09:21:34.160177Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:34.160190Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:34.160192Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:34.160244Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25379 TClient is connected to server localhost:25379 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:21:34.239647Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.239681Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.240156Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:34.243173Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:34.243801Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.308078Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:34.336287Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:34.354042Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.506640Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670117348425416:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:34.506664Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:34.519139Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.529679Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.539720Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.554243Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.568201Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.582676Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.598047Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.685304Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670117348426071:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:34.685339Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:34.685514Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670117348426076:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:34.686650Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:34.689807Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:34.689903Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670117348426078:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:34.769069Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670117348426129:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:34.965317Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.992061Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670117348426603:3698] txid# 281474976715673, issues: { message: "Cannot add changefeed to index table" severity: 1 } 2025-06-30T09:21:35.015863Z node 6 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037924:1][6:7521670121643394023:2497] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:23:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::KeyColumnNameMismatch [GOOD] Test command err: 2025-06-30T09:21:33.953240Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670110586334843:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:33.953275Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004082/r3tmp/tmpC7AR7q/pdisk_1.dat 2025-06-30T09:21:34.083636Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:8913 TServer::EnableGrpc on GrpcPort 25701, node 1 2025-06-30T09:21:34.135445Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.135515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.135959Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:34.147018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:34.147034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:34.147037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:34.147087Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8913 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:34.215607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:34.219272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275294321 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294265 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275294321 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-30T09:21:34.289404Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.289488Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.289496Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:34.289669Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:34.460732Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275294321, tx_id: 281474976715658 } } } 2025-06-30T09:21:34.460843Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:34.461259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.461484Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-30T09:21:34.461487Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 TClient::Ls request: /Root/Replicated 2025-06-30T09:21:34.469032Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-30T09:21:34.469050Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: 
EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294517 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-30T09:21:34.676017Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004082/r3tmp/tmpUSgxxB/pdisk_1.dat 2025-06-30T09:21:34.701765Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:23954 TServer::EnableGrpc on GrpcPort 3714, node 2 2025-06-30T09:21:34.728255Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:34.728275Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:34.728278Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:34.728333Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23954 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: ... . 2025-06-30T09:21:34.775817Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.775858Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.776282Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:34.778097Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:34.778428Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:34.779563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:34.793025Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294825 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294853 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294825 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294853 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-30T09:21:34.813488Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.813520Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.813522Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:34.815091Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:35.127046Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275294839, tx_id: 281474976715658 } } } 2025-06-30T09:21:35.127161Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:35.127690Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:481} 2025-06-30T09:21:35.128028Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294853 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: 
"value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 
DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-30T09:21:35.128076Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Key column name mismatch: position: 0, expected: key, got: value ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::SamePartitionCount [GOOD] Test command err: 2025-06-30T09:21:34.043031Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670116886793751:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:34.043235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004079/r3tmp/tmp3UaKxf/pdisk_1.dat 2025-06-30T09:21:34.118795Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:14366 TServer::EnableGrpc on GrpcPort 23882, node 1 2025-06-30T09:21:34.147130Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.147162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.151216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:34.152734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:34.152738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:34.152740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:34.152790Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14366 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:34.193707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:34.197267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:34.198404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1751275294300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294244 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1751275294300 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-30T09:21:34.267191Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.267220Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.267221Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:34.267343Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:34.502136Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275294300, tx_id: 281474976710659 } } } 2025-06-30T09:21:34.502277Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:34.502853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.503165Z node 
1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710660} 2025-06-30T09:21:34.503179Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710660 2025-06-30T09:21:34.513913Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710660 2025-06-30T09:21:34.513932Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1751275294559 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004079/r3tmp/tmpNrqWvi/pdisk_1.dat 2025-06-30T09:21:34.754987Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:34.756060Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:31636 TServer::EnableGrpc on GrpcPort 8525, node 2 2025-06-30T09:21:34.792447Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:34.792461Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:34.792464Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:34.792515Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31636 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:34.842862Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.842893Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.843576Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:34.843807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:34.845804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:34.846764Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275294909 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294888 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275294909 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-30T09:21:34.871228Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.871258Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.871260Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:34.871471Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:35.177318Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275294909, tx_id: 281474976715658 } } } 2025-06-30T09:21:35.177421Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:35.177945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:35.178382Z node 
2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-30T09:21:35.178384Z node 2 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 TClient::Ls request: /Root/Table 2025-06-30T09:21:35.194683Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-30T09:21:35.194705Z node 2 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275294909 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275295238 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... 
(TRUNCATED) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpAcl::AclRevoke+UseSink+IsOlap [GOOD] Test command err: Trying to start YDB, gRPC: 20515, MsgBus: 18785 2025-06-30T09:21:19.829043Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670053872435266:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:19.829066Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047c6/r3tmp/tmp8FbU9F/pdisk_1.dat 2025-06-30T09:21:19.933731Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:19.933775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:19.938230Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:19.938797Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670053872435245:2080] 1751275279828901 != 1751275279828904 2025-06-30T09:21:19.945594Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20515, node 1 2025-06-30T09:21:19.971455Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:19.971467Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:19.971470Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:19.971513Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18785 TClient is connected to server localhost:18785 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:20.064208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:20.069023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:21:20.072839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:20.103295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:20.146761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:20.165027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.246727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670058167404139:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.246919Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.307118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.318027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.334013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.344332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.359031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.371661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.388189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.408049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670058167404792:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.408081Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.408234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670058167404797:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.409299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:20.413183Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670058167404799:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:21:20.488444Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670058167404850:3398] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:20.706117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:20.727134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[1:7521670058167405189:2480];tablet_id=72075186224037923;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:20.727209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[1:7521670058167405189:2480];tablet_id=7207518622403 ... T09:21:34.747028Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7521670118289350802:2867], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Cannot find table 'db.[/Root/test_acl]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:21:34.747686Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=7&id=NmY1YTMwYy1hMDg5ZTQ2ZS0yOGM5OGI4ZC03ZGRhODViNg==, ActorId: [7:7521670118289350279:2789], ActorState: ExecuteState, TraceId: 01jz028m0n26dvz44adkps9mya, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:21:34.759997Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:34.797875Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715708. Ctx: { TraceId: 01jz028m1z2z1gxacp0532e6f5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.802128Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715709. Ctx: { TraceId: 01jz028m1z2z1gxacp0532e6f5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.807772Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715709;tx_id=281474976715709;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715709; 2025-06-30T09:21:34.813280Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715710. Ctx: { TraceId: 01jz028m2w8n805qkstzspz08v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.818486Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715711. Ctx: { TraceId: 01jz028m2w8n805qkstzspz08v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.824885Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715711;tx_id=281474976715711;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715711; 2025-06-30T09:21:34.834427Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715712. Ctx: { TraceId: 01jz028m3fd1m73x9521atafxs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.840347Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715713. Ctx: { TraceId: 01jz028m3fd1m73x9521atafxs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:21:34.848203Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715713;tx_id=281474976715713;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715713; 2025-06-30T09:21:34.856580Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715714. Ctx: { TraceId: 01jz028m47c8r33kb6162av5b4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.859566Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715715. Ctx: { TraceId: 01jz028m47c8r33kb6162av5b4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.863560Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715715;tx_id=281474976715715;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715715; 2025-06-30T09:21:34.876454Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715716. Ctx: { TraceId: 01jz028m4kd0pbgts0q74nasd2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.879960Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715717. Ctx: { TraceId: 01jz028m4kd0pbgts0q74nasd2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.883106Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715717;tx_id=281474976715717;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715717; 2025-06-30T09:21:34.899077Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715718. Ctx: { TraceId: 01jz028m5b5h2h9ww7a7yc2rsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.903139Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715719. Ctx: { TraceId: 01jz028m5b5h2h9ww7a7yc2rsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.906755Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715719;tx_id=281474976715719;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715719; 2025-06-30T09:21:34.912937Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715720. Ctx: { TraceId: 01jz028m5z7542j4vvzxf9baq3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:21:34.917195Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715721. Ctx: { TraceId: 01jz028m5z7542j4vvzxf9baq3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.919281Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715721;tx_id=281474976715721;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715721; 2025-06-30T09:21:34.924682Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715722. Ctx: { TraceId: 01jz028m6b2skabvtrdn891zrx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.928930Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715723. Ctx: { TraceId: 01jz028m6b2skabvtrdn891zrx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.934759Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715723;tx_id=281474976715723;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715723; 2025-06-30T09:21:34.943819Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715724. Ctx: { TraceId: 01jz028m6vb73n1vxtkd3njs9n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.947557Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715725. Ctx: { TraceId: 01jz028m6vb73n1vxtkd3njs9n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.950279Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715725;tx_id=281474976715725;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715725; 2025-06-30T09:21:34.957052Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715726. Ctx: { TraceId: 01jz028m7bcjfza2x8gzsk4n0t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:34.960810Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715727. Ctx: { TraceId: 01jz028m7bcjfza2x8gzsk4n0t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:21:34.969833Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715727;tx_id=281474976715727;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715727; 2025-06-30T09:21:34.981467Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715728:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:34.984515Z node 7 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [7:7521670118289351127:5521], for# user0@builtin, access# UpdateRow 2025-06-30T09:21:34.984628Z node 7 :KQP_EXECUTER ERROR: kqp_table_resolver.cpp:275: TxId: 281474976715729. Error resolving keys for entry: { TableId: [OwnerId: 72057594046644480, LocalPathId: 17] Access: 2 SyncVersion: false Status: AccessDenied Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Uint64 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-30T09:21:34.984712Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=7&id=NTk3ZTQ5MGMtYmE1Y2U3N2UtOWQ0YmEwOTgtYTY0Zjg=, ActorId: [7:7521670118289350845:2880], ActorState: ExecuteState, TraceId: 01jz028m8721zapqeq8g40q8js, Create QueryResponse for error on request, msg: >> DstCreator::ReplicationConsistencyLevelMismatch [GOOD] >> DstCreator::WithIntermediateDir [GOOD] >> DstCreator::WithAsyncIndex >> KqpScheme::CreateExternalTableWithSettings [GOOD] >> KqpScheme::CreateExternalTableWithUpperCaseSettings >> CheckIntegrityBlock42::PlacementWrongDisks >> CheckIntegrityBlock42::PlacementOkWithErrors >> DstCreator::Basic [GOOD] >> DstCreator::CannotFindColumn ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::KeyColumnsSizeMismatch [GOOD] Test command err: 2025-06-30T09:21:34.491262Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670117008919655:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:34.491761Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00405e/r3tmp/tmp4hA83N/pdisk_1.dat 2025-06-30T09:21:34.548766Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:22715 TServer::EnableGrpc on GrpcPort 23213, node 1 2025-06-30T09:21:34.578801Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:34.578813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:34.578816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:34.578869Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:34.592702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.592758Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.594339Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22715 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:34.628963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294678 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294678 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... 
(TRUNCATED) 2025-06-30T09:21:34.635553Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.635678Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.635686Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:34.635850Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:34.928986Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { status: SCHEME_ERROR, issues: } } 2025-06-30T09:21:34.929005Z node 1 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Cannot describe table: status: SCHEME_ERROR, issue: 2025-06-30T09:21:35.186381Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670121493779826:2093];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:35.187274Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00405e/r3tmp/tmp71v6io/pdisk_1.dat 2025-06-30T09:21:35.215668Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:26299 TServer::EnableGrpc on GrpcPort 7146, node 2 2025-06-30T09:21:35.225943Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:35.225962Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:35.225964Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:35.226010Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26299 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:35.291798Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:35.291834Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:35.292450Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:35.295291Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:21:35.310910Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:35.323565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:35.354922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275295336 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275295406 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275295336 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275295406 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-30T09:21:35.370564Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:35.370591Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:35.370593Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:35.370819Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:35.631900Z node 2 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275295392, tx_id: 281474976715658 } } } 2025-06-30T09:21:35.632004Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:35.632532Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:481} 2025-06-30T09:21:35.633130Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275295406 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnNames: "value" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 
16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-30T09:21:35.633178Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Key columns size mismatch: expected: 1, got: 2 >> KqpOlapScheme::DropColumnAndResetTtl [GOOD] >> CheckIntegrityBlock42::PlacementWrongDisks [GOOD] >> CheckIntegrityMirror3dc::DataErrorOneCopy >> CheckIntegrityMirror3of4::PlacementOk >> CheckIntegrityMirror3dc::PlacementOk ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateAlterUserLoginNoLogin [GOOD] Test command err: Trying to start YDB, gRPC: 4013, MsgBus: 65300 2025-06-30T09:21:28.199923Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670089920721560:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:28.201869Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/00477d/r3tmp/tmpDTfoUH/pdisk_1.dat 2025-06-30T09:21:28.266805Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4013, node 1 2025-06-30T09:21:28.288525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:28.288541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:28.288543Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:28.288593Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65300 TClient is connected to server localhost:65300 2025-06-30T09:21:28.340549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:28.340579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:28.341674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:28.359633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:28.370437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:28.395688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:28.458636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:28.473109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.621925Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670089920723129:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.621946Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.672048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.680464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.695675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.709880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.727038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.736472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.756096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.775722Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670089920723780:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.775763Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.775839Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670089920723785:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.776696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:28.779143Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670089920723787:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:28.879544Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670089920723838:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:29.063755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient::Ls request: /Root/TableWithMinMaxPartitions TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableWithMinMaxPartitions" PathId: 17 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715672 CreateStep: 1751275289113 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithMinMaxPartitions" Colu ... 5-06-30T09:21:34.155633Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521670115121921068:3645] txid# 281474976710687, issues: { message: "Field \'type\' must be equal \"argon2id\"" severity: 1 } 2025-06-30T09:21:34.166472Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521670115121921083:3654] txid# 281474976710689, issues: { message: "Cannot parse hash value; it should be in JSON-format" severity: 1 } 2025-06-30T09:21:34.179533Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521670115121921100:3665] txid# 281474976710691, issues: { message: "There should be strictly three fields here: salt, hash and type" severity: 1 } 2025-06-30T09:21:34.191557Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521670115121921116:3675] txid# 281474976710693, issues: { message: "Field \'hash\' must be in base64 format" severity: 1 } 2025-06-30T09:21:34.202360Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521670115121921132:3685] txid# 281474976710695, issues: { message: "Field \'salt\' must be in base64 format" severity: 1 } Trying to start YDB, gRPC: 28182, MsgBus: 1889 2025-06-30T09:21:34.549740Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670118143348020:2094];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00477d/r3tmp/tmpgxE2G5/pdisk_1.dat 2025-06-30T09:21:34.550091Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:34.589034Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28182, node 6 2025-06-30T09:21:34.600407Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:34.600421Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:34.600423Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:34.600489Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1889 2025-06-30T09:21:34.650001Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.650039Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.651766Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1889 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:34.686892Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:34.694316Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:34.713498Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:34.788680Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:34.824031Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:35.103606Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670122438316854:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:35.103642Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:35.112621Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:35.140435Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:35.201324Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:35.270511Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:35.285335Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:35.305951Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:35.367953Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:35.396601Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670122438317514:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:35.396674Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:35.396952Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670122438317519:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:35.398286Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:35.402713Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670122438317521:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:35.460044Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670122438317572:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:35.571052Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::ReplicationConsistencyLevelMismatch [GOOD] Test command err: 2025-06-30T09:21:34.583039Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670114553929811:2088];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:34.584780Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00405a/r3tmp/tmpKmlT4v/pdisk_1.dat 2025-06-30T09:21:34.664950Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:34.683017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:34.683049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:34.687191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16097 TServer::EnableGrpc on GrpcPort 27421, node 1 2025-06-30T09:21:34.715090Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:34.715104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:34.715106Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:34.715166Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16097 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:34.771381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:34.775098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:34.776166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:34.806276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294818 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294860 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275294818 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294860 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-30T09:21:34.821072Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.821098Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:34.821100Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:34.821269Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:35.107762Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275294839, tx_id: 281474976715658 } } } 2025-06-30T09:21:35.107909Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:35.108640Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: 
ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:481} 2025-06-30T09:21:35.109376Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275294860 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 ... lse } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:21:35.587790Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:35.590097Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:21:35.595761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:35.664859Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275295637 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275295721 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275295637 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275295721 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-30T09:21:35.691128Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:35.691164Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:35.691166Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:35.691530Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:36.059471Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275295707, tx_id: 281474976715658 } } } 2025-06-30T09:21:36.059575Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:36.060044Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:481} 2025-06-30T09:21:36.060362Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275295721 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: 
"value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_GLOBAL } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 
DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-30T09:21:36.060400Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Replication consistency level mismatch: expected: CONSISTENCY_LEVEL_ROW, got: 1 >> DstCreator::WithAsyncIndex [GOOD] >> CheckIntegrityBlock42::PlacementOkWithErrors [GOOD] >> CheckIntegrityBlock42::PlacementWithErrorsOnBlobDisks >> CheckIntegrityMirror3dc::DataErrorOneCopy [GOOD] >> CheckIntegrityMirror3dc::DataErrorManyCopies >> CheckIntegrityBlock42::PlacementOk >> CheckIntegrityMirror3of4::PlacementOk [GOOD] >> CheckIntegrityMirror3of4::PlacementMissingParts >> CheckIntegrityMirror3dc::PlacementOkWithErrors >> CheckIntegrityBlock42::PlacementWithErrorsOnBlobDisks [GOOD] >> CheckIntegrityBlock42::PlacementStatusUnknown ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpOlapScheme::DropColumnAndResetTtl [GOOD] Test command err: Trying to start YDB, gRPC: 6757, MsgBus: 2038 2025-06-30T09:21:30.169132Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670100576821226:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:30.169151Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004774/r3tmp/tmpXfVOZU/pdisk_1.dat 2025-06-30T09:21:30.236876Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 6757, node 1 2025-06-30T09:21:30.241339Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:30.250967Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:30.250986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:30.250989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:30.251036Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2038 2025-06-30T09:21:30.271115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:30.271157Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:30.271741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2038 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:21:30.319934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) CREATE TABLE `/Root/ColumnTableTest` (id Int32 NOT NULL, resource_id Utf8, level Int32, PRIMARY KEY (id)) PARTITION BY HASH(id) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:21:30.580887Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670100576821811:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.580919Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.623862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:21:30.636362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:21:30.636433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:21:30.636485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:21:30.636501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:21:30.636518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:21:30.636531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:30.636553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:30.636572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:30.636592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:30.636613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:30.636637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:30.636663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521670100576821874:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:30.642258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:30.642283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:30.642300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:30.642308Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:30.642330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:30.642337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:30.642351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:30.642357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:30.642366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:30.642373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:30.642419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:30.642425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:30.642446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:30.642453Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:30.642467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:30.642475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:30.642482Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for ... log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670123056847908:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:21:36.175241Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670123056847908:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:21:36.175266Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670123056847908:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:21:36.175292Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670123056847908:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:21:36.175312Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670123056847908:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:21:36.175332Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670123056847908:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:21:36.175356Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670123056847908:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:21:36.179909Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:21:36.179934Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:21:36.179949Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:21:36.179957Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:21:36.179984Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:21:36.179990Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:21:36.180001Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:21:36.180008Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:21:36.180016Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:21:36.180023Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:21:36.180056Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:21:36.180063Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:21:36.180088Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:21:36.180096Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:21:36.180110Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:21:36.180116Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:21:36.180126Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:21:36.180134Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:21:36.180140Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:21:36.180214Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:21:36.180221Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:21:36.180249Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:21:36.180253Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:21:36.183609Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670123056847908:2297];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=123932341839424;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275296183;max=18446744073709551615;plan=0;src=[6:7521670118761880287:2159];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:36.186923Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:21:36.188350Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:21:36.198851Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670123056847978:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:36.198894Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:36.200984Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:21:36.212310Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:21:36.227985Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670123056848005:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:36.228017Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:36.236772Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:21:36.244633Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:21:36.261598Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670123056848037:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:36.261652Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:36.263052Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:21:36.272870Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; >> CheckIntegrityMirror3dc::PlacementOk [GOOD] >> CheckIntegrityMirror3dc::PlacementOkHandoff >> CheckIntegrityBlock42::PlacementOk [GOOD] >> CheckIntegrityMirror3dc::DataErrorManyCopies [GOOD] >> CheckIntegrityBlock42::PlacementOkHandoff >> DstCreator::CannotFindColumn [GOOD] >> CheckIntegrityMirror3of4::PlacementMissingParts [GOOD] >> CheckIntegrityMirror3of4::PlacementDisintegrated >> CheckIntegrityBlock42::PlacementBlobIsLost ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithAsyncIndex [GOOD] Test command err: 2025-06-30T09:21:35.614408Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670120644487097:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:35.614806Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00404e/r3tmp/tmpDCBJg6/pdisk_1.dat 2025-06-30T09:21:35.703689Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:35.708883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:35.708911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:35.711255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25441 TServer::EnableGrpc on GrpcPort 17634, node 1 2025-06-30T09:21:35.743084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:35.743100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:35.743102Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:35.743152Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25441 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:35.796517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:35.801142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:35.802315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275295903 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275295847 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275295903 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-30T09:21:35.880840Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:35.880921Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:35.880924Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:35.883314Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:36.086989Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275295903, tx_id: 281474976715658 } } } 2025-06-30T09:21:36.087114Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:36.087744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.088270Z node 
1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-30T09:21:36.088273Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-30T09:21:36.146319Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-30T09:21:36.146336Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 4] TClient::Ls request: /Root/Dir/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275296190 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00404e/r3tmp/tmpA2KbkO/pdisk_1.dat 2025-06-30T09:21:36.422820Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:36.443406Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:36.446432Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670125174662217:2080] 1751275296407852 != 1751275296407855 TClient is connected to server localhost:62000 TServer::EnableGrpc on GrpcPort 28039, node 2 2025-06-30T09:21:36.474789Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:36.474806Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:36.474809Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:36.474861Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62000 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:36.523258Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:36.523287Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:36.523639Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:36.524998Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:21:36.528615Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:36.529759Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275296624 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275296568 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275296624 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... (TRUNCATED) 2025-06-30T09:21:36.597109Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:36.597139Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:36.597141Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:36.597326Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:36.845055Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275296624, tx_id: 281474976715658 } } } 2025-06-30T09:21:36.845194Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:36.845707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.846188Z node 
2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-30T09:21:36.846192Z node 2 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 TClient::Ls request: /Root/Replicated 2025-06-30T09:21:36.871863Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-30T09:21:36.871878Z node 2 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 5] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275296911 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key... (TRUNCATED) |81.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |81.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |81.4%| [LD] {RESULT} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut >> CheckIntegrityBlock42::PlacementStatusUnknown [GOOD] >> CheckIntegrityBlock42::DataOk >> LocalTableWriter::DataAlongWithHeartbeat >> CheckIntegrityBlock42::PlacementOkHandoff [GOOD] >> CheckIntegrityBlock42::PlacementMissingParts >> KqpScheme::ModifyPermissionsByIncorrectPaths [GOOD] >> KqpConstraints::SerialTypeSerial4 [GOOD] >> KqpConstraints::SerialTypeSerial8 >> CheckIntegrityMirror3dc::PlacementOkHandoff [GOOD] >> CheckIntegrityMirror3dc::PlacementMissingParts >> CheckIntegrityMirror3dc::PlacementOkWithErrors [GOOD] >> CheckIntegrityMirror3dc::PlacementOkWithErrorsOnBlobDisks >> CheckIntegrityMirror3dc::PlacementBlobIsLost >> CheckIntegrityBlock42::PlacementBlobIsLost [GOOD] >> CheckIntegrityBlock42::PlacementAllOnHandoff >> CheckIntegrityBlock42::DataErrorAdditionalUnequalParts >> CheckIntegrityBlock42::DataOk [GOOD] >> CheckIntegrityBlock42::DataOkAdditionalEqualParts >> CheckIntegrityBlock42::PlacementMissingParts [GOOD] >> CheckIntegrityMirror3of4::PlacementDisintegrated [GOOD] >> LocalTableWriter::ConsistentWrite >> LocalTableWriter::ApplyInCorrectOrder ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::DataErrorManyCopies [GOOD] Test command err: RandomSeed# 11350465534204112947 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 ], ver1 disks [ 2 ] ERROR: There are unequal parts *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 2 ], ver1 disks [ 3 4 5 ] ERROR: There are unequal parts >> CheckIntegrityMirror3dc::PlacementMissingParts [GOOD] >> LocalTableWriter::DecimalKeys >> CheckIntegrityBlock42::DataErrorAdditionalUnequalParts [GOOD] >> CheckIntegrityBlock42::DataErrorSixPartsOneBroken >> CheckIntegrityBlock42::DataOkAdditionalEqualParts [GOOD] >> CheckIntegrityBlock42::DataErrorSixPartsTwoBroken >> CheckIntegrityMirror3dc::PlacementOkWithErrorsOnBlobDisks [GOOD] >> CheckIntegrityMirror3of4::PlacementBlobIsLost ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::PlacementStatusUnknown [GOOD] Test command err: RandomSeed# 16316098134319074999 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::ModifyPermissionsByIncorrectPaths [GOOD] Test command err: Trying to start YDB, gRPC: 11280, MsgBus: 64719 2025-06-30T09:21:28.482796Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670088566149895:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:28.482826Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00477b/r3tmp/tmpYbm3E1/pdisk_1.dat 2025-06-30T09:21:28.544147Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11280, node 1 2025-06-30T09:21:28.580903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:28.580917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:28.580919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:28.580972Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64719 2025-06-30T09:21:28.622800Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:28.622832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:28.624133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64719 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:21:28.658906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:28.661538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:28.673364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:28.755649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.787535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:28.808151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:28.955484Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670088566151464:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:28.955521Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.018050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.027939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.038056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.053020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.067086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.080525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.102898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.123555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670092861119415:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.123592Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.123662Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670092861119420:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:29.124673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:29.127745Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670092861119422:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:29.216362Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670092861119473:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:29.439301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:29.442660Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037911 not found 2025-06-30T09:21:29.479773Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 32420, MsgBus: 22874 2025-06-30T09:21:29.892330Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670096106604041:2087];send_to=[0:7307199536658146131:7762515]; 2025-06-30T0 ... shard/schemeshard__operation_modify_acl.cpp:101) Trying to start YDB, gRPC: 13588, MsgBus: 17416 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00477b/r3tmp/tmp7Y9AJs/pdisk_1.dat 2025-06-30T09:21:35.895106Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670121832022691:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:35.904948Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:35.928415Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7521670121832022480:2080] 1751275295887754 != 1751275295887757 2025-06-30T09:21:35.930550Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13588, node 6 2025-06-30T09:21:35.940881Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:35.940897Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:35.940899Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:35.940956Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17416 2025-06-30T09:21:36.001486Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:36.001527Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:36.005699Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17416 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:21:36.030956Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:36.036951Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:21:36.046500Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:36.069127Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:36.098640Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:36.127819Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:36.609910Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670126126991358:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:36.610040Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:36.614081Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.630365Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.692254Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.704960Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.730340Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.745696Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.762675Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.784931Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670126126992022:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:36.784960Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:36.785040Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670126126992027:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:36.786109Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:36.789878Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670126126992029:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:21:36.855834Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670126126992080:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:36.887974Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:37.273142Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670130421959668:3580] txid# 281474976710673, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-30T09:21:37.273422Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::CannotFindColumn [GOOD] Test command err: 2025-06-30T09:21:35.716870Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670120892970056:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:35.718805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004048/r3tmp/tmpGX2Xr5/pdisk_1.dat 2025-06-30T09:21:35.815122Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:35.823344Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:35.823382Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:35.825579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1854 TServer::EnableGrpc on GrpcPort 13722, node 1 2025-06-30T09:21:35.878992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:35.879006Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:35.879008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:35.879055Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1854 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:35.963486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:35.966056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:21:35.967140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1751275296071 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275296015 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1751275296071 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-30T09:21:36.052252Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:36.052283Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:36.052285Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:36.052469Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:36.262449Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275296071, tx_id: 281474976710658 } } } 2025-06-30T09:21:36.262565Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:36.263091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.263455Z node 
1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-30T09:21:36.263461Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-30T09:21:36.287004Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-30T09:21:36.287019Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1751275296330 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-30T09:21:36.657382Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670126485659236:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:36.658989Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004048/r3tmp/tmpaq6GMG/pdisk_1.dat 2025-06-30T09:21:36.674598Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:26558 TServer::EnableGrpc on GrpcPort 24607, node 2 2025-06-30T09:21:36.714201Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:36.714216Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:36.714219Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:36.714275Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26558 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId ... CATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:21:36.761624Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:36.761662Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:36.762165Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:36.763890Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:36.764336Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:21:36.769456Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.796477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275296806 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275296855 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1751275296806 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275296855 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-30T09:21:36.815713Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:36.815747Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-30T09:21:36.815749Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-30T09:21:36.815927Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-30T09:21:37.239481Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275296841, tx_id: 281474976715658 } } } 2025-06-30T09:21:37.239563Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-30T09:21:37.240036Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: 
ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:481} 2025-06-30T09:21:37.240328Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1751275296855 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 
PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-30T09:21:37.240362Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Cannot find column: name: value >> KqpScheme::CreateExternalTableWithUpperCaseSettings [GOOD] >> CheckIntegrityBlock42::PlacementAllOnHandoff [GOOD] >> CheckIntegrityBlock42::PlacementDisintegrated >> CheckIntegrityMirror3dc::PlacementBlobIsLost [GOOD] >> CheckIntegrityMirror3dc::PlacementDisintegrated >> CheckIntegrityBlock42::DataErrorSixPartsOneBroken [GOOD] >> CheckIntegrityBlock42::DataErrorFivePartsOneBroken >> LocalTableWriter::DataAlongWithHeartbeat [GOOD] >> CheckIntegrityBlock42::DataErrorSixPartsTwoBroken [GOOD] >> CheckIntegrityBlock42::DataOkErasureFiveParts >> CheckIntegrityMirror3of4::PlacementBlobIsLost [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3of4::PlacementDisintegrated [GOOD] Test command err: RandomSeed# 5118771720539971256 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** Group is disintegrated or has network problems ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::PlacementMissingParts [GOOD] Test command err: RandomSeed# 3367930734865874715 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED 
WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:4:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** |81.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::PlacementMissingParts [GOOD] Test command err: RandomSeed# 14378165551116911511 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** |81.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest >> CheckIntegrityBlock42::PlacementDisintegrated [GOOD] >> CheckIntegrityBlock42::DataStatusUnknown >> CheckIntegrityBlock42::DataErrorFivePartsOneBroken [GOOD] >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken >> KqpSecrets::RequireDbPrefixInSecretWithValidName [GOOD] >> CheckIntegrityMirror3dc::PlacementDisintegrated [GOOD] >> CheckIntegrityMirror3dc::DataOk >> LocalTableWriter::ApplyInCorrectOrder [GOOD] >> CheckIntegrityBlock42::DataOkErasureFiveParts [GOOD] |81.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateExternalTableWithUpperCaseSettings [GOOD] Test command err: Trying to start YDB, gRPC: 21907, MsgBus: 27860 2025-06-30T09:21:29.663692Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670096669811099:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:29.663791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004777/r3tmp/tmpJQalZZ/pdisk_1.dat 2025-06-30T09:21:29.752815Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:29.765553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:29.765574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 21907, node 1 2025-06-30T09:21:29.768139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:29.784581Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:29.784605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:29.784607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:29.784655Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27860 TClient is connected to server localhost:27860 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:29.880648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:29.886588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:29.904085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:29.987198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:30.040270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.079855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:30.181103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670100964779776:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.181142Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.248938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.260177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.270802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.330056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.344264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.356802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.368004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.384412Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670100964780430:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.384442Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.384445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670100964780435:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.385329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:30.395514Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670100964780437:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:30.463848Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670100964780488:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:30.662890Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 14919, MsgBus: 15900 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004777/r3tmp/tmp7aYMrq/pdisk_1.dat 2025-06-30T09:21:30.975030Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670100283667825:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:30.976763Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:30.996669Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14919, node 2 2025-06-30T09:21:31.007720Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, b ... o start YDB, gRPC: 7835, MsgBus: 15625 2025-06-30T09:21:36.503680Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670122908188293:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:36.505663Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004777/r3tmp/tmpbpeQTc/pdisk_1.dat 2025-06-30T09:21:36.544153Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7835, node 6 2025-06-30T09:21:36.562942Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:36.562960Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:36.562962Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:36.563023Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15625 2025-06-30T09:21:36.587250Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:36.587286Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:36.587935Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15625 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:36.652057Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:36.656163Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:36.667956Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:36.692643Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.728968Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:36.756321Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:37.263370Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670127203157115:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:37.263410Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:37.275211Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:37.296539Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:37.314185Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:37.334291Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:37.356337Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:37.370765Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:37.382317Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:37.400756Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670127203157768:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:37.400795Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:37.401106Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670127203157773:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:37.402109Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:37.410395Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670127203157775:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:37.484445Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:37.490118Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670127203157835:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:37.677226Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:21:37.680718Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:352) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3of4::PlacementBlobIsLost [GOOD] Test command err: RandomSeed# 10846119606352479610 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:2:0] FINISHED WITH OK *** >> LocalTableWriter::ConsistentWrite [GOOD] >> LocalTableWriter::DecimalKeys [GOOD] >> CheckIntegrityBlock42::DataStatusUnknown [GOOD] >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken [GOOD] |81.4%| [TA] $(B)/ydb/core/tx/replication/controller/ut_dst_creator/test-results/unittest/{meta.json ... 
results_accumulator.log} |81.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::DataAlongWithHeartbeat [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004740/r3tmp/tmpuyby02/pdisk_1.dat 2025-06-30T09:21:37.765549Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:37.785187Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:12138 TServer::EnableGrpc on GrpcPort 23531, node 1 2025-06-30T09:21:37.818887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:37.818903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:37.818906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:37.818989Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:37.825874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:37.825911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:37.826938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12138 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:37.882052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:37.885939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:37.887075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275297989 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) 2025-06-30T09:21:37.964700Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670130982549713:2350] Handshake: worker# [1:7521670130982549714:2351] 2025-06-30T09:21:37.964815Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670130982549713:2350] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:21:37.964869Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670130982549713:2350] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-30T09:21:37.964894Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670130982549713:2350] Send 
handshake: worker# [1:7521670130982549714:2351] 2025-06-30T09:21:37.967025Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670130982549713:2350] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 19b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-30T09:21:37.973325Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670130982549713:2350] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-30T09:21:37.973379Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670130982549713:2350] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-30T09:21:37.973433Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670130982549717:2350] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-30T09:21:37.973438Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670130982549713:2350] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:37.973450Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670130982549717:2350] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-30T09:21:37.974719Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670130982549717:2350] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:37.974731Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670130982549713:2350] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:37.974749Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670130982549713:2350] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } >> CheckIntegrityMirror3dc::DataOk [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::ConsistentWrite [GOOD] Test command err: 2025-06-30T09:21:37.972953Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670129739452866:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:37.972982Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/004729/r3tmp/tmpCLFE0c/pdisk_1.dat 2025-06-30T09:21:38.052813Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:38.065645Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:21:38.074289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:38.074317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:38.076738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11662 TServer::EnableGrpc on GrpcPort 22336, node 1 2025-06-30T09:21:38.102560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:38.102583Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:38.102585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:38.102642Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11662 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:21:38.225284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:38.229516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275298332 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) 2025-06-30T09:21:38.307939Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handshake: worker# [1:7521670134034420687:2288] 2025-06-30T09:21:38.308059Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:21:38.308104Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-30T09:21:38.308126Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Send handshake: worker# [1:7521670134034420687:2288] 2025-06-30T09:21:38.308261Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 3 SeqNo: 0 CreateTime: 
1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-30T09:21:38.311183Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-30T09:21:38.311265Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 },{ Order: 2 BodySize: 48 },{ Order: 3 BodySize: 48 }] } 2025-06-30T09:21:38.311319Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670134034420782:2348] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-30T09:21:38.311327Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:38.311345Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670134034420782:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 2 Group: 0 Step: 2 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 3 Group: 0 Step: 3 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-30T09:21:38.317851Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670134034420782:2348] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:38.317887Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:38.317900Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } 2025-06-30T09:21:38.318129Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 19b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-30T09:21:38.318287Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 5 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 6 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 7 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 
49b Offset: 8 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-30T09:21:38.318420Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 } VersionTxIds { Version { Step: 30 TxId: 0 } TxId: 3 } 2025-06-30T09:21:38.318444Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 5 BodySize: 49 },{ Order: 6 BodySize: 49 },{ Order: 7 BodySize: 49 },{ Order: 8 BodySize: 49 }] } 2025-06-30T09:21:38.318477Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670134034420782:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 5 Group: 0 Step: 11 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 6 Group: 0 Step: 12 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 7 Group: 0 Step: 21 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 8 Group: 0 Step: 22 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] } 2025-06-30T09:21:38.319666Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670134034420782:2348] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:38.319701Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:38.319714Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [5,6,7,8] } 2025-06-30T09:21:38.321133Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 9 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 10 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-30T09:21:38.321193Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 9 BodySize: 49 },{ Order: 10 BodySize: 49 }] } 2025-06-30T09:21:38.321229Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670134034420782:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 9 Group: 0 Step: 13 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 10 Group: 0 Step: 23 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] } 2025-06-30T09:21:38.323183Z node 1 :REPLICATION_SERVICE DEBUG: 
base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670134034420782:2348] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:38.323209Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:38.323220Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [9,10] } 2025-06-30T09:21:38.323377Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670134034420779:2348] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 19b Offset: 11 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataOkErasureFiveParts [GOOD] Test command err: RandomSeed# 1676280079593596738 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:4:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: 
ver0 disks [ 6 0 ] part 2: ver0 disks [ 7 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 6 0 ]; part 2 disks [ 7 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 0 ]; part 2 disks [ 7 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::ApplyInCorrectOrder [GOOD] Test command err: 2025-06-30T09:21:38.007336Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670133993956626:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:38.008213Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004748/r3tmp/tmp6jAhXQ/pdisk_1.dat 2025-06-30T09:21:38.072354Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:38.076174Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TClient is connected to server localhost:64572 TServer::EnableGrpc on GrpcPort 25158, node 1 2025-06-30T09:21:38.106475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown 
-> Disconnected 2025-06-30T09:21:38.106546Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:38.107558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:38.112444Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:38.112465Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:38.112468Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:38.112537Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64572 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:38.176427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:38.183460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:38.184774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275298283 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) 2025-06-30T09:21:38.255939Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handshake: worker# [1:7521670133993957074:2288] 2025-06-30T09:21:38.256078Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:21:38.256141Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-30T09:21:38.256163Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Send handshake: worker# [1:7521670133993957074:2288] 2025-06-30T09:21:38.260672Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-30T09:21:38.267814Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] 
Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-30T09:21:38.267917Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-30T09:21:38.267973Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670133993957169:2348] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-30T09:21:38.268088Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:38.268104Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670133993957169:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-30T09:21:38.270036Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670133993957169:2348] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:38.270067Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:38.270080Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } 2025-06-30T09:21:38.270870Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 19b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-30T09:21:38.271054Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 } 2025-06-30T09:21:38.271076Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 49 },{ Order: 3 BodySize: 48 }] } 2025-06-30T09:21:38.271108Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670133993957169:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 11 
TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 3 Group: 0 Step: 2 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-30T09:21:38.275002Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670133993957169:2348] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:38.275034Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:38.275049Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133993957166:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2,3] } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken [GOOD] Test command err: RandomSeed# 8167711133268616813 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:4:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 ], ver1 disks [ 7 ], ver2 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] ERROR: There are unequal parts Erasure info: { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK 
part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK 
{ part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: Erasure info: ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:3:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 ], ver1 disks [ 0 ] part 2: ver0 disks [ 6 ], ver1 disks [ 1 ] part 3: ver0 disks [ 6 ], ver1 disks [ 2 ] part 4: ver0 disks [ 3 ], ver1 disks [ 6 ] part 5: ver0 disks [ 4 ], ver1 disks [ 6 ] part 6: ver0 disks [ 5 ], ver1 disks [ 6 ] ERROR: There are unequal parts Erasure info: { part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 2 disks 
[ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { 
part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK ERROR: There are erasure restore fails ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpSecrets::RequireDbPrefixInSecretWithValidName [GOOD] Test command err: Trying to start YDB, gRPC: 29514, MsgBus: 2488 2025-06-30T09:21:25.790247Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670079485196477:2241];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:25.828504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004784/r3tmp/tmpD94yqP/pdisk_1.dat 2025-06-30T09:21:25.863769Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:25.864343Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670079485196255:2080] 1751275285773944 != 1751275285773947 TServer::EnableGrpc on GrpcPort 29514, node 1 2025-06-30T09:21:25.919008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:25.919019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:25.919021Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:25.919059Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:25.935057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:25.935088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:25.939119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2488 TClient is connected to server localhost:2488 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:26.060396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:26.063603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:26.071247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:26.072906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:26.097011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:26.167410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:26.196874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:26.401163Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670083780165156:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:26.401197Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:26.445472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:26.460276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:26.519853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:26.532629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:26.544323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:26.564999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:26.579216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:26.596052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670083780165814:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:26.596080Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:26.596174Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670083780165819:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:26.597090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:21:26.601824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715670, at schemeshard: 72057594046644480
2025-06-30T09:21:26.602158Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670083780165821:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2025-06-30T09:21:26.675874Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670083780165872:3405] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:26.780186Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:26.864041Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cac ... TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720667:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.524917Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720670:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.567653Z node 9 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:34.649197Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976720673:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:34.776817Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976720676:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:34.890085Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976725758:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:34.925714Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976725759:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:21:35.517278Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] 
[TPoolFetcherActor] ActorId: [9:7521670121093795319:2355], DatabaseId: /Root/test-shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:35.517303Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7521670121093795308:2352], DatabaseId: /Root/test-shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:35.517332Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/test-shared, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:35.519747Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976730657:3, at schemeshard: 72075186224038898, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:21:35.567420Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [9:7521670121093795322:2356], DatabaseId: /Root/test-shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976730657 completed, doublechecking } 2025-06-30T09:21:35.672129Z node 9 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [9:7521670121093795413:2604] txid# 281474976730658, issues: { message: "Check failed: path: \'/Root/test-shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224038898, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:35.680609Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730659:1, at schemeshard: 72075186224038898, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:35.857419Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976730662:0, at schemeshard: 72075186224038898, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:36.017763Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730665:1, at schemeshard: 72075186224038898, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.225884Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730668:0, at schemeshard: 72075186224038898, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.427710Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976730671:0, at schemeshard: 72075186224038898, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:36.585524Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976730674:0, at schemeshard: 72075186224038898, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:36.727403Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976735758:2, at schemeshard: 72075186224038898, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:36.854962Z node 9 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976735759:0, at schemeshard: 72075186224038898, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:21:37.062228Z node 9 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224038891] TEvIntervalQuerySummary, time mismath: node id# 9, interval end# 2025-06-30T09:21:37.000000Z, event interval end# 2025-06-30T09:21:36.000000Z 2025-06-30T09:21:37.383310Z node 10 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 10, interval end# 2025-06-30T09:21:37.000000Z, event interval end# 2025-06-30T09:21:36.000000Z 2025-06-30T09:21:37.806325Z node 8 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 9 2025-06-30T09:21:37.806518Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:21:37.806654Z node 8 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 10 2025-06-30T09:21:37.806703Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:21:37.817209Z node 9 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [9:7521670112503859712:2074], processor id# 72075186224038891, database# /Root/test-shared 2025-06-30T09:21:37.823704Z node 10 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [10:7521670111503961286:2074], processor id# 72075186224037891, database# /Root/test-dedicated 2025-06-30T09:21:38.278505Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][9:7521670125388763311:2957][/Root/test-shared/.metadata/secrets/values] Sync is incomplete in one of the ring groups: cookie# 18 2025-06-30T09:21:38.278532Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][9:7521670125388763311:2957][/Root/test-shared/.metadata/secrets/values] Sync is incomplete in one of the ring groups: cookie# 19 2025-06-30T09:21:38.278896Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7521670133978699060:2739], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]
: Error: LookupError, code: 2005
2025-06-30T09:21:38.279051Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=9&id=Mjk3NzMxZWItYTQ1MDAxYjUtZmExMzE0NWEtZWEwZGY5NWU=, ActorId: [9:7521670133978699057:2737], ActorState: ExecuteState, TraceId: 01jz028qf41rhyfrps7v290eqx, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id:
2025-06-30T09:21:38.280489Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ;
2025-06-30T09:21:38.280611Z node 9 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]
: Error: LookupError, code: 2005 >> KqpConstraints::SerialTypeSerial8 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::DecimalKeys [GOOD] Test command err: 2025-06-30T09:21:38.058235Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670133998977542:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:38.058263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004756/r3tmp/tmpsaGDHa/pdisk_1.dat 2025-06-30T09:21:38.170602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:38.170634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:38.171622Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:38.172978Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:22257 TServer::EnableGrpc on GrpcPort 65211, node 1 2025-06-30T09:21:38.241025Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:38.241039Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:38.241041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:38.241086Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22257 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:38.301242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
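The UNAVAILABLE compile error above carries a nested issue chain: code 1050 (Table metadata loading) wraps a positioned metadata-load failure, which in turn wraps code 2005 (LookupError). A hedged sketch of such an issue tree, with invented types standing in for the real issue classes:

#include <iostream>
#include <string>
#include <vector>

// Invented stand-in for a nested issue; the real structure also carries
// severity and source positions, omitted here.
struct TIssueSketch {
    int Code;                            // 0 = no issue code printed in the log
    std::string Message;
    std::vector<TIssueSketch> Children;
};

void Print(const TIssueSketch& issue, int depth = 0) {
    std::cout << std::string(depth * 2, ' ') << ": Error: " << issue.Message;
    if (issue.Code) std::cout << ", code: " << issue.Code;
    std::cout << "\n";
    for (const auto& child : issue.Children) Print(child, depth + 1);
}

int main() {
    TIssueSketch lookup{2005, "LookupError", {}};
    TIssueSketch load{0, "Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]", {lookup}};
    TIssueSketch root{1050, "Table metadata loading", {load}};
    Print(root);  // mirrors the three ": Error: ..." lines above
    return 0;
}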
2025-06-30T09:21:38.308300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:38.310385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275298409 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Decimal(1,0)" TypeId: 4865 Id: 1 NotNull: false TypeInfo { DecimalPrecision: 1 DecimalScale: 0 } IsBuildInProgress: false } Columns { Name: "value" Type: "Decimal(35,10)" TypeId: 4865 I... (TRUNCATED) 2025-06-30T09:21:38.378022Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133998978172:2348] Handshake: worker# [1:7521670133998978079:2287] 2025-06-30T09:21:38.378136Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133998978172:2348] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:21:38.378194Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133998978172:2348] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Decimal(1,0) : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-30T09:21:38.378222Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133998978172:2348] Send 
handshake: worker# [1:7521670133998978079:2287] 2025-06-30T09:21:38.378395Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133998978172:2348] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 57b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-30T09:21:38.378451Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133998978172:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 57 },{ Order: 2 BodySize: 57 },{ Order: 3 BodySize: 57 }] } 2025-06-30T09:21:38.378497Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670133998978175:2348] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-30T09:21:38.378505Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133998978172:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:38.378521Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670133998978175:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b }] } 2025-06-30T09:21:38.379438Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670133998978175:2348] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:38.379466Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133998978172:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:38.379475Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670133998978172:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } >> QueryStats::Ranges [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataStatusUnknown [GOOD] Test command err: RandomSeed# 9030255035031155156 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Group is disintegrated or has network problems *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: part 2: part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::DataOk [GOOD] Test command err: RandomSeed# 3939078356027908611 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Group is disintegrated or has network problems *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 2 ] |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest >> KqpScheme::AlterAsyncReplication [GOOD] |81.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |81.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |81.5%| [TA] {RESULT} 
$(B)/ydb/core/tx/replication/controller/ut_dst_creator/test-results/unittest/{meta.json ... results_accumulator.log} |81.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpConstraints::SerialTypeSerial8 [GOOD] Test command err: Trying to start YDB, gRPC: 1130, MsgBus: 24042 2025-06-30T09:21:29.501478Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670094105388695:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:29.502867Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00477a/r3tmp/tmpT6uE41/pdisk_1.dat 2025-06-30T09:21:29.560573Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1130, node 1 2025-06-30T09:21:29.594977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:29.594991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:29.595000Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:29.595048Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:29.595370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:29.595407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:29.596482Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24042 TClient is connected to server localhost:24042 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
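[Editor's note] The CheckIntegrityBlock42 / CheckIntegrityMirror3dc output above lists, per disk, which part versions of a blob were found ("Layout info: part 3: ver0 disks [ 2 ] ..."). The restorability rule behind those tests: block-4-2 erasure splits a blob into 6 parts (4 data + 2 parity), so any 4 distinct parts suffice to reconstruct it, while parts on unreachable disks leave the status unknown rather than lost — which is what the DataStatusUnknown test name refers to. Below is a minimal, self-contained sketch of that decision rule only; the types and the CheckBlock42 function are hypothetical stand-ins, not YDB's VDisk structures or its actual integrity checker.

#include <bitset>
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical, simplified model of the "Layout info" shown in the log:
// partsOnDisk[d] is the set of part indices (0..5 for block-4-2) found on disk d.
enum class EIntegrity { Ok, DataLoss, Unknown };

EIntegrity CheckBlock42(const std::vector<std::bitset<6>>& partsOnDisk,
                        std::size_t disksNotResponding) {
    std::bitset<6> present;                 // union of parts seen on responding disks
    for (const auto& parts : partsOnDisk)
        present |= parts;
    if (present.count() >= 4)               // any 4 of 6 parts reconstruct the blob
        return EIntegrity::Ok;
    // Missing parts might still live on disks we could not query -- the
    // "DataStatusUnknown" case rather than confirmed data loss.
    if (disksNotResponding > 0 && present.count() + disksNotResponding >= 4)
        return EIntegrity::Unknown;
    return EIntegrity::DataLoss;
}

int main() {
    // Mirrors the layout in the log: parts 3, 4, 5 found on disks 2, 3, 4;
    // with 3 disks unqueried the verdict is Unknown, not DataLoss.
    std::vector<std::bitset<6>> layout(8);
    layout[2].set(2); layout[3].set(3); layout[4].set(4);
    std::cout << (CheckBlock42(layout, 3) == EIntegrity::Unknown) << "\n"; // prints 1
}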
2025-06-30T09:21:29.679144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:29.682844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:29.687817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:29.764777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:29.807804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:29.870864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.043106Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670098400357412:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.043152Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.101357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.110398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.122525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.136883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.152281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.170580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.182026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:30.195419Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670098400358065:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.195455Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.195503Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670098400358070:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:30.196730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:30.206254Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670098400358072:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:30.303531Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670098400358123:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 17544, MsgBus: 63981 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00477a/r3tmp/tmpAcPzJ0/pdisk_1.dat 2025-06-30T09:21:30.659413Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670098231965316:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:30.661231Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:21:30.677932Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17544, node 2 2025-06-30T09:21:30.690351Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:30.690370Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty ... _type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:37.381647Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 62072, MsgBus: 23032 2025-06-30T09:21:37.756024Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670130607891205:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:37.757576Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00477a/r3tmp/tmpWQXzct/pdisk_1.dat 2025-06-30T09:21:37.773601Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62072, node 7 2025-06-30T09:21:37.793311Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:37.793329Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:37.793332Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:37.793393Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23032 TClient is connected to server localhost:23032 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:37.863222Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:37.863259Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:37.863704Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:37.864275Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:21:37.866006Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:37.886819Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:37.908376Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:37.943041Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
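[Editor's note] The WorkloadService sequence repeated above (fetch pool "default" -> NOT_FOUND -> create -> "Transaction ... completed, doublechecking" -> a concurrent creator's txid failing with "path exist, request accepts it") is the classic idempotent create-if-not-exists pattern: losing the creation race is tolerated, and success is confirmed by re-reading. A minimal single-process sketch of that pattern follows; TFakeSchemeStore and EnsurePool are hypothetical names standing in for the schemeshard interaction, not YDB's API.

#include <iostream>
#include <mutex>
#include <string>
#include <unordered_set>

// Hypothetical in-memory "scheme store"; stands in for the schemeshard here
// only to illustrate the fetch -> create -> doublecheck cycle from the log.
class TFakeSchemeStore {
    std::mutex Lock;
    std::unordered_set<std::string> Paths;
public:
    bool Exists(const std::string& path) {
        std::lock_guard<std::mutex> g(Lock);
        return Paths.count(path) > 0;
    }
    // Returns false if the path already existed (the "path exist" error above).
    bool Create(const std::string& path) {
        std::lock_guard<std::mutex> g(Lock);
        return Paths.insert(path).second;
    }
};

// Ensure a resource pool exists: tolerate losing the creation race, then
// re-check ("doublecheck") that someone's create actually landed.
bool EnsurePool(TFakeSchemeStore& store, const std::string& path) {
    if (store.Exists(path))
        return true;                        // fetch succeeded, nothing to do
    if (!store.Create(path))
        std::cout << "create lost the race: " << path << " already exists\n";
    return store.Exists(path);              // doublecheck before reporting success
}

int main() {
    TFakeSchemeStore store;
    EnsurePool(store, "/Root/.metadata/workload_manager/pools/default");
    EnsurePool(store, "/Root/.metadata/workload_manager/pools/default"); // idempotent
}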
2025-06-30T09:21:37.961149Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:38.272478Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670134902860050:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:38.272510Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:38.281219Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:38.339849Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:38.351221Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:38.366529Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:38.383142Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:38.398117Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:38.413681Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:38.429453Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670134902860705:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:38.429492Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:38.429515Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670134902860710:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:38.430455Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:38.440645Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670134902860712:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:38.535060Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670134902860763:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:38.711746Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:38.760901Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest >> LocalTableWriter::WriteTable |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest >> QueryStats::Ranges [GOOD] |81.5%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... results_accumulator.log} >> LocalTableWriter::SupportedTypes |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> YdbIndexTable::MultiShardTableOneIndex >> SplitPathTests::WithoutDatabaseShouldSuccess [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterAsyncReplication [GOOD] Test command err: Trying to start YDB, gRPC: 13348, MsgBus: 23511 2025-06-30T09:21:19.474665Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670050474487255:2246];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:19.474903Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047c3/r3tmp/tmpz14pDz/pdisk_1.dat 2025-06-30T09:21:19.577449Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:19.580175Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670050474487020:2080] 1751275279462208 != 1751275279462211 TServer::EnableGrpc on GrpcPort 13348, node 1 2025-06-30T09:21:19.606986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:19.606998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:19.607001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-30T09:21:19.607049Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23511 2025-06-30T09:21:19.647102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:19.647132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:19.651111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23511 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:19.743064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:19.748367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:19.756266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:19.785099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:19.835013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:19.872204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:19.999309Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670050474488616:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:19.999345Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.049215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.059636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.122675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.146625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.158887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.173973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.188372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:20.219365Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670054769456572:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.219390Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.219455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670054769456577:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:20.220341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:20.223461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:20.223704Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670054769456579:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:20.324110Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670054769456630:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:20.473061Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:20.513368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 15546, MsgBus: 27490 2025-06-30T0 ... pp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-30T09:21:39.451106Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:39.451925Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715684 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { Token: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.451972Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715684 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { Token: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.452192Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-30T09:21:39.490878Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715685:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:39.491607Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715685 PartId: 0 } Config { SrcConnectionParams { Endpoint: 
"localhost:2135" Database: "/Root" OAuthToken { Token: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.491656Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715685 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { Token: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.491835Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-30T09:21:39.500664Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715686:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:39.501389Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715686 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { TokenSecretName: "mysecret" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.501449Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715686 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { TokenSecretName: "mysecret" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.501703Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-30T09:21:39.503249Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715686, at schemeshard: 72057594046644480 2025-06-30T09:21:39.508860Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670137408133253:4041] txid# 281474976715687, issues: { message: "User is not set" severity: 1 } 2025-06-30T09:21:39.514045Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670137408133262:4046] txid# 281474976715688, issues: { message: "User is not set" severity: 1 } 2025-06-30T09:21:39.518956Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715689:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:39.519652Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle 
NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715689 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.519699Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715689 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.519897Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-30T09:21:39.521130Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715689, at schemeshard: 72057594046644480 2025-06-30T09:21:39.526780Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:39.527515Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715690 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" Password: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.527565Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715690 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" Password: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.527764Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-30T09:21:39.534940Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715691:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:39.535794Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715691 PartId: 0 } Config { SrcConnectionParams { Endpoint: 
"localhost:2135" Database: "/Root" StaticCredentials { User: "user" PasswordSecretName: "password_secret_name" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.535854Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715691 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" PasswordSecretName: "password_secret_name" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.536053Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-30T09:21:39.550347Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp:513) 2025-06-30T09:21:39.552279Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715692 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.552344Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715692 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-30T09:21:39.552607Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete |81.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest |81.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildSuccess [GOOD] >> OperationMapping::IndexBuildCanceled [GOOD] >> SplitPathTests::WithDatabaseShouldFail [GOOD] >> OperationMapping::IndexBuildRejected [GOOD] |81.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithoutDatabaseShouldSuccess [GOOD] |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> SplitPathTests::WithDatabaseShouldSuccess [GOOD] >> LocalTableWriter::WriteTable [GOOD] >> YdbIndexTable::MultiShardTableOneUniqIndex |81.5%| [TA] $(B)/ydb/core/sys_view/query_stats/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |81.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest >> TConsoleTests::TestRemoveServerlessTenant [GOOD] >> TConsoleTests::TestRegisterComputationalUnitsForPending >> YdbIndexTable::MultiShardTableUniqAndNonUniqIndex >> LocalTableWriter::SupportedTypes [GOOD] |81.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest |81.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildSuccess [GOOD] |81.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildRejected [GOOD] |81.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldFail [GOOD] |81.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldSuccess [GOOD] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlapDataColumn >> TConsoleTests::TestAttributesExtSubdomain [GOOD] >> TConsoleTests::TestDatabaseQuotas |81.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildCanceled [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::WriteTable [GOOD] Test command err: 2025-06-30T09:21:40.195275Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670142706277382:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:40.195359Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00471a/r3tmp/tmpmTiPLZ/pdisk_1.dat 2025-06-30T09:21:40.283817Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:21:40.285596Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:40.293610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:40.293641Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:40.295758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9134 TServer::EnableGrpc on GrpcPort 21062, node 1 2025-06-30T09:21:40.319758Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:40.319774Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:40.319777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:40.319834Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9134 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:40.386015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:40.391440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:40.424073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275300495 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-30T09:21:40.482883Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142706277837:2348] Handshake: worker# [1:7521670142706277747:2288] 2025-06-30T09:21:40.482964Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142706277837:2348] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:21:40.483008Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142706277837:2348] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-30T09:21:40.483033Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142706277837:2348] Send handshake: worker# [1:7521670142706277747:2288] 2025-06-30T09:21:40.483186Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142706277837:2348] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 36b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 36b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-30T09:21:40.483227Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142706277837:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 36 },{ Order: 2 BodySize: 36 },{ Order: 3 BodySize: 36 }] } 2025-06-30T09:21:40.483261Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670142706277840:2348] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-30T09:21:40.483267Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142706277837:2348] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:40.483284Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670142706277840:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-30T09:21:40.487115Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670142706277840:2348] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:40.487138Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142706277837:2348] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:40.487150Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142706277837:2348] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::SupportedTypes [GOOD] Test command err: 2025-06-30T09:21:40.264078Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670142516189909:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:40.264150Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00471c/r3tmp/tmpH39YjH/pdisk_1.dat 2025-06-30T09:21:40.357520Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:23936 2025-06-30T09:21:40.366050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected TServer::EnableGrpc on GrpcPort 2025-06-30T09:21:40.366088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 13668, node 1 2025-06-30T09:21:40.367317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:40.380904Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:40.380923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:40.380925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:40.380973Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23936 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:40.467757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:40.483359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:40.491241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275300600 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "int32_value" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "ui... 
(TRUNCATED) 2025-06-30T09:21:40.574838Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142516190444:2346] Handshake: worker# [1:7521670142516190353:2287] 2025-06-30T09:21:40.574979Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142516190444:2346] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:21:40.575058Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142516190444:2346] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-30T09:21:40.575081Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142516190444:2346] Send handshake: worker# [1:7521670142516190353:2287] 2025-06-30T09:21:40.575371Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142516190444:2346] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 45b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 41b Offset: 5 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 41b Offset: 6 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 7 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 44b Offset: 8 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 66b Offset: 9 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 71b Offset: 10 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 
72b Offset: 11 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 12 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 13 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 14 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 58b Offset: 15 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 16 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 54b Offset: 17 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 18 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 76b Offset: 19 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 20 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 54b Offset: 21 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 61b Offset: 22 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 23 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 24 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 46b Offset: 25 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 47b Offset: 26 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 50b Offset: 27 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 28 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 72b Offset: 29 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 30 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 64b Offset: 31 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-30T09:21:40.575571Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142516190444:2346] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 45 },{ Order: 2 BodySize: 45 },{ Order: 3 BodySize: 45 },{ Order: 4 BodySize: 45 },{ Order: 5 BodySize: 41 },{ Order: 6 BodySize: 41 },{ Order: 7 BodySize: 45 },{ Order: 8 BodySize: 44 },{ Order: 9 BodySize: 66 },{ Order: 10 BodySize: 71 },{ Order: 11 BodySize: 72 },{ Order: 12 BodySize: 49 },{ Order: 13 BodySize: 48 },{ Order: 14 BodySize: 51 },{ Order: 15 BodySize: 58 },{ Order: 16 BodySize: 51 },{ Order: 17 BodySize: 54 },{ Order: 18 BodySize: 57 },{ Order: 19 BodySize: 76 },{ Order: 20 BodySize: 45 },{ Order: 21 BodySize: 54 },{ Order: 22 BodySize: 61 },{ Order: 23 BodySize: 51 },{ Order: 24 BodySize: 45 },{ Order: 25 BodySize: 46 },{ Order: 26 BodySize: 47 },{ Order: 27 BodySize: 50 },{ Order: 28 BodySize: 49 },{ Order: 29 BodySize: 72 },{ Order: 30 BodySize: 57 },{ Order: 31 BodySize: 64 }] } 2025-06-30T09:21:40.575650Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: 
[TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670142516190448:2346] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-30T09:21:40.575656Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142516190444:2346] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:40.575702Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670142516190448:2346] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 4 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 5 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 41b },{ Order: 6 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 41b },{ Order: 7 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 8 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 44b },{ Order: 9 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 66b },{ Order: 10 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 71b },{ Order: 11 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 72b },{ Order: 12 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 13 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 14 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 15 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 58b },{ Order: 16 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 17 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 54b },{ Order: 18 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 19 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 76b },{ Order: 20 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 21 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 54b },{ Order: 22 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 61b },{ Order: 23 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 24 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 25 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 46b },{ Order: 26 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 47b },{ Order: 27 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 50b },{ Order: 28 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 29 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 72b },{ Order: 30 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 31 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 64b }] } 2025-06-30T09:21:40.599122Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: 
[TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7521670142516190448:2346] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-30T09:21:40.599159Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142516190444:2346] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-30T09:21:40.599174Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7521670142516190444:2346] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31] } |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest |81.5%| [TA] $(B)/ydb/core/grpc_services/ut/test-results/unittest/{meta.json ... results_accumulator.log} |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> TBackupCollectionWithRebootsTests::ParallelCreateDrop |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest |81.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest |81.6%| [TA] $(B)/ydb/core/tx/replication/service/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} >> TBackupCollectionWithRebootsTests::CreateDroppedWithReboots >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet >> YdbIndexTable::OnlineBuild >> TBackupCollectionWithRebootsTests::SimpleDropWithReboots2 >> TExternalDataSourceTest::ParallelCreateSameExternalDataSource >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] >> TExternalDataSourceTest::CreateExternalDataSource >> TExternalDataSourceTest::ReadOnlyMode >> TExternalDataSourceTest::RemovingReferencesFromDataSources >> TExternalDataSourceTest::ParallelCreateSameExternalDataSource [GOOD] >> TExternalDataSourceTest::ParallelReplaceExternalDataSourceIfNotExists >> TExternalDataSourceTest::SchemeErrors >> TExternalDataSourceTest::DropTableTwice >> TExternalDataSourceTest::CreateExternalDataSourceWithProperties >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] >> TExternalDataSourceTest::ParallelReplaceExternalDataSourceIfNotExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:42.957958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, 
WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:42.957988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:42.957995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:42.958002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:42.958009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:42.958014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:42.958026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:42.958042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:42.958157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:42.958229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:42.975447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:42.975475Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:42.975590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:42.978432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:42.979283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:42.979331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:42.986704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:42.986832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:42.986975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:42.987070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:42.987771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:42.987838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:42.988212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:42.988228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:42.988260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:42.988272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:42.988280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:42.988322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.995449Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-30T09:21:43.032774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.032880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.032970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:43.032982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:43.033050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:43.033069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:43.037176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.037249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:43.037351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.037376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:43.037385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:43.037394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:43.041631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.041666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:43.041680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:43.048165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.048202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.048213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.048227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.049307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:43.051164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:43.051236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:43.051551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.051613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.051627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-30T09:21:43.051736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:43.051751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.051800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.051819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:43.056863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.056890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:43.056978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.056988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-30T09:21:43.057007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.057019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-30T09:21:43.057044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:21:43.057050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.057057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:21:43.057062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.057069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-30T09:21:43.057078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.057085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-30T09:21:43.057092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1:0 2025-06-30T09:21:43.057125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:21:43.057134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 
2025-06-30T09:21:43.057139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-30T09:21:43.057861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:21:43.057898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:21:43.057906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-30T09:21:43.057913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-30T09:21:43.057921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.057948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-30T09:21:43.063018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-30T09:21:43.063208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-30T09:21:43.063587Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:273:2262] Bootstrap 2025-06-30T09:21:43.066641Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:273:2262] Become StateWork (SchemeCache [1:278:2267]) 2025-06-30T09:21:43.067765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.067889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:337: [72057594046678944] CreateNewExternalDataSource, opId 101:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } 2025-06-30T09:21:43.067908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-30T09:21:43.067918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: 
IgniteOperation, opId: 101:1, propose status:StatusPreconditionFailed, reason: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-30T09:21:43.068258Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:273:2262] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:21:43.069530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 101, response: Status: StatusPreconditionFailed Reason: "Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.069618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/MyExternalDataSource 2025-06-30T09:21:43.069829Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-30T09:21:43.069897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-30T09:21:43.069907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-30T09:21:43.069996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-30T09:21:43.070023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.070030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:288:2277] TestWaitNotification: OK eventTxId 101 2025-06-30T09:21:43.070138Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:43.070180Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 53us result status StatusPathDoesNotExist 2025-06-30T09:21:43.070240Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/MyExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 
CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TBackupCollectionWithRebootsTests::CreateDroppedAndDropWithReboots >> TExternalDataSourceTest::CreateExternalDataSource [GOOD] >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists >> TExternalDataSourceTest::SchemeErrors [GOOD] >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists >> TExternalDataSourceTest::DropTableTwice [GOOD] >> TExternalDataSourceTest::ParallelCreateExternalDataSource >> TExternalDataSourceTest::ReadOnlyMode [GOOD] >> TExternalDataSourceTest::PreventDeletionOfDependentDataSources >> TExternalDataSourceTest::RemovingReferencesFromDataSources [GOOD] >> TBackupCollectionWithRebootsTests::CreateWithReboots >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists [GOOD] |81.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] >> TExternalDataSourceTest::CreateExternalDataSourceWithProperties [GOOD] >> TExternalDataSourceTest::DropExternalDataSource >> TExternalDataSourceTest::PreventDeletionOfDependentDataSources [GOOD] |81.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ParallelReplaceExternalDataSourceIfNotExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:43.160596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:43.160623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.160630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:43.160637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:43.160645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:43.160650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:43.160662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.160679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:43.160796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.160862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:43.176722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:43.176756Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:43.176873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.181214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:43.187089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:43.187158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:43.196342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:43.196448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:43.196624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.196734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:43.197558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.197632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:43.198392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.198416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.198451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:43.198464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:43.198472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:43.198521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.201451Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 
72057594046678944 is [1:130:2154] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-30T09:21:43.232682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.232783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.232870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:43.232881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:43.232949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:43.232970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:43.234109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.234197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:43.234288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.234311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:43.234318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:43.234324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:43.235178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.235200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:43.235208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:43.235748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.235765Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.235773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.235782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.236566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:43.237157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:43.237211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:43.237453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.237489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.237498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.237586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:43.237597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.237634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.237649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21: ... 
chemeshard: 72057594046678944 2025-06-30T09:21:43.504122Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 126, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504130Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 119: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504133Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 119: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504138Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 127, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504152Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 120: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504155Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 120: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504162Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 128, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504177Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 129, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504194Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 121: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504197Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 121: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504207Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 130, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504216Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 122: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504219Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 122: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 131, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504260Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 123: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504262Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 123: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504274Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 124: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504277Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 124: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504286Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 125: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504289Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 125: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504296Z node 2 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504305Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 126: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504307Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 126: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504315Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504321Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 127: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504324Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 127: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504335Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504342Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 128: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504345Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 128: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504355Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 129: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504358Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 129: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504365Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504376Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 130: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504380Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 130: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504384Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 131: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504386Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 131: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504397Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504416Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504421Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504451Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504455Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504476Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got 
EvNotifyTxCompletionResult 2025-06-30T09:21:43.504481Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504502Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504506Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504529Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504532Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:398:2387] 2025-06-30T09:21:43.504548Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.504551Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [2:398:2387] TestWaitNotification: OK eventTxId 108 TestWaitNotification: OK eventTxId 109 TestWaitNotification: OK eventTxId 110 TestWaitNotification: OK eventTxId 111 TestWaitNotification: OK eventTxId 112 TestWaitNotification: OK eventTxId 113 TestWaitNotification: OK eventTxId 114 TestWaitNotification: OK eventTxId 115 TestWaitNotification: OK eventTxId 116 TestWaitNotification: OK eventTxId 117 TestWaitNotification: OK eventTxId 118 TestWaitNotification: OK eventTxId 119 TestWaitNotification: OK eventTxId 120 TestWaitNotification: OK eventTxId 121 TestWaitNotification: OK eventTxId 122 TestWaitNotification: OK eventTxId 123 TestWaitNotification: OK eventTxId 124 TestWaitNotification: OK eventTxId 125 TestWaitNotification: OK eventTxId 126 TestWaitNotification: OK eventTxId 127 TestWaitNotification: OK eventTxId 128 TestWaitNotification: OK eventTxId 129 TestWaitNotification: OK eventTxId 130 TestWaitNotification: OK eventTxId 131 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 TestWaitNotification: OK eventTxId 105 TestWaitNotification: OK eventTxId 106 TestWaitNotification: OK eventTxId 107 2025-06-30T09:21:43.504872Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:43.504929Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 71us result status StatusSuccess 2025-06-30T09:21:43.505033Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 2 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 2 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/other_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |81.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists [GOOD] >> TExternalDataSourceTest::ParallelCreateExternalDataSource [GOOD] >> TBackupCollectionWithRebootsTests::DropWithReboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::SchemeErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:43.569286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:43.569315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.569322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:43.569328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:43.569336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:43.569341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:43.569352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-30T09:21:43.569367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:43.569491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.569561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:43.587154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:43.587179Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:43.587272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.589867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:43.590079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:43.590119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:43.591478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:43.591545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:43.591667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.591756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:43.592299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.592369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:43.592702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.592715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.592744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:43.592754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:43.592762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:43.592798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.594436Z node 1 
:HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-30T09:21:43.618889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.618980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.619063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:43.619073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:43.619139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:43.619158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:43.620151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.620203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:43.620276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.620295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:43.620303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:43.620310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:43.620811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.620826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:43.620833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:43.621243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress 
Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.621255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.621262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.621271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.622079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:43.622548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:43.622596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:43.622847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.622878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.622888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.622971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:43.622981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.623019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.623034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21: ... 
ist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" } 2025-06-30T09:21:43.636981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:233: [72057594046678944] TCreateExternalDataSource Propose: opId# 126:0, path# /MyRoot/DirA/MyExternalDataSource 2025-06-30T09:21:43.637048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 126:1, propose status:StatusSchemeError, reason: Authorization method isn't specified, at schemeshard: 72057594046678944 2025-06-30T09:21:43.637621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 126, response: Status: StatusSchemeError Reason: "Authorization method isn\'t specified" TxId: 126 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.637670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 126, database: /MyRoot, subject: , status: StatusSchemeError, reason: Authorization method isn't specified, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 126, wait until txId: 126 TestModificationResults wait txId: 127 2025-06-30T09:21:43.638425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } } TxId: 127 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.638482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:337: [72057594046678944] CreateNewExternalDataSource, opId 127:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } 2025-06-30T09:21:43.638498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:233: [72057594046678944] TCreateExternalDataSource Propose: opId# 127:0, path# /MyRoot/DirA/MyExternalDataSource 2025-06-30T09:21:43.638546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 127:1, propose status:StatusSchemeError, reason: Maximum length of location must be less or equal equal to 1000 but got 1001, at schemeshard: 72057594046678944 2025-06-30T09:21:43.639069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 127, response: Status: StatusSchemeError Reason: "Maximum length of location must be less or equal equal to 1000 but got 1001" TxId: 127 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.639118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 127, database: /MyRoot, subject: , status: StatusSchemeError, reason: Maximum length of location must be less or equal equal to 1000 but got 1001, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 127, wait until txId: 127 TestModificationResults wait txId: 128 2025-06-30T09:21:43.639823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Installation: 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.639871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:337: [72057594046678944] CreateNewExternalDataSource, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Installation: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } 2025-06-30T09:21:43.639883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:233: [72057594046678944] TCreateExternalDataSource Propose: opId# 128:0, path# /MyRoot/DirA/MyExternalDataSource 2025-06-30T09:21:43.639912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 128:1, propose status:StatusSchemeError, reason: Maximum length of installation must be less or equal equal to 1000 but got 1001, at schemeshard: 72057594046678944 2025-06-30T09:21:43.640464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 128, response: Status: StatusSchemeError Reason: "Maximum length of installation must be less or equal equal to 1000 but got 1001" TxId: 128 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.640502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, 
subject: , status: StatusSchemeError, reason: Maximum length of installation must be less or equal equal to 1000 but got 1001, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 128, wait until txId: 128 TestModificationResults wait txId: 129 2025-06-30T09:21:43.641148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } } } TxId: 129 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.641182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:337: [72057594046678944] CreateNewExternalDataSource, opId 129:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } } 2025-06-30T09:21:43.641194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:233: [72057594046678944] TCreateExternalDataSource Propose: opId# 129:0, path# /MyRoot/DirA/ 2025-06-30T09:21:43.641212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 129:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/DirA/', error: path part shouldn't be empty, source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:102, at schemeshard: 72057594046678944 2025-06-30T09:21:43.641689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 129, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/DirA/\', error: path part shouldn\'t be empty, source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:102" TxId: 129 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.641724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 129, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/DirA/', error: path part shouldn't be empty, source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:102, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/ TestModificationResult got TxId: 129, wait until txId: 129 >> TExternalDataSourceTest::DropExternalDataSource [GOOD] >> TBackupCollectionWithRebootsTests::SimpleDropWithReboots |81.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |81.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... results_accumulator.log} |81.6%| [TA] {RESULT} $(B)/ydb/core/sys_view/query_stats/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::RemovingReferencesFromDataSources [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:43.557886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:43.557916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.557923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:43.557930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:43.557937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:43.557943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:43.557956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.557972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:43.558101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.558187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:43.597133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:43.597157Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:43.597286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.607748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:43.608058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:43.608110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:43.609841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:43.609933Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:43.610064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.610145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:43.610708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.610792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:43.611137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.611154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.611183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:43.611193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:43.611200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:43.611242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.612873Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-30T09:21:43.674641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.678816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.678957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:43.678971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:43.679043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:43.679068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 
72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:43.687171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.687245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:43.687348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.687373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:43.687381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:43.687389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:43.688260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.688278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:43.688286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:43.688753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.688767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.688774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.688784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.689691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:43.690209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:43.690262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 
2025-06-30T09:21:43.690499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.690533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.690543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.690624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:43.690633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.690673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.690686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21: ... 7594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-30T09:21:43.725871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.725902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.725913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-06-30T09:21:43.725943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:21:43.725963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 104:0 128 -> 240 2025-06-30T09:21:43.726005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.726016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:21:43.726213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:21:43.726269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 FAKE_COORDINATOR: Erasing txId 104 2025-06-30T09:21:43.726631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.726642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:43.726680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:21:43.726707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.726714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-30T09:21:43.726721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-30T09:21:43.726819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.726831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-30T09:21:43.726848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:21:43.726855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:21:43.726861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:21:43.726866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:21:43.726872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-30T09:21:43.726878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:21:43.726885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-30T09:21:43.726890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:0 2025-06-30T09:21:43.726906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:21:43.726914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-30T09:21:43.726919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, 
LocalPathId: 1], 11 2025-06-30T09:21:43.726924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-30T09:21:43.727021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:21:43.727041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:21:43.727047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:21:43.727054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-30T09:21:43.727059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:21:43.727107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:21:43.727115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:21:43.727127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:21:43.727182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:21:43.727194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:21:43.727200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:21:43.727205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-30T09:21:43.727211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.727221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-30T09:21:43.728158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:21:43.728187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:21:43.728203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-30T09:21:43.728273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-30T09:21:43.728283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-30T09:21:43.728383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-30T09:21:43.728405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.728412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:393:2382] TestWaitNotification: OK eventTxId 104 2025-06-30T09:21:43.728515Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:43.728551Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 52us result status StatusPathDoesNotExist 2025-06-30T09:21:43.728591Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |81.6%| [TA] {RESULT} $(B)/ydb/core/grpc_services/ut/test-results/unittest/{meta.json ... results_accumulator.log} |81.6%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_table_writer/test-results/unittest/{meta.json ... 
results_accumulator.log} |81.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:43.430183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:43.430208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.430212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:43.430215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:43.430220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:43.430223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:43.430231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.430243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:43.430331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.430386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:43.445954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:43.445987Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:43.446105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.449326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:43.449588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:43.449630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:43.451411Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:43.451482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:43.451602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.451679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:43.452208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.452262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:43.452549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.452563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.452585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:43.452594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:43.452600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:43.452634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.454363Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-30T09:21:43.475578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.475658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.475714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:43.475720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:43.475763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:43.475775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: 
Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:43.482964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.483035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:43.483120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.483143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:43.483151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:43.483158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:43.494367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.494406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:43.494417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:43.499159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.499192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.499202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.499213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.500074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:43.507129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:43.507215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: 
advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:43.507474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.507529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.507540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.507643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:43.507657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.507697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.507712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21: ... 678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-30T09:21:43.816075Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:21:43.816175Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:21:43.816185Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:21:43.816190Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-30T09:21:43.816195Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-30T09:21:43.816199Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:21:43.816208Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-30T09:21:43.817008Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 
101 2025-06-30T09:21:43.817059Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-30T09:21:43.817114Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-30T09:21:43.817123Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-30T09:21:43.817202Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-30T09:21:43.817223Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.817228Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:310:2299] TestWaitNotification: OK eventTxId 101 2025-06-30T09:21:43.817312Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:43.817356Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 57us result status StatusSuccess 2025-06-30T09:21:43.817476Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" 
Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-06-30T09:21:43.818478Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Auth { None { } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.818540Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:337: [72057594046678944] CreateNewExternalDataSource, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Auth { None { } } } 2025-06-30T09:21:43.818553Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:233: [72057594046678944] TCreateExternalDataSource Propose: opId# 102:0, path# /MyRoot/MyExternalDataSource 2025-06-30T09:21:43.818586Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 102:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/MyExternalDataSource', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:102, at schemeshard: 72057594046678944 2025-06-30T09:21:43.819149Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 102, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:102" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 72057594046678944 2025-06-30T09:21:43.819204Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/MyExternalDataSource', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:102, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/MyExternalDataSource TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-30T09:21:43.819263Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:21:43.819271Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:21:43.819340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: 
NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:21:43.819359Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.819364Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:318:2307] TestWaitNotification: OK eventTxId 102 2025-06-30T09:21:43.819434Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:43.819465Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 37us result status StatusSuccess 2025-06-30T09:21:43.819553Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: 
[1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:43.851543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:43.851578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.851585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:43.851591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:43.851599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:43.851604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:43.851615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.851634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:43.851766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.851856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:43.891044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:43.891072Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:43.899587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:43.899677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:43.899716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:43.901725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:43.901860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:43.901993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.902102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:43.907519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.907605Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:43.907993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.908014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.908044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:43.908060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:43.908067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:43.908091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.909968Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:43.943555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.943654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.943738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:43.943748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:43.943837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:43.943856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:43.944834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.944894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:43.944982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.945006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:43.945014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:43.945022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:43.945678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.945697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:43.945705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:43.946210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.946226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.946235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.946244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.947042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:43.947593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:43.947649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:43.947881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.947915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.947926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.948006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:43.948017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.948059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.948074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:43.948618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.948632Z node 1 :FLAT_TX_SCHEMESHARD ... ion_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:21:43.958815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:21:43.958821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:21:43.958825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:21:43.958830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-30T09:21:43.958837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:21:43.958842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-30T09:21:43.958848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 101:0 2025-06-30T09:21:43.958863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:21:43.958870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-30T09:21:43.958876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-30T09:21:43.958884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-30T09:21:43.958993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:21:43.959008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 
2025-06-30T09:21:43.959014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-30T09:21:43.959019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-30T09:21:43.959024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:21:43.959168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:21:43.959183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:21:43.959188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-30T09:21:43.959192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-30T09:21:43.959197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:21:43.959209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-30T09:21:43.960050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:21:43.960178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-30T09:21:43.960244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-30T09:21:43.960253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-30T09:21:43.960331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-30T09:21:43.960354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.960360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:304:2293] TestWaitNotification: OK eventTxId 101 2025-06-30T09:21:43.960460Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/UniqueName" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false 
BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:43.960500Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/UniqueName" took 51us result status StatusSuccess 2025-06-30T09:21:43.960615Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/UniqueName" PathDescription { Self { Name: "UniqueName" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "UniqueName" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-06-30T09:21:43.961623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "UniqueName" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.961701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:337: [72057594046678944] CreateNewExternalDataSource, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "UniqueName" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } 2025-06-30T09:21:43.961719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_external_data_source.cpp:204: [72057594046678944] TAlterExternalDataSource Propose: opId# 102:0, path# /MyRoot/UniqueName 2025-06-30T09:21:43.961753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 102:1, propose status:StatusNameConflict, 
reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp:95, at schemeshard: 72057594046678944 2025-06-30T09:21:43.962350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 102, response: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/UniqueName\', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp:95" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 72057594046678944 2025-06-30T09:21:43.962411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp:95, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/UniqueName TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-30T09:21:43.962483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:21:43.962492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:21:43.962558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:21:43.962582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:21:43.962588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:312:2301] TestWaitNotification: OK eventTxId 102 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:43.975335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:43.975367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.975374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:43.975380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:43.975387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:43.975393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:43.975403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.975421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:43.975569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.975648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:43.995371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:43.995406Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:43.995530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:44.001386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:44.006086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:44.006163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:44.009159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:44.009248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:44.009393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.009479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:44.010249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:44.010317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:44.010677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:44.010694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-30T09:21:44.010721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:44.010731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:44.010760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:44.010800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.013104Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-30T09:21:44.037989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:44.038095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.038179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:44.038190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:44.038255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:44.038275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:44.043284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.043361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:44.043461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.043484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:44.043492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts 
opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:44.043499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:44.044329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.044353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:44.044362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:44.044910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.044926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.044933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.044942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:44.045956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:44.046504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:44.046556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:44.046808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.046845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:44.046853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.046934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:44.046947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.046993Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:44.047008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21: ... CHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:44.063114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_external_data_source.cpp:35: [72057594046678944] TAlterExternalDataSource TPropose, operationId: 102:0HandleReply TEvOperationPlan: step# 5000003 2025-06-30T09:21:44.063145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 128 -> 240 2025-06-30T09:21:44.063177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:44.063189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:21:44.063394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:21:44.063704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:21:44.064034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:44.064045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:44.064090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:21:44.064109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:21:44.064126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:44.064133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-30T09:21:44.064140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-30T09:21:44.064145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-30T09:21:44.064249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.064261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:21:44.064278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:21:44.064284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:21:44.064290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:21:44.064294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:21:44.064300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-30T09:21:44.064306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:21:44.064313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:21:44.064320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:21:44.064351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:21:44.064360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-30T09:21:44.064366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-30T09:21:44.064370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-30T09:21:44.064505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:21:44.064519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:21:44.064525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:21:44.064531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-30T09:21:44.064536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:21:44.064692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:21:44.064707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:21:44.064712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:21:44.064717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-30T09:21:44.064722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:21:44.064732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-30T09:21:44.065437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:21:44.065710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-30T09:21:44.065802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:21:44.065815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:21:44.065914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:21:44.065937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:21:44.065943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:336:2325] TestWaitNotification: OK eventTxId 102 2025-06-30T09:21:44.066039Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:44.066113Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 86us result status StatusSuccess 2025-06-30T09:21:44.066228Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 
SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 2 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ParallelCreateExternalDataSource [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:43.619243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:43.619276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.619283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:43.619289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:43.619296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:43.619300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:43.619313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.619330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:43.619468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.619562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:43.644873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:43.644902Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:43.645064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.654822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:43.655328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:43.655382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:43.657377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:43.657454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:43.657602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.657697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:43.658333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.658404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:43.658841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.658859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.658901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:43.658914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:43.658922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:43.658967Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.660808Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-30T09:21:43.684661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.684778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.684880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:43.684894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:43.684970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:43.684991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:43.693531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.693606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:43.693684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.693710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:43.693719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:43.693726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:43.694414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.694433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:43.694442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state 
for txid 1:0 3 -> 128 2025-06-30T09:21:43.694957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.694975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.694983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.694992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.695909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:43.696502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:43.696557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:43.696795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.696834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.696844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.696933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:43.696944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.696980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.696995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21: ... 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource1" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.932424Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/MyExternalDataSource2" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:43.932456Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/MyExternalDataSource2" took 32us result status StatusSuccess 2025-06-30T09:21:43.932561Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/MyExternalDataSource2" PathDescription { Self { Name: "MyExternalDataSource2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 126 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 
MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource2" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.932698Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:43.932724Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 28us result status StatusSuccess 2025-06-30T09:21:43.932813Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 124 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "MyExternalDataSource1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 125 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "MyExternalDataSource2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 126 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.932887Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/MyExternalDataSource1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 
2025-06-30T09:21:43.932909Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/MyExternalDataSource1" took 24us result status StatusSuccess 2025-06-30T09:21:43.932962Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/MyExternalDataSource1" PathDescription { Self { Name: "MyExternalDataSource1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 125 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource1" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.933028Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/MyExternalDataSource2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:43.933048Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/MyExternalDataSource2" took 22us result status StatusSuccess 2025-06-30T09:21:43.933098Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/MyExternalDataSource2" PathDescription { Self { Name: "MyExternalDataSource2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 126 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource2" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::PreventDeletionOfDependentDataSources [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:43.410338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:43.410363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.410368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:43.410373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:43.410378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:43.410381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:43.410389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.410404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-30T09:21:43.410503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.410578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:43.425469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:43.425493Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:43.425608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.428618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:43.428836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:43.428876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:43.430334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:43.430404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:43.430535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.430618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:43.431181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.431240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:43.431505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.431520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.431549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:43.431556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:43.431561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:43.431588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.432845Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-30T09:21:43.458200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: 
TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.458272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.458335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:43.458343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:43.458400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:43.458414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:43.459092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.459135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:43.459190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.459206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:43.459212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:43.459218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:43.459654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.459667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:43.459673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:43.460034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.460044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-30T09:21:43.460051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.460059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.460769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:43.461266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:43.461308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:43.461516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.461548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.461556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.461635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:43.461644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.461674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.461688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21: ... 
: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:21:44.013068Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:21:44.013081Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:21:44.013086Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-30T09:21:44.013090Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:21:44.013094Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:21:44.013242Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:21:44.013258Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:21:44.013262Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-30T09:21:44.013269Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-30T09:21:44.013273Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:21:44.013285Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-30T09:21:44.013724Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:21:44.013798Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:21:44.014037Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-30T09:21:44.014091Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-30T09:21:44.014100Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-30T09:21:44.014173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-30T09:21:44.014190Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:21:44.014195Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:338:2327] TestWaitNotification: OK eventTxId 101 2025-06-30T09:21:44.014269Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:44.014307Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 50us result status StatusSuccess 2025-06-30T09:21:44.014409Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-06-30T09:21:44.015294Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpDropExternalDataSource Drop { Name: "ExternalDataSource" } } TxId: 103 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-30T09:21:44.015332Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_external_data_source.cpp:116: [72057594046678944] TDropExternalDataSource Propose: opId# 103:0, path# /MyRoot/ExternalDataSource 2025-06-30T09:21:44.015346Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 103:1, propose status:StatusSchemeError, reason: Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable, at schemeshard: 72057594046678944 2025-06-30T09:21:44.015902Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 103, response: Status: StatusSchemeError Reason: "Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:44.015949Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusSchemeError, reason: Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable, operation: DROP EXTERNAL DATA SOURCE, path: /MyRoot/ExternalDataSource TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-30T09:21:44.016010Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-30T09:21:44.016018Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-30T09:21:44.016088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-30T09:21:44.016114Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:21:44.016119Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:346:2335] TestWaitNotification: OK eventTxId 103 2025-06-30T09:21:44.016183Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:44.016215Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 39us result status StatusSuccess 2025-06-30T09:21:44.016306Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalDataSource" PathDescription { Self { Name: "ExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 
PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "ExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { References { Path: "/MyRoot/ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } } } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |81.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |81.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots >> TKeyValueTest::TestInlineCopyRangeWorks [GOOD] >> TKeyValueTest::TestInlineCopyRangeWorksNewApi ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::DropExternalDataSource [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:43.870920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:43.870950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.870958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:43.870964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:43.870973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:43.870978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:43.870990Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.871007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:43.871137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.871216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:43.890184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:43.890219Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:43.890354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.893498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:43.893733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:43.893793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:43.895324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:43.895395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:43.895524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.895606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:43.896126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.896188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:43.896517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.896530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.896556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:43.896566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:43.896573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:43.896611Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.898236Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-30T09:21:43.938200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.938294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.938376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:43.938386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:43.938455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:43.938472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:43.950193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.950257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:43.950323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.950344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:43.950351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:43.950357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:43.951121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.951144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:43.951153Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:43.951640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.951654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.951662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.951671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.952510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:43.953061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:43.953111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:43.953360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.953395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.953404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.953486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:43.953496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.953534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.953548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21: ... 
046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-30T09:21:44.354457Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.354490Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 8589936750 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:44.354503Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 102:0 HandleReply TEvOperationPlan: step# 5000003 2025-06-30T09:21:44.354546Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:21:44.354570Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 128 -> 240 2025-06-30T09:21:44.354616Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:44.354630Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:21:44.354800Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:21:44.355303Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:21:44.355677Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:44.355690Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:44.355751Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:21:44.355790Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:44.355797Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-30T09:21:44.355805Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-30T09:21:44.355901Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 
72057594046678944 2025-06-30T09:21:44.355913Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:21:44.355932Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:21:44.355939Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:21:44.355945Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:21:44.355949Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:21:44.355956Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-30T09:21:44.355963Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:21:44.355970Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:21:44.355976Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:21:44.355994Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:21:44.356002Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-30T09:21:44.356007Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-30T09:21:44.356012Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-30T09:21:44.356108Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:21:44.356123Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:21:44.356129Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:21:44.356138Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-30T09:21:44.356144Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:21:44.356239Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at 
schemeshard: 72057594046678944 2025-06-30T09:21:44.356246Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:21:44.356259Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:21:44.356310Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:21:44.356320Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:21:44.356325Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:21:44.356329Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-30T09:21:44.356334Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:44.356343Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-30T09:21:44.357178Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:21:44.357210Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:21:44.357224Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-30T09:21:44.357287Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:21:44.357296Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:21:44.357390Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:21:44.357412Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:21:44.357419Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:338:2327] TestWaitNotification: OK eventTxId 102 2025-06-30T09:21:44.357512Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:44.357556Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 59us result status StatusPathDoesNotExist 2025-06-30T09:21:44.357614Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/MyExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TOlapReboots::CreateTable >> TOlapReboots::CreateStandaloneTable >> TOlapReboots::CreateDropStandaloneTable |81.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/client/ut/ydb-core-client-ut |81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/ut/ydb-core-client-ut |81.6%| [LD] {RESULT} $(B)/ydb/core/client/ut/ydb-core-client-ut >> TOlapReboots::CreateStore >> TConsoleTests::TestRegisterComputationalUnitsForPending [GOOD] >> TConsoleTests::TestNotifyOperationCompletion >> TCdcStreamWithRebootsTests::CreateStreamWithAwsRegion[TabletReboots] |81.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_external_data_source/test-results/unittest/{meta.json ... results_accumulator.log} >> TOlapReboots::CreateMultipleTables |81.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TConsoleTests::TestDatabaseQuotas [GOOD] >> TConsoleTests::TestDatabaseQuotasBadOverallQuota >> TCdcStreamWithRebootsTests::CreateStreamOnIndexTableExplicitReady[PipeResets] >> TCdcStreamWithRebootsTests::CreateStreamExplicitReady[TabletReboots] >> TCdcStreamWithRebootsTests::CreateStreamOnIndexTableExplicitReady[TabletReboots] >> TCdcStreamWithRebootsTests::CreateStreamWithAwsRegion[PipeResets] >> TCdcStreamWithRebootsTests::DisableStream[PipeResets] |81.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest |81.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |81.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |81.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest >> TBackupCollectionWithRebootsTests::SimpleDropWithReboots2 [GOOD] >> TBackupCollectionWithRebootsTests::CreateDroppedWithReboots [GOOD] |81.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |81.6%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer >> TCdcStreamWithRebootsTests::DropStreamExplicitReady[PipeResets] >> TBackupCollectionWithRebootsTests::ParallelCreateDrop [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> TBackupCollectionWithRebootsTests::SimpleDropWithReboots2 [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:42.944350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:42.944382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:42.944389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:42.944395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:42.944410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:42.944415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:42.944426Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:42.944443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:42.944559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:42.944671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:42.962004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:42.962034Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:42.962157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:42.966687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:42.968866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:42.968935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:42.980498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:42.980573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:42.980772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:42.980895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:42.983552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:42.983605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:42.983917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:42.983930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:42.983956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:42.983967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:42.983975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:42.984017Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.986576Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:43.020367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.020483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.020575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:43.020587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:43.020648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:43.020668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:43.021921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.021990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:43.022085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.022101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:43.022108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:43.022116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:43.022926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.022944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:43.022953Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:43.023652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.023670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.023679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.023689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.024749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:43.025375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:43.025435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:43.025715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.025759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.025804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.025903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:43.025914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.025963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.025980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... 
eBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:47.998952Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:21:47.998986Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:21:47.998996Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:21:47.999021Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:47.999027Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1004, path id: 3 2025-06-30T09:21:47.999033Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1004, path id: 4 2025-06-30T09:21:47.999037Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1004, path id: 5 2025-06-30T09:21:47.999085Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-30T09:21:47.999093Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-30T09:21:47.999108Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:21:47.999113Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:21:47.999118Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:21:47.999121Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:21:47.999126Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-30T09:21:47.999131Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:21:47.999136Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:0 2025-06-30T09:21:47.999140Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:0 2025-06-30T09:21:47.999153Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:21:47.999159Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in 
progress, tx: 1004, publications: 3, subscribers: 0 2025-06-30T09:21:47.999163Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 3], 7 2025-06-30T09:21:47.999167Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 4], 7 2025-06-30T09:21:47.999171Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 5], 18446744073709551615 2025-06-30T09:21:47.999300Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:47.999313Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:47.999318Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:21:47.999323Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-30T09:21:47.999328Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-30T09:21:47.999386Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:21:47.999395Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:21:47.999406Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:21:47.999457Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:47.999467Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:47.999472Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:21:47.999476Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-30T09:21:47.999480Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:21:47.999817Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 7 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:47.999835Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 7 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:47.999840Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:21:47.999845Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 7 2025-06-30T09:21:47.999850Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:21:47.999864Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 0 2025-06-30T09:21:48.000481Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:21:48.000538Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:21:48.000667Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:21:48.000742Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 TestModificationResult got TxId: 1004, wait until txId: 1004 TestWaitNotification wait txId: 1004 2025-06-30T09:21:48.000802Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-30T09:21:48.000810Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-30T09:21:48.000874Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-30T09:21:48.000894Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:21:48.000899Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [16:396:2385] TestWaitNotification: OK eventTxId 1004 2025-06-30T09:21:48.000971Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:48.001008Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1" took 48us result status StatusPathDoesNotExist 2025-06-30T09:21:48.001048Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.backups/collections/MyCollection1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.backups/collections\' (id: [OwnerId: 72057594046678944, LocalPathId: 4]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/.backups/collections/MyCollection1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.backups/collections" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "collections" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> TBackupCollectionWithRebootsTests::CreateDroppedWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:42.886426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:42.886459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:42.886466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:42.886473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:42.886488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:42.886494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:42.886505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:42.886521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:42.886643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:42.886778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:42.905718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:42.905748Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:42.905896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:42.915377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:42.919031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:42.919093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:42.921230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:42.921302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:42.921434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:42.921526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:42.922590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:42.922649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:42.923031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:42.923049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:42.923079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:42.923092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:42.923100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:42.923161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.925071Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:42.952332Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:42.952429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.952505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:42.952515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:42.952567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:42.952582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:42.953589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:42.953645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:42.953725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.953737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:42.953744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:42.953751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:42.954309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.954324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:42.954333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:42.954782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.954799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-30T09:21:42.954806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:42.954815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:42.955680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:42.956200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:42.956251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:42.956501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:42.956535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:42.956557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:42.956638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:42.956648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:42.956688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:42.956704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... 
chemeshard__operation_drop_backup_collection.cpp:31: [72057594046678944] TDropBackupCollection TPropose, operationId: 1006:0, HandleReply TEvOperationPlan: step# 5000007 2025-06-30T09:21:48.135884Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-30T09:21:48.135909Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1006:0 1 -> 240 2025-06-30T09:21:48.135942Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:21:48.135953Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:21:48.135961Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-30T09:21:48.136159Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:21:48.136252Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 FAKE_COORDINATOR: Erasing txId 1006 2025-06-30T09:21:48.136566Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:48.136574Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:21:48.136614Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:21:48.136630Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 6] 2025-06-30T09:21:48.136658Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:48.136664Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1006, path id: 3 2025-06-30T09:21:48.136671Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1006, path id: 4 2025-06-30T09:21:48.136677Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1006, path id: 6 2025-06-30T09:21:48.136753Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1006:0, at schemeshard: 72057594046678944 2025-06-30T09:21:48.136762Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: 
[72057594046678944] TDone opId# 1006:0 ProgressState 2025-06-30T09:21:48.136778Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1006:0 progress is 1/1 2025-06-30T09:21:48.136784Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:21:48.136791Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1006:0 progress is 1/1 2025-06-30T09:21:48.136795Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:21:48.136801Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1006, ready parts: 1/1, is published: false 2025-06-30T09:21:48.136808Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:21:48.136814Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1006:0 2025-06-30T09:21:48.136820Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1006:0 2025-06-30T09:21:48.136834Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-30T09:21:48.136842Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1006, publications: 3, subscribers: 0 2025-06-30T09:21:48.136847Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 3], 9 2025-06-30T09:21:48.136852Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 4], 11 2025-06-30T09:21:48.136857Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 6], 18446744073709551615 2025-06-30T09:21:48.136952Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:48.136965Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:48.136971Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:21:48.136977Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 18446744073709551615 2025-06-30T09:21:48.136983Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-30T09:21:48.137038Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:21:48.137045Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-06-30T09:21:48.137056Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:21:48.137101Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 9 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:48.137111Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 9 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:48.137116Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:21:48.137122Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 9 2025-06-30T09:21:48.137127Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:21:48.137248Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 11 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:48.137261Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 11 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:48.137266Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:21:48.137271Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-30T09:21:48.137276Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:21:48.137290Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1006, subscribers: 0 2025-06-30T09:21:48.138175Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:21:48.138211Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 
2025-06-30T09:21:48.138250Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:21:48.138290Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 TestModificationResult got TxId: 1006, wait until txId: 1006 TestWaitNotification wait txId: 1006 2025-06-30T09:21:48.138395Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1006: send EvNotifyTxCompletion 2025-06-30T09:21:48.138404Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1006 2025-06-30T09:21:48.138495Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1006, at schemeshard: 72057594046678944 2025-06-30T09:21:48.138520Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1006: got EvNotifyTxCompletionResult 2025-06-30T09:21:48.138526Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1006: satisfy waiter [16:455:2444] TestWaitNotification: OK eventTxId 1006 >> TBackupCollectionWithRebootsTests::CreateDroppedAndDropWithReboots [GOOD] >> TConsoleTests::TestDatabaseQuotasBadOverallQuota [GOOD] >> TConsoleTests::TestDatabaseQuotasBadStorageQuota >> KqpScheme::CreateDropTableMultipleTime [GOOD] >> KqpScheme::CreateDropColumnTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> TBackupCollectionWithRebootsTests::ParallelCreateDrop [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:42.811069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:42.811102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:42.811110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:42.811116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:42.811133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:42.811139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:42.811151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-30T09:21:42.811168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:42.811291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:42.811431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:42.842187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:42.842233Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:42.842464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:42.853032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:42.856909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:42.856973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:42.861536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:42.861628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:42.861806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:42.861943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:42.862984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:42.863047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:42.863399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:42.863413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:42.863441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:42.863451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:42.863457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:42.863509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.871253Z node 1 
:HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:42.897379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:42.897490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.897579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:42.897592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:42.897649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:42.897670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:42.898838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:42.898899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:42.899004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.899020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:42.899028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:42.899036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:42.899765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.899781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:42.899789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:42.900286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress 
Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.900300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:42.900309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:42.900319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:42.901271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:42.901793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:42.901850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:42.902108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:42.902145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:42.902169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:42.902261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:42.902272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:42.902316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:42.902332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... 
eBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:48.373931Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:21:48.373966Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:21:48.373976Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:21:48.374002Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:48.374008Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1005, path id: 3 2025-06-30T09:21:48.374015Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1005, path id: 4 2025-06-30T09:21:48.374019Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1005, path id: 5 2025-06-30T09:21:48.374072Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:21:48.374080Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1005:0 ProgressState 2025-06-30T09:21:48.374093Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:21:48.374098Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:21:48.374104Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:21:48.374108Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:21:48.374112Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: false 2025-06-30T09:21:48.374121Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:21:48.374127Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1005:0 2025-06-30T09:21:48.374131Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1005:0 2025-06-30T09:21:48.374146Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:21:48.374154Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in 
progress, tx: 1005, publications: 3, subscribers: 0 2025-06-30T09:21:48.374158Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 3], 7 2025-06-30T09:21:48.374162Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 4], 7 2025-06-30T09:21:48.374166Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 5], 18446744073709551615 2025-06-30T09:21:48.374297Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:21:48.374310Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:21:48.374315Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:21:48.374320Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-30T09:21:48.374324Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-30T09:21:48.374379Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:21:48.374418Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:21:48.374430Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:21:48.374478Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:21:48.374487Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:21:48.374491Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:21:48.374495Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-30T09:21:48.374500Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:21:48.374763Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 7 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:21:48.374780Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 7 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:21:48.374785Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:21:48.374790Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 7 2025-06-30T09:21:48.374794Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:21:48.374808Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1005, subscribers: 0 2025-06-30T09:21:48.375306Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:21:48.375344Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:21:48.375384Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:21:48.375582Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-30T09:21:48.375645Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-30T09:21:48.375653Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-30T09:21:48.375727Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-30T09:21:48.375747Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-30T09:21:48.375752Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [16:402:2391] TestWaitNotification: OK eventTxId 1005 2025-06-30T09:21:48.375826Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:48.375864Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1" took 48us result status StatusPathDoesNotExist 2025-06-30T09:21:48.375909Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.backups/collections/MyCollection1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.backups/collections\' (id: [OwnerId: 72057594046678944, LocalPathId: 4]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/.backups/collections/MyCollection1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.backups/collections" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "collections" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |81.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest >> TBackupCollectionWithRebootsTests::CreateWithReboots [GOOD] |81.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |81.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> TBackupCollectionWithRebootsTests::CreateDroppedAndDropWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:43.875038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:43.875068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.875075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:43.875081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:43.875096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:43.875101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: 
type TxSplitTablePartition, limit 10000 2025-06-30T09:21:43.875111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:43.875128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:43.875244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.875347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:43.893857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:43.893891Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:43.894029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:43.898064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:43.898321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:43.898366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:43.900230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:43.900304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:43.900443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.900546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:43.901561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.901609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:43.901934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:43.901951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:43.901974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:43.901984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:43.901991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:43.902031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.903843Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:43.929137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:43.929222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.929281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:43.929291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:43.929344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:43.929360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:43.930183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.930237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:43.930299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.930311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:43.930317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:43.930323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:43.930949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.930969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:43.930980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:43.932955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.932973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:43.932980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.932988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:43.933865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:43.943557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:43.943624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:43.943913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:43.943956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:43.943980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.944067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:43.944089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:43.944122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:43.944140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... 
rd DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:49.003860Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:21:49.003891Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:21:49.003904Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 6] 2025-06-30T09:21:49.003927Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:49.003932Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1006, path id: 3 2025-06-30T09:21:49.003938Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1006, path id: 4 2025-06-30T09:21:49.003942Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1006, path id: 6 2025-06-30T09:21:49.004008Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1006:0, at schemeshard: 72057594046678944 2025-06-30T09:21:49.004015Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1006:0 ProgressState 2025-06-30T09:21:49.004029Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1006:0 progress is 1/1 2025-06-30T09:21:49.004034Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:21:49.004040Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1006:0 progress is 1/1 2025-06-30T09:21:49.004043Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:21:49.004048Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1006, ready parts: 1/1, is published: false 2025-06-30T09:21:49.004054Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:21:49.004060Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1006:0 2025-06-30T09:21:49.004064Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1006:0 2025-06-30T09:21:49.004077Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-30T09:21:49.004083Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in 
progress, tx: 1006, publications: 3, subscribers: 0 2025-06-30T09:21:49.004086Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 3], 9 2025-06-30T09:21:49.004090Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 4], 11 2025-06-30T09:21:49.004095Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 6], 18446744073709551615 2025-06-30T09:21:49.004176Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:49.004188Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:49.004193Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:21:49.004198Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 18446744073709551615 2025-06-30T09:21:49.004202Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-30T09:21:49.004251Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:21:49.004257Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-06-30T09:21:49.004266Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:21:49.004303Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 9 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:49.004311Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 9 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:49.004316Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:21:49.004320Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 9 2025-06-30T09:21:49.004324Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:21:49.004427Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 11 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:49.004438Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 11 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:49.004442Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:21:49.004446Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-30T09:21:49.004451Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:21:49.004460Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1006, subscribers: 0 2025-06-30T09:21:49.005200Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:21:49.005231Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:21:49.005262Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:21:49.005301Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 TestModificationResult got TxId: 1006, wait until txId: 1006 TestWaitNotification wait txId: 1006 2025-06-30T09:21:49.005368Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1006: send EvNotifyTxCompletion 2025-06-30T09:21:49.005377Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1006 2025-06-30T09:21:49.005461Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1006, at schemeshard: 72057594046678944 2025-06-30T09:21:49.005482Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1006: got EvNotifyTxCompletionResult 2025-06-30T09:21:49.005488Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1006: satisfy waiter [16:455:2444] TestWaitNotification: OK eventTxId 1006 2025-06-30T09:21:49.005584Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:49.005621Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1" took 50us result status StatusPathDoesNotExist 2025-06-30T09:21:49.005663Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.backups/collections/MyCollection1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.backups/collections\' (id: [OwnerId: 72057594046678944, LocalPathId: 4]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/.backups/collections/MyCollection1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.backups/collections" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "collections" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> KqpScheme::CreateDropColumnTable [GOOD] >> KqpScheme::CreateDropColumnTableNegative >> TBackupCollectionWithRebootsTests::SimpleDropWithReboots [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> TBackupCollectionWithRebootsTests::CreateWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:44.045557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:44.045593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:44.045600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:44.045607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:44.045622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:44.045627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:44.045637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:44.045654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:44.045793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:44.045906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:44.062933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:44.062970Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:44.063089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:44.067219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:44.067523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:44.067567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:44.069162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:44.069279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:44.069433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.069531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:44.070390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:44.070438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:44.070776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:44.070791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:44.070815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:44.070828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:44.070835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:44.070890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.072589Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 
72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:44.098836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:44.098948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.099049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:44.099062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:44.099117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:44.099137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:44.100260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.100320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:44.100403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.100417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:44.100424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:44.100431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:44.101098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.101113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:44.101120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:44.101546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.101560Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.101568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.101578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:44.102432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:44.102957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:44.103013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:44.103239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.103267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:44.103285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.103365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:44.103376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.103415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:44.103430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... 
1003 msg type: 269090816 2025-06-30T09:21:49.108814Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1003, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1003 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1003 at step: 5000004 2025-06-30T09:21:49.108889Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:49.108912Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1003 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 68719478895 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:49.108921Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_backup_collection.cpp:44: [72057594046678944] TCreateBackupCollection TPropose, operationId: 1003:0HandleReply TEvOperationPlan: step# 5000004 2025-06-30T09:21:49.108951Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 128 -> 240 2025-06-30T09:21:49.108981Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:21:49.108991Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 FAKE_COORDINATOR: Erasing txId 1003 2025-06-30T09:21:49.109366Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:49.109373Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:21:49.109401Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:21:49.109417Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:21:49.109429Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:49.109434Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1003, path id: 4 2025-06-30T09:21:49.109439Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1003, path id: 5 2025-06-30T09:21:49.109444Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1003, path id: 5 
2025-06-30T09:21:49.109488Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:21:49.109495Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:21:49.109511Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:21:49.109516Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:21:49.109522Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:21:49.109526Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:21:49.109531Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: false 2025-06-30T09:21:49.109536Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:21:49.109541Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:21:49.109545Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:21:49.109557Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-30T09:21:49.109562Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 2, subscribers: 1 2025-06-30T09:21:49.109567Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 4], 5 2025-06-30T09:21:49.109570Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 5], 1 2025-06-30T09:21:49.109683Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 5 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:49.109690Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 5 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:49.109694Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:21:49.109697Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 5 2025-06-30T09:21:49.109700Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:21:49.109819Z node 16 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 1 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:49.109827Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 1 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:49.109831Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:21:49.109833Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 1 2025-06-30T09:21:49.109839Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:21:49.109845Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 1 2025-06-30T09:21:49.109849Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [16:309:2298] 2025-06-30T09:21:49.110307Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:21:49.110514Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:21:49.110534Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:21:49.110539Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [16:310:2299] TestWaitNotification: OK eventTxId 1002 TestWaitNotification: OK eventTxId 1003 2025-06-30T09:21:49.110643Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:49.110682Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1" took 46us result status StatusSuccess 2025-06-30T09:21:49.110808Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1" PathDescription { Self { Name: "MyCollection1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 1 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 1 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BackupCollectionVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "MyCollection1" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/Table1" } } Cluster { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TBackupCollectionWithRebootsTests::DropWithReboots [GOOD] >> TVPatchTests::PatchPartOk ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> TBackupCollectionWithRebootsTests::SimpleDropWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:44.775105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:44.775138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:44.775145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:44.775151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:44.775166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:44.775171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:44.775182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:44.775200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 
600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:44.775319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:44.775427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:44.810395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:44.810428Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:44.810558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:44.818957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:44.822856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:44.822915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:44.825401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:44.825470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:44.825609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.825701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:44.831052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:44.831116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:44.831455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:44.831473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:44.831503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:44.831515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:44.831522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:44.831566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.833408Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: 
[1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:44.896165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:44.896263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.896339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:44.896350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:44.896408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:44.896425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:44.898199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.898270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:44.898387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.898406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:44.898417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:44.898432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:44.900046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.900067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:44.900076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:44.901724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.901742Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.901751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.901780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:44.902782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:44.908053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:44.908116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:44.908404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.908450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:44.908474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.908573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:44.908586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.908625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:44.908641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... 
eBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:49.691144Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:21:49.691179Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:21:49.691193Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:21:49.691223Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:49.691230Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1004, path id: 3 2025-06-30T09:21:49.691238Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1004, path id: 4 2025-06-30T09:21:49.691243Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1004, path id: 5 2025-06-30T09:21:49.691295Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-30T09:21:49.691305Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-30T09:21:49.691321Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:21:49.691327Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:21:49.691333Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:21:49.691337Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:21:49.691343Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-30T09:21:49.691349Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:21:49.691355Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:0 2025-06-30T09:21:49.691364Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:0 2025-06-30T09:21:49.691379Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:21:49.691387Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in 
progress, tx: 1004, publications: 3, subscribers: 0 2025-06-30T09:21:49.691392Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 3], 7 2025-06-30T09:21:49.691397Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 4], 7 2025-06-30T09:21:49.691401Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 5], 18446744073709551615 2025-06-30T09:21:49.691540Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:49.691556Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:49.691562Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:21:49.691568Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-30T09:21:49.691573Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-30T09:21:49.691640Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:21:49.691648Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:21:49.691660Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:21:49.691721Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:49.691734Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:49.691739Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:21:49.691744Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-30T09:21:49.691749Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:21:49.692139Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 7 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:49.692160Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 7 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:49.692167Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:21:49.692173Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 7 2025-06-30T09:21:49.692178Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:21:49.692195Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 0 2025-06-30T09:21:49.692882Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:21:49.692947Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:21:49.693107Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:21:49.693204Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 TestModificationResult got TxId: 1004, wait until txId: 1004 TestWaitNotification wait txId: 1004 2025-06-30T09:21:49.693277Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-30T09:21:49.693288Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-30T09:21:49.693370Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-30T09:21:49.693393Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:21:49.693400Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [16:396:2385] TestWaitNotification: OK eventTxId 1004 2025-06-30T09:21:49.693490Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:49.693532Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1" took 56us result status StatusPathDoesNotExist 2025-06-30T09:21:49.693585Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.backups/collections/MyCollection1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.backups/collections\' (id: [OwnerId: 72057594046678944, LocalPathId: 4]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/.backups/collections/MyCollection1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.backups/collections" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "collections" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TVPatchTests::PatchPartGetError |81.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |81.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |81.7%| [LD] {RESULT} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut >> TCdcStreamWithRebootsTests::CreateStream[PipeResets] >> TVPatchTests::PatchPartOk [GOOD] >> KqpScheme::CreateDropColumnTableNegative [GOOD] >> KqpScheme::CreateAsyncReplicationWithSecret >> TVPatchTests::PatchPartGetError [GOOD] |81.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup_collection_reboots/unittest >> TBackupCollectionWithRebootsTests::DropWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:21:44.375604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:44.375635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:44.375642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:44.375648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:44.375662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:44.375667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:44.375679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:44.375697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:44.375829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:44.376107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:44.416538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:44.416575Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:44.416722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:44.424878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:44.435319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:44.435387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:44.440709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:44.440832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:44.440982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.441115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:44.442132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:44.442188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:44.442499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:44.442510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:44.442534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:44.442543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:44.442549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:44.442595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.444238Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:44.500181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:44.500283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.500363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:44.500373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:44.500451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:44.500469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:44.501460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.501524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:44.501599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.501612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:44.501619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:44.501626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:44.502889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-30T09:21:44.502913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:44.502922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:44.503510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.503525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:44.503534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.503543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:44.504495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:44.505179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:44.505233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:44.505473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:44.505510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:44.505534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.505622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:44.505632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:44.505675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:44.505690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 
1], at schemeshard: 7205759 ... rd DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:50.169105Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:21:50.169133Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:21:50.169147Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 6] 2025-06-30T09:21:50.169171Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:50.169176Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1006, path id: 3 2025-06-30T09:21:50.169180Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1006, path id: 4 2025-06-30T09:21:50.169184Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:213:2213], at schemeshard: 72057594046678944, txId: 1006, path id: 6 2025-06-30T09:21:50.169226Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1006:0, at schemeshard: 72057594046678944 2025-06-30T09:21:50.169233Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1006:0 ProgressState 2025-06-30T09:21:50.169246Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1006:0 progress is 1/1 2025-06-30T09:21:50.169251Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:21:50.169257Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1006:0 progress is 1/1 2025-06-30T09:21:50.169260Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:21:50.169265Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1006, ready parts: 1/1, is published: false 2025-06-30T09:21:50.169271Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:21:50.169275Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1006:0 2025-06-30T09:21:50.169280Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1006:0 2025-06-30T09:21:50.169291Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-30T09:21:50.169297Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1006, publications: 3, subscribers: 0 2025-06-30T09:21:50.169302Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 3], 9 2025-06-30T09:21:50.169305Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 4], 11 2025-06-30T09:21:50.169309Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 6], 18446744073709551615 2025-06-30T09:21:50.169412Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:50.169424Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:50.169429Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:21:50.169433Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 18446744073709551615 2025-06-30T09:21:50.169437Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-30T09:21:50.169482Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:21:50.169487Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-06-30T09:21:50.169495Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:21:50.169533Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 9 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:50.169542Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 9 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:50.169546Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:21:50.169549Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 9 
2025-06-30T09:21:50.169553Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:21:50.169620Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 11 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:50.169629Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 11 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:21:50.169633Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:21:50.169637Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-30T09:21:50.169641Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:21:50.169651Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1006, subscribers: 0 2025-06-30T09:21:50.171416Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:21:50.171500Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:21:50.171555Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:21:50.171796Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 TestModificationResult got TxId: 1006, wait until txId: 1006 TestWaitNotification wait txId: 1006 2025-06-30T09:21:50.171881Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1006: send EvNotifyTxCompletion 2025-06-30T09:21:50.171892Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1006 2025-06-30T09:21:50.171984Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1006, at schemeshard: 72057594046678944 2025-06-30T09:21:50.172013Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1006: got EvNotifyTxCompletionResult 2025-06-30T09:21:50.172019Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1006: satisfy waiter [16:456:2445] TestWaitNotification: OK eventTxId 1006 2025-06-30T09:21:50.172117Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1" Options 
{ ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:50.172181Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1" took 79us result status StatusPathDoesNotExist 2025-06-30T09:21:50.172229Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.backups/collections/MyCollection1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.backups/collections\' (id: [OwnerId: 72057594046678944, LocalPathId: 4]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/.backups/collections/MyCollection1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.backups/collections" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "collections" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TConsoleTests::TestNotifyOperationCompletion [GOOD] >> TConsoleTests::TestNotifyOperationCompletionExtSubdomain >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartOk [GOOD] Test command err: Recv 65537 2025-06-30T09:21:50.421096Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-30T09:21:50.421574Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-30T09:21:50.421604Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-30T09:21:50.421654Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# no 2025-06-30T09:21:50.421665Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:10:0] PullingPart# 1 Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-30T09:21:50.421699Z node 1 :BS_VDISK_PATCH INFO: {BSVSP08@skeleton_vpatch_actor.cpp:383} [0:1:0:0:0] TEvVPatch: received part data; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 DataParts# 4 ReceivedBlobId# [1:2:3:4:6:10:1] Status# OK ResultSize# 1 ParityPart# no 2025-06-30T09:21:50.421726Z node 1 :BS_VDISK_PATCH INFO: 
{BSVSP14@skeleton_vpatch_actor.cpp:462} [0:1:0:0:0] TEvVPatch: send xor diffs; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorDiffCount# 0 2025-06-30T09:21:50.421738Z node 1 :BS_VDISK_PATCH INFO: {BSVSP15@skeleton_vpatch_actor.cpp:502} [0:1:0:0:0] TEvVPatch: send vPut; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 ReceivedXorDiffs# 0 ExpectedXorDiffs# 0 Send NKikimr::TEvBlobStorage::TEvVPut Recv NKikimr::TEvBlobStorage::TEvVPutResult 2025-06-30T09:21:50.421791Z node 1 :BS_VDISK_PATCH INFO: {BSVSP10@skeleton_vpatch_actor.cpp:627} [0:1:0:0:0] TEvVPatch: received put result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK 2025-06-30T09:21:50.421800Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK ErrorReason# Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-30T09:21:50.421814Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartGetError [GOOD] Test command err: Recv 65537 2025-06-30T09:21:50.483275Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-30T09:21:50.483675Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-30T09:21:50.483694Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-30T09:21:50.483733Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# no 2025-06-30T09:21:50.483743Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:10:0] PullingPart# 1 Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-30T09:21:50.483773Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# ERROR ErrorReason# Recieve not OK status from VGetResult, received status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-30T09:21:50.483784Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> TVPatchTests::PatchPartFastXorDiffBeyoundBlob >> TVPatchTests::PatchPartFastXorDiffBeyoundBlob [GOOD] >> 
TVPatchTests::FullPatchTestXorDiffFasterVGetResult [GOOD] >> TVPatchTests::PatchPartFastXorDiffDisorder >> TConsoleTests::TestDatabaseQuotasBadStorageQuota [GOOD] >> TVPatchTests::PatchPartFastXorDiffDisorder [GOOD] >> TVPatchTests::FindingPartsWhenPartsAreDontExist >> TCdcStreamWithRebootsTests::DisableStream[PipeResets] [GOOD] >> YdbIndexTable::MultiShardTableOneIndex [GOOD] >> YdbIndexTable::MultiShardTableOneIndexDataColumn ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FullPatchTestXorDiffFasterVGetResult [GOOD] Test command err: Recv 65537 2025-06-30T09:21:51.115181Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:100:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-30T09:21:51.115618Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:100:0] Status# OK ResultSize# 1 2025-06-30T09:21:51.115640Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:100:0] FoundParts# [5] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchXorDiff 2025-06-30T09:21:51.115682Z node 1 :BS_VDISK_PATCH INFO: {BSVSP13@skeleton_vpatch_actor.cpp:674} [0:1:0:0:0] TEvVPatch: received xor diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] FromPart# 4 ToPart# 0 HasBuffer# no ReceivedXorDiffCount# 1/0 Send NKikimr::TEvBlobStorage::TEvVPatchXorDiffResult 2025-06-30T09:21:51.115697Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-30T09:21:51.115721Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] OriginalPartId# 0 PatchedPartId# 0 Status# ERROR ErrorReason# The diff at index 0 went beyound the blob part; DiffStart# 100 DiffEnd# 96 BlobPartSize# 32 Send NKikimr::TEvBlobStorage::TEvVPatchResult Recv NKikimr::TEvVPatchDyingConfirm |81.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:246:2060] recipient: [1:228:2144] 2025-06-30T09:19:59.968909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:19:59.968933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:59.968939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:19:59.968944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:19:59.968951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:19:59.968954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:19:59.968962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:19:59.968972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:19:59.969059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:19:59.969124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:19:59.980736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:19:59.980763Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:59.984181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:19:59.984216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:19:59.984239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:19:59.986264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:19:59.986364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:19:59.986479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: 
TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:19:59.986556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:19:59.987373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:59.987425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:19:59.987694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:19:59.987704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:19:59.987745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:19:59.987754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:19:59.987760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:19:59.987791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:19:59.989243Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:360:2060] recipient: [1:17:2064] 2025-06-30T09:20:00.011714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:20:00.011790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:00.011854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:20:00.011864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:20:00.011923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:20:00.011937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
2025-06-30T09:20:00.012823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:20:00.012866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:20:00.012938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:00.012958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:20:00.012965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:00.012972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:00.013672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:00.013697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:20:00.013709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:00.014385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:00.014403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:20:00.014411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:00.014421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:00.015249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:00.015859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:00.015908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:20:00.016143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at 
schemeshard: 72057594046678944 2025-06-30T09:20:00.016178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 252 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:20:00.016187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:00.016264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:00.016273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:20:00.016311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:20:00.016325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:20:00.016904Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:20:00.016914Z node 1 :FLAT_TX_SCHEMESHARD ... chemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:47.803756Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [7:245:2155], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:47.803761Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:48.160539Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:48.160584Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:48.160611Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [7:245:2155], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:48.160617Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:48.595739Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:48.595776Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:48.595796Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [7:245:2155], Recipient [7:245:2155]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:48.595803Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:48.967012Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:48.967058Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:48.967085Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [7:245:2155], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:48.967091Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:49.347784Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:49.347829Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:49.347852Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [7:245:2155], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:49.347859Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:49.713147Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:49.713177Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:49.713195Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [7:245:2155], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:49.713200Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:50.068673Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:50.068706Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:50.068728Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [7:245:2155], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:50.068735Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:50.451292Z node 7 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:50.451340Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:21:50.451369Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [7:245:2155], Recipient [7:245:2155]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:50.451376Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:21:50.494613Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [7:1095:2842], Recipient [7:245:2155]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp/TempTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-30T09:21:50.494637Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:21:50.494663Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/tmp/TempTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:21:50.494707Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp/TempTable" took 34us result status StatusPathDoesNotExist 2025-06-30T09:21:50.494772Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp/TempTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/tmp/TempTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:21:50.494861Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [7:1096:2843], Recipient [7:245:2155]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-30T09:21:50.494866Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:21:50.494876Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/tmp" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at 
schemeshard: 72057594046678944 2025-06-30T09:21:50.494896Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp" took 18us result status StatusPathDoesNotExist 2025-06-30T09:21:50.494910Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/tmp" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:21:50.494951Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [7:1097:2844], Recipient [7:245:2155]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp/TempTable/ValueIndex" Options { ShowPrivateTable: true } 2025-06-30T09:21:50.494954Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:21:50.494960Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/tmp/TempTable/ValueIndex" Options { ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:21:50.494969Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp/TempTable/ValueIndex" took 10us result status StatusPathDoesNotExist 2025-06-30T09:21:50.494981Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp/TempTable/ValueIndex\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/tmp/TempTable/ValueIndex" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TVPatchTests::FindingPartsWhenPartsAreDontExist [GOOD] >> TVPatchTests::FindingPartsWhenOnlyOnePartExists |81.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |81.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data >> TVPatchTests::FindingPartsWhenOnlyOnePartExists [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartFastXorDiffDisorder [GOOD] Test command err: Recv 65537 
2025-06-30T09:21:51.377574Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:100:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-30T09:21:51.377979Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:100:0] Status# OK ResultSize# 1 2025-06-30T09:21:51.378000Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:100:0] FoundParts# [5] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchXorDiff 2025-06-30T09:21:51.378034Z node 1 :BS_VDISK_PATCH INFO: {BSVSP13@skeleton_vpatch_actor.cpp:674} [0:1:0:0:0] TEvVPatch: received xor diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] FromPart# 4 ToPart# 0 HasBuffer# no ReceivedXorDiffCount# 1/0 Send NKikimr::TEvBlobStorage::TEvVPatchXorDiffResult 2025-06-30T09:21:51.378046Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-30T09:21:51.378064Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] OriginalPartId# 0 PatchedPartId# 0 Status# ERROR ErrorReason# [XorDiff from datapart] the start of the diff at index 0 righter than the start of the diff at index 1; PrevDiffStart# 2 DiffStart# 0 Send NKikimr::TEvBlobStorage::TEvVPatchResult Recv NKikimr::TEvVPatchDyingConfirm >> TVPatchTests::PatchPartPutError |81.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/test-results/unittest/{meta.json ... 
results_accumulator.log} |81.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest >> TCdcStreamWithRebootsTests::DisableStream[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:46.898850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:46.898882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.898889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:46.898904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:46.898911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:46.898916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:46.898928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.898956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:46.899091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:46.899184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:46.950720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:46.950767Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:46.950957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.960141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:46.961403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:46.961469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:46.973393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:46.973483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:46.973659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.973723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:46.975197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.975272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:46.975596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:46.975612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.975622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:46.975632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:46.975639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:46.975672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:46.980061Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:47.039307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 
72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:47.039390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:47.039463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:47.039474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:47.039554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:47.039571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:47.040314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:47.040363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:47.040421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:47.040432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:47.040438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:47.040445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:47.040943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:47.040956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:47.040963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:47.041371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:47.041383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:47.041391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:47.041400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:47.042268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:47.042753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:47.042799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:47.043033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:47.043064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... 057594046678944, cookie: 1004 2025-06-30T09:21:51.590728Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:21:51.590730Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:21:51.590756Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5 2025-06-30T09:21:51.590762Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:21:51.590772Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/2, is published: true 2025-06-30T09:21:51.590870Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6376: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1004 Step: 5000005 OrderId: 1004 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 185 } } CommitVersion { Step: 5000005 TxId: 1004 } 2025-06-30T09:21:51.590875Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1004, tablet: 72075186233409546, partId: 1 2025-06-30T09:21:51.590890Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1004:1, at 
schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1004 Step: 5000005 OrderId: 1004 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 185 } } CommitVersion { Step: 5000005 TxId: 1004 } 2025-06-30T09:21:51.590903Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1004 Step: 5000005 OrderId: 1004 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 185 } } CommitVersion { Step: 5000005 TxId: 1004 } 2025-06-30T09:21:51.590980Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 340 RawX2: 51539609877 } Origin: 72075186233409546 State: 2 TxId: 1004 Step: 0 Generation: 2 2025-06-30T09:21:51.590987Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1004, tablet: 72075186233409546, partId: 1 2025-06-30T09:21:51.590997Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1004:1, at schemeshard: 72057594046678944, message: Source { RawX1: 340 RawX2: 51539609877 } Origin: 72075186233409546 State: 2 TxId: 1004 Step: 0 Generation: 2 2025-06-30T09:21:51.591003Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 1004:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-30T09:21:51.591012Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 1004:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 340 RawX2: 51539609877 } Origin: 72075186233409546 State: 2 TxId: 1004 Step: 0 Generation: 2 2025-06-30T09:21:51.591023Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1004:1, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:51.591028Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 1004:1, at schemeshard: 72057594046678944 2025-06-30T09:21:51.591034Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 1004:1, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:21:51.591041Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1004:1 129 -> 240 2025-06-30T09:21:51.591532Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:21:51.591633Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:21:51.591694Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1004:1, at schemeshard: 72057594046678944 2025-06-30T09:21:51.591711Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1004:1, at schemeshard: 72057594046678944 2025-06-30T09:21:51.591725Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1004:1, at schemeshard: 72057594046678944 2025-06-30T09:21:51.591729Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1004:1 ProgressState 2025-06-30T09:21:51.591737Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:1 progress is 2/2 2025-06-30T09:21:51.591740Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 2/2 2025-06-30T09:21:51.591743Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:1 progress is 2/2 2025-06-30T09:21:51.591745Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 2/2 2025-06-30T09:21:51.591748Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 2/2, is published: true 2025-06-30T09:21:51.591755Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 2/2 2025-06-30T09:21:51.591759Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:0 2025-06-30T09:21:51.591762Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:0 2025-06-30T09:21:51.591771Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:21:51.591775Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:1 2025-06-30T09:21:51.591778Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:1 2025-06-30T09:21:51.591794Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 TestWaitNotification wait txId: 1004 2025-06-30T09:21:51.592498Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-30T09:21:51.592509Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-30T09:21:51.592573Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-30T09:21:51.592590Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:21:51.592596Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [12:720:2636] TestWaitNotification: OK eventTxId 1004 2025-06-30T09:21:51.592672Z node 12 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:21:51.592712Z node 12 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Stream" took 51us result status StatusSuccess 2025-06-30T09:21:51.592807Z node 12 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Stream" PathDescription { Self { Name: "Stream" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeCdcStream CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 CdcStreamVersion: 2 } ChildrenExist: true } Children { Name: "streamImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409548 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } CdcStreamDescription { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 4 } State: ECdcStreamStateDisabled SchemaVersion: 2 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> SubDomainWithReboots::RootWithStoragePools >> TVPatchTests::PatchPartPutError [GOOD] >> TSchemeShardSplitBySample::FlatlistNoResultWhenMedianKeyIsAtBoundary [GOOD] >> TSchemeShardSplitBySample::Histogram [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/console/ut/unittest >> TConsoleTests::TestDatabaseQuotasBadStorageQuota [GOOD] Test command err: 2025-06-30T09:20:41.067809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:41.067842Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:41.067849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:41.067855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:41.067874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:41.067879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:41.067889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:41.067904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:41.068027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:41.068109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:41.072494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:41.072521Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:41.075629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:41.075681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:41.075717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046578944 2025-06-30T09:20:41.077230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:41.077380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:41.077508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.077630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: dc-1, pathId: [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:41.078365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:41.078417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:41.078708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:41.078720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard 
DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:41.078732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:41.078773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046578944, domainId: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:41.078782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:41.078858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.129601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "hdd" } StoragePools { Name: "" Kind: "hdd-3" } StoragePools { Name: "" Kind: "hdd-1" } StoragePools { Name: "" Kind: "hdd-2" } } } TxId: 1 TabletId: 72057594046578944 , at schemeshard: 72057594046578944 2025-06-30T09:20:41.129674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //dc-1, opId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.129743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 0 2025-06-30T09:20:41.129749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046578944, LocalPathId: 1] source path: 2025-06-30T09:20:41.129811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046578944 2025-06-30T09:20:41.129825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:41.130538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046578944 PathId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.130584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //dc-1 2025-06-30T09:20:41.130622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.130629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046578944 2025-06-30T09:20:41.130633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:41.130636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state 
for txid 1:0 2 -> 3 2025-06-30T09:20:41.131205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.131224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046578944 2025-06-30T09:20:41.131231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:41.131769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.131791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.131798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.131806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:41.132579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046578944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:41.133036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046578944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:41.133089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:41.133345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.133354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:20:41.133359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.799076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.799169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046578944, at schemeshard: 72057594046578944 2025-06-30T09:20:41.799182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.799278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:41.799289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: 
NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.799339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 1 2025-06-30T09:20:41.799354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:41.800430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:41.800454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046578944, txId: 1, path id: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:41.80052 ... tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:21:50.062245Z node 171 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:21:50.062261Z node 164 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:21:50.062270Z node 164 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:21:50.062283Z node 164 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:21:50.062290Z node 165 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:21:50.062299Z node 165 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:21:50.062312Z node 165 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:21:50.062321Z node 166 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:21:50.062329Z node 166 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:21:50.062346Z node 166 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:21:50.062353Z node 167 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:21:50.062362Z node 167 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-30T09:21:50.062473Z node 164 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:21:50.062486Z node 164 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:21:50.062492Z node 164 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:21:50.062514Z node 164 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[164:1167:2121] 2025-06-30T09:21:50.062581Z node 165 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:21:50.062590Z node 165 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:21:50.062595Z node 165 :LOCAL DEBUG: local.cpp:183: 
TLocalNodeRegistrar::TryToRegister 2025-06-30T09:21:50.062605Z node 165 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[165:1169:2121] 2025-06-30T09:21:50.062668Z node 166 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:21:50.062678Z node 166 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:21:50.062682Z node 166 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:21:50.062691Z node 166 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[166:1171:2121] 2025-06-30T09:21:50.062701Z node 167 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:21:50.062718Z node 168 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:21:50.062805Z node 169 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:21:50.062816Z node 169 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:21:50.062821Z node 169 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:21:50.062834Z node 169 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[169:1174:2121] 2025-06-30T09:21:50.062906Z node 170 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:21:50.062916Z node 170 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:21:50.062921Z node 170 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:21:50.062930Z node 170 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[170:1176:2121] 2025-06-30T09:21:50.062994Z node 171 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:21:50.063005Z node 171 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:21:50.063009Z node 171 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:21:50.063018Z node 171 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[171:1178:2121] 2025-06-30T09:21:50.063530Z node 167 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:21:50.063551Z node 167 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:21:50.063561Z node 167 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:21:50.063573Z node 167 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[167:1180:2121] 2025-06-30T09:21:50.064017Z node 168 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:21:50.064034Z node 168 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:21:50.064039Z node 168 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:21:50.064059Z node 168 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[168:1182:2121] 2025-06-30T09:21:50.065651Z node 168 :TENANT_POOL NOTICE: 
tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:21:50.065663Z node 168 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [168:1129:2117] 2025-06-30T09:21:50.065688Z node 169 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:21:50.065693Z node 169 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [169:1130:2117] 2025-06-30T09:21:50.065710Z node 170 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:21:50.065714Z node 170 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [170:1131:2117] 2025-06-30T09:21:50.065723Z node 171 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:21:50.065727Z node 171 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [171:1132:2117] 2025-06-30T09:21:50.065743Z node 164 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:21:50.065748Z node 164 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [164:1125:2117] 2025-06-30T09:21:50.065782Z node 165 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:21:50.065786Z node 165 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [165:1126:2117] 2025-06-30T09:21:50.065797Z node 166 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:21:50.065801Z node 166 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [166:1127:2117] 2025-06-30T09:21:50.065813Z node 167 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:21:50.065817Z node 167 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [167:1128:2117] 2025-06-30T09:21:50.066498Z node 166 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[166:1171:2121]} 2025-06-30T09:21:50.066538Z node 168 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[168:1182:2121]} 2025-06-30T09:21:50.066542Z node 169 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[169:1174:2121]} 2025-06-30T09:21:50.066586Z node 170 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[170:1176:2121]} 2025-06-30T09:21:50.066634Z node 164 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[164:1167:2121]} 2025-06-30T09:21:50.066660Z node 171 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[171:1178:2121]} 2025-06-30T09:21:50.066673Z node 165 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[165:1169:2121]} 2025-06-30T09:21:50.066687Z node 167 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[167:1180:2121]} 2025-06-30T09:21:50.066713Z node 169 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:21:50.066722Z node 169 :LOCAL DEBUG: local.cpp:385: 
TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:21:50.066724Z node 169 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:21:50.066960Z node 168 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:21:50.066970Z node 168 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:21:50.066974Z node 168 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:21:50.066993Z node 166 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:21:50.066999Z node 166 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:21:50.067002Z node 166 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:21:50.067066Z node 170 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:21:50.067071Z node 170 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:21:50.067073Z node 170 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:21:50.067096Z node 164 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:21:50.067101Z node 164 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:21:50.067103Z node 164 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:21:50.067125Z node 171 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:21:50.067129Z node 171 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:21:50.067131Z node 171 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:21:50.067162Z node 165 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:21:50.067166Z node 165 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:21:50.067169Z node 165 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk 2025-06-30T09:21:50.067173Z node 167 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:21:50.067177Z node 167 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:21:50.067179Z node 167 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FindingPartsWhenOnlyOnePartExists [GOOD] Test command err: Recv 65537 2025-06-30T09:21:51.632922Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-30T09:21:51.633377Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-30T09:21:51.633398Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts 2025-06-30T09:21:51.633414Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm Recv 65537 2025-06-30T09:21:51.845397Z node 2 :BS_VDISK_PATCH 
INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-30T09:21:51.845467Z node 2 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-30T09:21:51.845476Z node 2 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-30T09:21:51.845510Z node 2 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# yes 2025-06-30T09:21:51.845520Z node 2 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: received force end; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK ErrorReason# Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-30T09:21:51.845530Z node 2 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartPutError [GOOD] Test command err: Recv 65537 2025-06-30T09:21:52.215809Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-30T09:21:52.216291Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-30T09:21:52.216315Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-30T09:21:52.216355Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# no 2025-06-30T09:21:52.216364Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:10:0] PullingPart# 1 Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-30T09:21:52.216398Z node 1 :BS_VDISK_PATCH INFO: {BSVSP08@skeleton_vpatch_actor.cpp:383} [0:1:0:0:0] TEvVPatch: received part data; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 DataParts# 4 ReceivedBlobId# [1:2:3:4:6:10:1] Status# OK ResultSize# 1 ParityPart# no 2025-06-30T09:21:52.216421Z node 1 :BS_VDISK_PATCH INFO: {BSVSP14@skeleton_vpatch_actor.cpp:462} [0:1:0:0:0] TEvVPatch: send xor diffs; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# 
[1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorDiffCount# 0 2025-06-30T09:21:52.216431Z node 1 :BS_VDISK_PATCH INFO: {BSVSP15@skeleton_vpatch_actor.cpp:502} [0:1:0:0:0] TEvVPatch: send vPut; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 ReceivedXorDiffs# 0 ExpectedXorDiffs# 0 Send NKikimr::TEvBlobStorage::TEvVPut Recv NKikimr::TEvBlobStorage::TEvVPutResult 2025-06-30T09:21:52.216461Z node 1 :BS_VDISK_PATCH INFO: {BSVSP10@skeleton_vpatch_actor.cpp:627} [0:1:0:0:0] TEvVPatch: received put result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# ERROR 2025-06-30T09:21:52.216469Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# ERROR ErrorReason# Recieve not OK status from VPutResult, received status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-30T09:21:52.216480Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm |81.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/test-results/unittest/{meta.json ... results_accumulator.log} |81.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_split_merge/unittest >> TSchemeShardSplitBySample::Histogram [GOOD] >> TFlatTest::Init |81.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/test-results/unittest/{meta.json ... results_accumulator.log} >> TargetDiscoverer::Dirs >> TargetDiscoverer::InvalidCredentials >> YdbIndexTable::MultiShardTableOneIndexIndexOverlapDataColumn [GOOD] >> YdbIndexTable::MultiShardTableOneIndexPkOverlap >> TargetDiscoverer::IndexedTable >> TFlatTest::Init [GOOD] >> TFlatTest::LargeDatashardReply >> TargetDiscoverer::Basic >> TargetDiscoverer::Dirs [GOOD] >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-false >> KqpScheme::CreateAsyncReplicationWithSecret [GOOD] >> KqpScheme::CreateBackupCollectionDisabledByDefault >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-false >> TSchemeShardLoginTest::DisableBuiltinAuthMechanism >> TargetDiscoverer::InvalidCredentials [GOOD] |81.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Dirs [GOOD] Test command err: 2025-06-30T09:21:53.354558Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670196857092304:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:53.393498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00369c/r3tmp/tmp84KQJE/pdisk_1.dat 2025-06-30T09:21:53.434993Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:53.435858Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670196857092086:2080] 1751275313308489 != 1751275313308492 TClient is connected to server localhost:31000 TServer::EnableGrpc on 
GrpcPort 20850, node 1 2025-06-30T09:21:53.479931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:53.479950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:53.479952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:53.480009Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:53.495697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:53.495733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:53.496732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31000 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:53.547530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:53.551096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:21:53.558935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:53.647101Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1751275313599, tx_id: 1 } } } 2025-06-30T09:21:53.647114Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-30T09:21:53.650832Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Dir, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1751275313606, tx_id: 281474976715658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-30T09:21:53.650845Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-30T09:21:53.654756Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275313662, tx_id: 281474976715659 } }] } } 2025-06-30T09:21:53.654771Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root/Dir 2025-06-30T09:21:53.864195Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275313662, tx_id: 281474976715659 } } } 2025-06-30T09:21:53.864216Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Dir/Table 2025-06-30T09:21:53.864223Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Dir/Table, dstPath# /Root/Replicated/Dir/Table, kind# Table >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-false [GOOD] >> TargetDiscoverer::Basic [GOOD] >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-true >> TargetDiscoverer::IndexedTable [GOOD] >> TCdcStreamWithRebootsTests::CreateStreamOnIndexTableExplicitReady[PipeResets] [GOOD] >> TSchemeShardLoginTest::DisableBuiltinAuthMechanism [GOOD] >> TSchemeShardLoginTest::FailedLoginUserUnderNameOfGroup |81.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |81.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |81.7%| [LD] {RESULT} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-false ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::InvalidCredentials [GOOD] Test command err: 2025-06-30T09:21:53.628351Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670198267508895:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:53.630107Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003693/r3tmp/tmpfEFrz6/pdisk_1.dat 2025-06-30T09:21:53.694680Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:62530 TServer::EnableGrpc on GrpcPort 3866, node 1 2025-06-30T09:21:53.731087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:53.731119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:53.731694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:53.748064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:53.748083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:53.748085Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:53.748143Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62530 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:53.813043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:21:53.816752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:53.817843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:53.963347Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: CLIENT_UNAUTHENTICATED, issues: {
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/types/credentials/login/login.cpp:217: Cannot find user: user } } } 2025-06-30T09:21:53.963370Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root, status# CLIENT_UNAUTHENTICATED, issues# {
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/types/credentials/login/login.cpp:217: Cannot find user: user } >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true >> TSchemeShardLoginTest::FailedLoginUserUnderNameOfGroup [GOOD] >> TSchemeShardLoginTest::ChangeAcceptablePasswordParameters ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::IndexedTable [GOOD] Test command err: 2025-06-30T09:21:53.797984Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670199658926951:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:53.798088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003687/r3tmp/tmpOrg3O1/pdisk_1.dat 2025-06-30T09:21:53.857627Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:11992 TServer::EnableGrpc on GrpcPort 32586, node 1 2025-06-30T09:21:53.894717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:53.894729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:53.894731Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:53.894807Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:53.897491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:53.897532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:53.903494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11992 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
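The InvalidCredentials trace above shows the abort path in the discoverer: when DescribePath comes back with a non-success status (here CLIENT_UNAUTHENTICATED, because the login credentials name a user that does not exist), discovery stops and the status and issues are logged; no ListDirectory is attempted. A minimal C++ sketch of that branch — the types and names below are illustrative assumptions, not the YDB SDK or the actual target_discoverer code:

    #include <iostream>
    #include <string>

    // Illustrative stand-in for a describe-path result; not the YDB SDK API.
    struct TDescribeResult {
        bool Success = false;
        std::string Status;  // e.g. "CLIENT_UNAUTHENTICATED"
        std::string Issues;  // e.g. "Cannot find user: user"
    };

    // Returns true if discovery may proceed to ListDirectory.
    bool HandleDescribePath(const std::string& path, const TDescribeResult& r) {
        if (!r.Success) {
            // Mirrors the ERROR line in the trace: describe failed, discovery aborts.
            std::cerr << "Describe path failed: path# " << path
                      << ", status# " << r.Status
                      << ", issues# " << r.Issues << "\n";
            return false;
        }
        std::cout << "Describe path succeeded: path# " << path << "\n";
        return true;
    }

    int main() {
        HandleDescribePath("/Root",
                           {false, "CLIENT_UNAUTHENTICATED", "Cannot find user: user"});
    }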
2025-06-30T09:21:53.946678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:53.949348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:53.973608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:54.071107Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1751275313998, tx_id: 1 } } } 2025-06-30T09:21:54.071125Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-30T09:21:54.075247Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275314082, tx_id: 281474976715658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-30T09:21:54.075266Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-30T09:21:54.282073Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275314082, tx_id: 281474976715658 } } } 2025-06-30T09:21:54.282092Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-06-30T09:21:54.282097Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, kind# Table 2025-06-30T09:21:54.282108Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:140: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table/Index, dstPath# /Root/Replicated/Table/Index/indexImplTable, kind# IndexTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Basic [GOOD] Test command err: 2025-06-30T09:21:53.788392Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670198880747836:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:53.788420Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003659/r3tmp/tmpSQlPeH/pdisk_1.dat 2025-06-30T09:21:53.868961Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:53.889621Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:53.889650Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:53.890586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63441 TServer::EnableGrpc on GrpcPort 14459, node 1 2025-06-30T09:21:53.913582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:53.913594Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:53.913596Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:53.913655Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63441 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:53.964304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:53.969147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
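The Dirs and IndexedTable traces above show the rule the discoverer applies when it adds targets: the source path's suffix relative to the replicated root is grafted under the destination root (/Root/Dir/Table -> /Root/Replicated/Dir/Table), and an index target additionally points at the index's indexImplTable (/Root/Table/Index -> /Root/Replicated/Table/Index/indexImplTable). A hypothetical sketch of that mapping, assuming fixed source and destination roots (MapDstPath and ETargetKind are made-up names, not YDB code):

    #include <iostream>
    #include <string>

    enum class ETargetKind { Table, IndexTable };

    // Graft the suffix relative to srcRoot onto dstRoot; index targets
    // replicate into the index's implementation table.
    std::string MapDstPath(const std::string& srcRoot, const std::string& dstRoot,
                           const std::string& srcPath, ETargetKind kind) {
        std::string dst = dstRoot + srcPath.substr(srcRoot.size());
        if (kind == ETargetKind::IndexTable) {
            dst += "/indexImplTable";
        }
        return dst;
    }

    int main() {
        std::cout << MapDstPath("/Root", "/Root/Replicated", "/Root/Dir/Table",
                                ETargetKind::Table) << "\n";
        std::cout << MapDstPath("/Root", "/Root/Replicated", "/Root/Table/Index",
                                ETargetKind::IndexTable) << "\n";
    }

Both outputs match the dstPath# values printed by the "Add target" lines in the traces above.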
2025-06-30T09:21:54.043552Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1751275314012, tx_id: 1 } } } 2025-06-30T09:21:54.043572Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-30T09:21:54.047662Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275314068, tx_id: 281474976715658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-30T09:21:54.047675Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-30T09:21:54.236297Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1751275314068, tx_id: 281474976715658 } } } 2025-06-30T09:21:54.236312Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-06-30T09:21:54.236318Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, kind# Table >> KqpScheme::CreateBackupCollectionDisabledByDefault [GOOD] >> KqpScheme::CreateBackupCollection ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest >> TCdcStreamWithRebootsTests::CreateStreamOnIndexTableExplicitReady[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:46.497238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:46.497271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.497278Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:46.497292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:46.497299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:46.497305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:46.497315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.497339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:46.497459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:46.497547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:46.522967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:46.522999Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:46.523136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.528751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:46.530030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:46.530082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:46.533418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:46.533483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:46.533612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.533668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:46.536056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.536132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:46.536414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-30T09:21:46.536426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.536437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:46.536447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:46.536453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:46.536484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:46.538027Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.561491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:46.561577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.561648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:46.561658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:46.561717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:46.561733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:46.562795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.562866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:46.562955Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.562970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:46.562977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:46.562983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:46.563683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.563699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:46.563705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:46.564282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.564298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.564305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:46.564314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:46.565103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:46.565628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:46.565671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:46.565920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.565966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... 
-30T09:21:54.524409Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:54.524417Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 6] 2025-06-30T09:21:54.524471Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-30T09:21:54.524514Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:54.524519Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [26:211:2211], at schemeshard: 72057594046678944, txId: 1003, path id: 6 2025-06-30T09:21:54.524525Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [26:211:2211], at schemeshard: 72057594046678944, txId: 1003, path id: 7 2025-06-30T09:21:54.524626Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:3, at schemeshard: 72057594046678944 2025-06-30T09:21:54.524635Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:3 ProgressState 2025-06-30T09:21:54.524657Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:3 progress is 4/4 2025-06-30T09:21:54.524662Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 4/4 2025-06-30T09:21:54.524668Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:3 progress is 4/4 2025-06-30T09:21:54.524671Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 4/4 2025-06-30T09:21:54.524675Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 4/4, is published: false 2025-06-30T09:21:54.524682Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 4/4 2025-06-30T09:21:54.524689Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:21:54.524695Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:21:54.524711Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:21:54.524716Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:1 2025-06-30T09:21:54.524719Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:1 2025-06-30T09:21:54.524724Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] 
was 4 2025-06-30T09:21:54.524727Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:2 2025-06-30T09:21:54.524730Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:2 2025-06-30T09:21:54.524751Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:21:54.524756Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:3 2025-06-30T09:21:54.524759Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:3 2025-06-30T09:21:54.524767Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 5 2025-06-30T09:21:54.524773Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 2, subscribers: 0 2025-06-30T09:21:54.524778Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 6], 4 2025-06-30T09:21:54.524781Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 7], 2 2025-06-30T09:21:54.525094Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:54.525111Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:54.525120Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:21:54.525126Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 4 2025-06-30T09:21:54.525130Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-30T09:21:54.525535Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:54.525549Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:54.525554Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:21:54.525559Z node 26 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 2 2025-06-30T09:21:54.525563Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 4 2025-06-30T09:21:54.525580Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:21:54.526619Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:21:54.527381Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:21:54.528488Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:21:54.528500Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:21:54.528608Z node 26 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:21:54.528628Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:21:54.528634Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [26:742:2647] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:21:54.528731Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:21:54.528820Z node 26 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream" took 110us result status StatusSuccess 2025-06-30T09:21:54.528985Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index/indexImplTable/Stream" PathDescription { Self { Name: "Stream" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeCdcStream CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 CdcStreamVersion: 1 } ChildrenExist: true } Children { Name: "streamImpl" PathId: 7 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409549 } DomainDescription { SchemeShardId_Depricated: 
72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } CdcStreamDescription { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 6 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::FailedLoginWithInvalidUser >> TSchemeShardLoginTest::ChangeAcceptablePasswordParameters [GOOD] >> TSchemeShardLoginTest::ChangeAccountLockoutParameters >> TConsoleTests::TestNotifyOperationCompletionExtSubdomain [GOOD] >> TConsoleTests::TestRemoveAttributes >> TSchemeShardLoginTest::FailedLoginWithInvalidUser [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true [GOOD] >> RetryPolicy::TWriteSession_SeqNoShift [GOOD] >> RetryPolicy::RetryWithBatching ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::FailedLoginWithInvalidUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:54.267432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:54.267463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:54.267469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:54.267475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:54.267487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
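The CDC-stream trace above also shows how schemeshard accounts for publication of path updates: txId 1003 publishes two path versions, and each incoming TEvUpdateAck decrements the in-flight count ("Publication in-flight, count: 2", then "count: 1") until "Publication complete, notify & remove". A toy sketch of that bookkeeping, with hypothetical types (TPublisher is a made-up class, not schemeshard code):

    #include <cstdint>
    #include <initializer_list>
    #include <iostream>
    #include <map>
    #include <set>

    class TPublisher {
        std::map<uint64_t, std::set<uint64_t>> InFlight; // txId -> pathIds awaiting ack
    public:
        void Publish(uint64_t txId, std::initializer_list<uint64_t> pathIds) {
            InFlight[txId] = pathIds;
        }
        void OnUpdateAck(uint64_t txId, uint64_t pathId) {
            auto it = InFlight.find(txId);
            if (it == InFlight.end()) return; // unknown transaction, ignore
            // The trace prints the count before the ack is applied.
            std::cout << "Publication in-flight, count: " << it->second.size()
                      << ", txId: " << txId << "\n";
            it->second.erase(pathId);
            if (it->second.empty()) {
                std::cout << "Publication complete, notify & remove, txId: "
                          << txId << "\n";
                InFlight.erase(it);
            }
        }
    };

    int main() {
        TPublisher p;
        p.Publish(1003, {6, 7}); // two published paths, as in the trace
        p.OnUpdateAck(1003, 6);  // prints count: 2
        p.OnUpdateAck(1003, 7);  // prints count: 1, then complete
    }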
2025-06-30T09:21:54.267491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:54.267501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:54.267527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:54.267676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:54.267769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:54.283213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:54.283242Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:54.286950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:54.287019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:54.287060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:54.288518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:54.288590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:54.288733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:54.288802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:54.289435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:54.289480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:54.289781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:54.289794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:54.289822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:54.289829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:54.289835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:54.289854Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.291275Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:54.314997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:54.315110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.315199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:54.315208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:54.315265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:54.315281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:54.316239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:54.316293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:54.316359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.316371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:54.316377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:54.316383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:54.316876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.316889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:54.316895Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:54.317257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.317268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.317275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:54.317283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:54.318036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:54.318465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:54.318515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:54.318725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:54.318775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:54.318785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:54.318856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:54.318864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:54.318900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:54.318914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:54.319356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
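The schemeshard traces above walk a suboperation through numbered states: 2 -> 3 when TCreateParts finds no shards to create, 3 -> 128 once parts are configured, and 128 -> 240 after the coordinator plans the step. A hypothetical sketch of such a numbered progress machine — the state codes are taken from the log, but the transition logic is illustrative, not the schemeshard implementation:

    #include <iostream>

    // State codes as they appear in the trace.
    enum EOpState { CreateParts = 2, ConfigureParts = 3, Propose = 128, Done = 240 };

    EOpState Next(EOpState s) {
        switch (s) {
            case CreateParts:    return ConfigureParts; // no shards to create, go on
            case ConfigureParts: return Propose;        // parts configured
            case Propose:        return Done;           // coordinator planned the step
            default:             return Done;
        }
    }

    int main() {
        EOpState s = CreateParts;
        while (s != Done) {
            EOpState n = Next(s);
            // Reproduces the "Change state" lines: 2 -> 3, 3 -> 128, 128 -> 240.
            std::cout << "Change state for txid 1:0 " << s << " -> " << n << "\n";
            s = n;
        }
    }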
2025-06-30T09:21:54.319365Z node 1 :FLAT_TX_SCHEMESHARD ... -06-30T09:21:55.598242Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:21:55.598248Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-30T09:21:55.598253Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-30T09:21:55.598332Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:21:55.598346Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-30T09:21:55.598352Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-30T09:21:55.598358Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-30T09:21:55.598364Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:55.598381Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-30T09:21:55.598994Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-30T09:21:55.599105Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:55.599213Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [5:272:2261] Bootstrap 2025-06-30T09:21:55.601404Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [5:272:2261] Become StateWork (SchemeCache [5:277:2266]) 2025-06-30T09:21:55.601551Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:55.601620Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 86us result status StatusSuccess 2025-06-30T09:21:55.601771Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:55.601849Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [5:272:2261] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:21:55.602337Z node 5 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 
2025-06-30T09:21:55.602526Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-30T09:21:55.602535Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:101: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-30T09:21:55.642222Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:96: TTxLogin Complete, with error: Cannot find user: user1, at schemeshard: 72057594046678944 2025-06-30T09:21:55.642276Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:55.642286Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:55.642361Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:55.642368Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-30T09:21:55.642549Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 0 2025-06-30T09:21:55.642650Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:55.642682Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 41us result status StatusSuccess 2025-06-30T09:21:55.642826Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy05cEqQVn7jgp39i/TQM\nxMCSXL2NBSIJ7bYS2KBXdALlsA0FB1BBeXmtOW6rdvfkrNjRxptWjdSl6GBFuI5T\n/SopQy5n2G/jzpFRHXFvnqi8vsz6uXHjE/V4AeWq15Iw3DmVgRQnMTV2NYuVYp1N\n3mvWIYVvbwhHaNrOY/MkzY+yNMvCWiGYKRgN9wuGpKOt2A6pk8nrieZTyTfUKJMx\naqVwCbOnncrYP37KwrB//dMugz0YMirYm6bmXSsutR8sTCX4FOpElOwX959U4r1C\nVAXExXDZ6HCjooDu8mswNr0wvoa8m17DrYLIP3UzhAuXcmD7vkRB+6S0D3UaeZJk\njwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1751361715640 } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:54.118356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:54.118381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:54.118386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:54.118389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:54.118400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:54.118403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:54.118411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:54.118436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:54.118527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:54.118599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:54.131246Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:54.131276Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:54.134508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:54.134570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:54.134602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:54.136440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:54.136533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:54.136649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:54.136708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:54.137358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:54.137402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:54.137640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:54.137649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:54.137678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:54.137684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:54.137689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:54.137704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.139096Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:54.157222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:54.157313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.157385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:54.157392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:54.157436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:54.157448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:54.158316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:54.158356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:54.158423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.158435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:54.158441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:54.158447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:54.158872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.158882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:54.158890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:54.159410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.159422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.159429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:54.159436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:54.160039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 
2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:54.160439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:54.160478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:54.160642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:54.160675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:54.160681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:54.160740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:54.160746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:54.160775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:54.160786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:54.161205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:54.161214Z node 1 :FLAT_TX_SCHEMESHARD ... 
FO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-30T09:21:55.628797Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:21:55.628805Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-30T09:21:55.629388Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:21:55.629472Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 2025-06-30T09:21:55.629559Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:55.629581Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1" took 30us result status StatusSuccess 2025-06-30T09:21:55.629661Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1" PathDescription { Self { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 106 2025-06-30T09:21:55.630403Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveGroup { Group: 
"group1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:55.630440Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5309: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 1] name: MyRoot type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:55.630445Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5325: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:55.630451Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5309: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: Dir1 type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:55.630454Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5325: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:21:55.630517Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:55.630538Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-30T09:21:55.630544Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-30T09:21:55.630549Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-30T09:21:55.630553Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-30T09:21:55.630560Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:55.630570Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-06-30T09:21:55.630575Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-30T09:21:55.630582Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-30T09:21:55.630587Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-06-30T09:21:55.630592Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-30T09:21:55.630991Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:55.631008Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE GROUP, path: /MyRoot 2025-06-30T09:21:55.631036Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:55.631042Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:55.631064Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:55.631069Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-30T09:21:55.631173Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-30T09:21:55.631182Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-30T09:21:55.631187Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-30T09:21:55.631192Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-30T09:21:55.631201Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:21:55.631217Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-30T09:21:55.631490Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 2025-06-30T09:21:55.631566Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:21:55.631588Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 27us result status StatusSuccess 2025-06-30T09:21:55.631684Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: true } Children { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TCdcStreamWithRebootsTests::CreateStreamWithAwsRegion[PipeResets] [GOOD] >> KqpScheme::CreateBackupCollection [GOOD] |81.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest |81.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Delete |81.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest >> TCdcStreamWithRebootsTests::CreateStreamWithAwsRegion[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:46.829960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:46.829992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.829999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 
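For reference: the TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true trace above walks an ESchemeOpAlterLogin RemoveGroup operation (txId 106) through propose, scheme-board publication, and a final describe of /MyRoot. A minimal YQL sketch of the client-side statements that drive this schemeshard path is shown below; the group name group1 and the /MyRoot path come from the log, while the user name and password are illustrative placeholders, and the exact statement syntax is assumed from YDB's access-control YQL rather than copied from the test source.

-- Sketch only: assumed YDB access-control YQL; group1 and /MyRoot come from the log above.
CREATE USER user1 PASSWORD 'password1';  -- user/password are placeholders for illustration
CREATE GROUP group1;                     -- bumps SecurityStateVersion, published to the scheme board
ALTER GROUP group1 ADD USER user1;       -- membership change, another publication round
DROP GROUP group1;                       -- produces the REMOVE GROUP audit record and txId 106 above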
0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:46.830014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:46.830021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:46.830026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:46.830037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.830061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:46.830192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:46.830275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:46.853176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:46.853205Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:46.853348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.861056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:46.862376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:46.862430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:46.864642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:46.864704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:46.864854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.864910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:46.866049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.866115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:46.866399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:46.866414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.866425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:46.866434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:46.866441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:46.866470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:46.869014Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.893571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:46.893656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.893724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:46.893734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:46.893818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:46.893836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:46.894589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.894635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:46.894697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:21:46.894708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:46.894715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:46.894722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:46.895281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.895298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:46.895306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:46.895780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.895796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.895804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:46.895812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:46.896634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:46.897147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:46.897196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:46.897436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.897471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... 
RefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:21:56.312493Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:21:56.313176Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:2, at schemeshard: 72057594046678944 2025-06-30T09:21:56.313288Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:56.313298Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:21:56.313362Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:21:56.313421Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:56.313428Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [26:211:2211], at schemeshard: 72057594046678944, txId: 1003, path id: 4 2025-06-30T09:21:56.313434Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [26:211:2211], at schemeshard: 72057594046678944, txId: 1003, path id: 5 2025-06-30T09:21:56.313624Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:2, at schemeshard: 72057594046678944 2025-06-30T09:21:56.313637Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:2 ProgressState 2025-06-30T09:21:56.313656Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:2 progress is 3/3 2025-06-30T09:21:56.313662Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-30T09:21:56.313668Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:2 progress is 3/3 2025-06-30T09:21:56.313672Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-30T09:21:56.313681Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 3/3, is published: false 2025-06-30T09:21:56.313690Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-30T09:21:56.313697Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:21:56.313703Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:21:56.313720Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 
2025-06-30T09:21:56.313727Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:1
2025-06-30T09:21:56.313731Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:1
2025-06-30T09:21:56.313766Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4
2025-06-30T09:21:56.313771Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:2
2025-06-30T09:21:56.313775Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:2
2025-06-30T09:21:56.313784Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 5
2025-06-30T09:21:56.313790Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 2, subscribers: 0
2025-06-30T09:21:56.313795Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 4], 4
2025-06-30T09:21:56.313799Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 5], 2
2025-06-30T09:21:56.314075Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003
2025-06-30T09:21:56.314090Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003
2025-06-30T09:21:56.314096Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003
2025-06-30T09:21:56.314102Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 4
2025-06-30T09:21:56.314108Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3
2025-06-30T09:21:56.314415Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003
2025-06-30T09:21:56.314433Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003
2025-06-30T09:21:56.314442Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003
2025-06-30T09:21:56.314448Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2
2025-06-30T09:21:56.314452Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4
2025-06-30T09:21:56.314467Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0
2025-06-30T09:21:56.315931Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003
2025-06-30T09:21:56.316033Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003
TestModificationResult got TxId: 1003, wait until txId: 1003
TestWaitNotification wait txId: 1003
2025-06-30T09:21:56.317650Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion
2025-06-30T09:21:56.317665Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003
2025-06-30T09:21:56.317785Z node 26 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944
2025-06-30T09:21:56.317809Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult
2025-06-30T09:21:56.317816Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [26:664:2580]
TestWaitNotification: OK eventTxId 1003
2025-06-30T09:21:56.317920Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944
2025-06-30T09:21:56.317997Z node 26 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Stream" took 103us result status StatusSuccess
2025-06-30T09:21:56.318153Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Stream" PathDescription { Self { Name: "Stream" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeCdcStream CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 CdcStreamVersion: 1 } ChildrenExist: true } Children { Name: "streamImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409548 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } CdcStreamDescription { Name: "Stream" Mode: ECdcStreamModeNewAndOldImages PathId { OwnerId: 72057594046678944 LocalId: 4 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatDynamoDBStreamsJson VirtualTimestamps: false AwsRegion: "ru-central1" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
|81.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateBackupCollection [GOOD]
Test command err: Trying to start YDB, gRPC: 7492, MsgBus: 28585
2025-06-30T09:21:19.107502Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670052303645058:2078];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:21:19.110256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047c5/r3tmp/tmpRQmtAI/pdisk_1.dat
2025-06-30T09:21:19.200101Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:21:19.202815Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670052303645016:2080] 1751275279104177 != 1751275279104180
TServer::EnableGrpc on GrpcPort 7492, node 1
2025-06-30T09:21:19.229002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:21:19.229015Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:21:19.229017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:21:19.229049Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:28585
2025-06-30T09:21:19.258140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:21:19.258174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:21:19.258871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:28585
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-30T09:21:19.363874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
waiting...
2025-06-30T09:21:19.375154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-06-30T09:21:19.387666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:21:19.428420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
waiting...
2025-06-30T09:21:19.465386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:19.489195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:21:19.603649Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670052303646628:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:19.603695Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:19.665213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:19.676439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:19.693330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:19.705486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:19.726037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:19.737500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:19.750490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:19.772570Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670052303647279:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:19.772615Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:19.772745Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670052303647284:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:19.773873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:21:19.777508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480
2025-06-30T09:21:19.777581Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670052303647286:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-06-30T09:21:19.860687Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670052303647337:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-30T09:21:20.110650Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2025-06-30T09:21:20.212587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:20.217598Z node 1 :TX_PROXY ERROR: schemereq. ... nce;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047c5/r3tmp/tmp8ZmrPr/pdisk_1.dat
2025-06-30T09:21:55.473043Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 2413, node 6
2025-06-30T09:21:55.483490Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:21:55.483503Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:21:55.483505Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:21:55.483555Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:3087
2025-06-30T09:21:55.518070Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:21:55.518103Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:21:55.523260Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:3087
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-30T09:21:55.599463Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
waiting...
2025-06-30T09:21:55.607811Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-06-30T09:21:55.624006Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:21:55.658294Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:21:55.703668Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
waiting...
2025-06-30T09:21:55.720908Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:55.903083Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670206689652132:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:55.903113Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:55.914092Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:55.924148Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:55.936590Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:55.950228Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:55.965964Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:55.979546Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:56.005320Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:56.042970Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670210984620082:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:56.043009Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:56.043109Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670210984620087:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:56.044303Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:21:56.048653Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480
2025-06-30T09:21:56.048809Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670210984620089:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-06-30T09:21:56.151986Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670210984620140:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-30T09:21:56.339201Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670210984620410:3569] txid# 281474976715672, issues: { message: "Backup collections must be placed in /Root/.backups/collections" severity: 1 }
2025-06-30T09:21:56.347700Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateBackupCollection, opId: 281474976715673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp:1246)
2025-06-30T09:21:56.361284Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateBackupCollection, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_backup_collection.cpp:186)
2025-06-30T09:21:56.412355Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
|81.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest
>> TCdcStreamWithRebootsTests::DropStreamExplicitReady[PipeResets] [GOOD]
>> KqpUserConstraint::KqpReadNull+UploadNull
|81.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest
|81.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest >> TCdcStreamWithRebootsTests::DropStreamExplicitReady[PipeResets] [GOOD]
Test command err: =========== RUN: Trace ===========
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142]
Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143]
IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143]
Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144]
IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144]
Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142]
Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143]
Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144]
2025-06-30T09:21:48.687801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-30T09:21:48.687830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-30T09:21:48.687837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s
2025-06-30T09:21:48.687889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration
2025-06-30T09:21:48.687897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-30T09:21:48.687901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-30T09:21:48.687913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-30T09:21:48.687942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-30T09:21:48.688073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-30T09:21:48.688195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-30T09:21:48.706567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true }
2025-06-30T09:21:48.706598Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:21:48.706770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062]
2025-06-30T09:21:48.710693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-06-30T09:21:48.712064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-06-30T09:21:48.712122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-30T09:21:48.714589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-06-30T09:21:48.714664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-06-30T09:21:48.714870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-06-30T09:21:48.714944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-06-30T09:21:48.716455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-06-30T09:21:48.716534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop
2025-06-30T09:21:48.716855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-30T09:21:48.716874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-30T09:21:48.716886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-30T09:21:48.716898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-30T09:21:48.716905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-30T09:21:48.716939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217]
IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217]
Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217]
2025-06-30T09:21:48.719014Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062]
2025-06-30T09:21:48.750703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-30T09:21:48.750944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-06-30T09:21:48.751033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-06-30T09:21:48.751045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path:
2025-06-30T09:21:48.751118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-06-30T09:21:48.751140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2025-06-30T09:21:48.752260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-06-30T09:21:48.752320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-06-30T09:21:48.752403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-30T09:21:48.752417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-06-30T09:21:48.752424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-06-30T09:21:48.752430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3
2025-06-30T09:21:48.753057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-30T09:21:48.753073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-06-30T09:21:48.753080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128
2025-06-30T09:21:48.753620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-30T09:21:48.753637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-30T09:21:48.753645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-06-30T09:21:48.753653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-06-30T09:21:48.754572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-06-30T09:21:48.755128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-06-30T09:21:48.755184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062]
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-06-30T09:21:48.755459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-06-30T09:21:48.755492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... chemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004
2025-06-30T09:21:57.515877Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004
2025-06-30T09:21:57.515882Z node 22 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1004
2025-06-30T09:21:57.515887Z node 22 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615
2025-06-30T09:21:57.515892Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2
2025-06-30T09:21:57.515994Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 1004
2025-06-30T09:21:57.516000Z node 22 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1004
2025-06-30T09:21:57.516005Z node 22 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5
2025-06-30T09:21:57.516010Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5
2025-06-30T09:21:57.516153Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1004
2025-06-30T09:21:57.516162Z node 22 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1004
2025-06-30T09:21:57.516168Z node 22 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9
2025-06-30T09:21:57.516173Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3
2025-06-30T09:21:57.516186Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 2/3, is published: true
2025-06-30T09:21:57.516265Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004
2025-06-30T09:21:57.516271Z node 22 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1004
2025-06-30T09:21:57.516276Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 2/3, is published: true
2025-06-30T09:21:57.516566Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1004:0, at schemeshard: 72057594046678944
2025-06-30T09:21:57.517547Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3
2025-06-30T09:21:57.517610Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2
2025-06-30T09:21:57.517650Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004
2025-06-30T09:21:57.517680Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1004:0, at schemeshard: 72057594046678944
2025-06-30T09:21:57.517706Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004
2025-06-30T09:21:57.517826Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004
2025-06-30T09:21:57.517860Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944
2025-06-30T09:21:57.517868Z node 22 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1004:0 ProgressState
2025-06-30T09:21:57.517883Z node 22 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 3/3
2025-06-30T09:21:57.517887Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 3/3
2025-06-30T09:21:57.517893Z node 22 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 3/3
2025-06-30T09:21:57.517896Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 3/3
2025-06-30T09:21:57.517902Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 3/3, is published: true
2025-06-30T09:21:57.517907Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 3/3
2025-06-30T09:21:57.517914Z node 22 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:0
2025-06-30T09:21:57.517919Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:0
2025-06-30T09:21:57.517956Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4
2025-06-30T09:21:57.517963Z node 22 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:1
2025-06-30T09:21:57.517967Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:1
2025-06-30T09:21:57.517973Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2
2025-06-30T09:21:57.517977Z node 22 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:2
2025-06-30T09:21:57.517981Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:2
2025-06-30T09:21:57.517993Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1
2025-06-30T09:21:57.518072Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004
2025-06-30T09:21:57.518082Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004
2025-06-30T09:21:57.518097Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-06-30T09:21:57.518103Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944
2025-06-30T09:21:57.518116Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1
2025-06-30T09:21:57.518124Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944
2025-06-30T09:21:57.518131Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3
2025-06-30T09:21:57.518671Z node 22 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
TestModificationResult got TxId: 1004, wait until txId: 1004
TestWaitNotification wait txId: 1004
2025-06-30T09:21:57.518787Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion
2025-06-30T09:21:57.518796Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004
2025-06-30T09:21:57.518871Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944
2025-06-30T09:21:57.518890Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult
2025-06-30T09:21:57.518896Z node 22 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [22:755:2670]
TestWaitNotification: OK eventTxId 1004
2025-06-30T09:21:57.518975Z node 22 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944
2025-06-30T09:21:57.519016Z node 22 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Stream" took 54us result status StatusPathDoesNotExist
2025-06-30T09:21:57.519061Z node 22 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/Stream\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 3]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Table/Stream" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
|81.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan
|81.8%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan
|81.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan
|81.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest
|81.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsFromAdLdapServer
>> TLdapUtilsSearchFilterCreatorTest::GetFilterWithoutLoginPlaceholders [GOOD]
>> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnames [GOOD]
>> TLdapUtilsUrisCreatorTest::CreateUrisFromIpV4List [GOOD]
>> TLdapUtilsUrisCreatorTest::CreateUrisFromIpV6List [GOOD]
>> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesLdapsScheme [GOOD]
>> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesUnknownScheme [GOOD]
|81.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest
>> KqpUserConstraint::KqpReadNull-UploadNull
>> TFlatTest::LargeDatashardReply [GOOD]
|81.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesUnknownScheme [GOOD]
>> KqpConstraints::AddNonColumnDoesnotReturnInternalError [GOOD]
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsFromAdLdapServer [GOOD]
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGood
>> TCdcStreamWithRebootsTests::CreateStream[PipeResets] [GOOD]
>> KqpUserConstraint::KqpReadNull+UploadNull [GOOD]
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDontExistGroupAttribute
>> LdapAuthProviderTest_LdapsScheme::LdapRefreshRemoveUserBad
>> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGood
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD]
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest >> KqpUserConstraint::KqpReadNull+UploadNull [GOOD]
Test command err: 2025-06-30T09:21:58.534923Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-06-30T09:21:58.535049Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2025-06-30T09:21:58.535070Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003f4d/r3tmp/tmp5T6Z2n/pdisk_1.dat
2025-06-30T09:21:58.687122Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257
2025-06-30T09:21:58.688180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2025-06-30T09:21:58.709975Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:21:58.711452Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275317963907 != 1751275317963911
2025-06-30T09:21:58.759590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:21:58.759642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:21:58.771445Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-30T09:21:58.859701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:21:59.288591Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:819:2666], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:59.288622Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:830:2671], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:59.288636Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:21:59.289896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:21:59.319225Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2025-06-30T09:21:59.442605Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:833:2674], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking }
2025-06-30T09:21:59.510725Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:903:2713] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-30T09:21:59.723322Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz029bzrds1gjjs01r3f7v5h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTZlNzRjOTMtMWU0MDcyNTEtMTQwNGM0YmUtNjU1NmZlMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-30T09:21:59.724888Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [1:934:2734], TxId: 281474976715660, task: 1. Ctx: { TraceId : 01jz029bzrds1gjjs01r3f7v5h. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=OTZlNzRjOTMtMWU0MDcyNTEtMTQwNGM0YmUtNjU1NmZlMzQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Source[0] fatal error: {
: Fatal: Read from column index 1: got NULL from NOT NULL column, code: 2012 }
2025-06-30T09:21:59.725603Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:934:2734], TxId: 281474976715660, task: 1. Ctx: { TraceId : 01jz029bzrds1gjjs01r3f7v5h. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=OTZlNzRjOTMtMWU0MDcyNTEtMTQwNGM0YmUtNjU1NmZlMzQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. InternalError: INTERNAL_ERROR KIKIMR_CONSTRAINT_VIOLATION: {
: Fatal: Read from column index 1: got NULL from NOT NULL column, code: 2012 }.
2025-06-30T09:21:59.726653Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:935:2735], TxId: 281474976715660, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=OTZlNzRjOTMtMWU0MDcyNTEtMTQwNGM0YmUtNjU1NmZlMzQ=. CustomerSuppliedId : . TraceId : 01jz029bzrds1gjjs01r3f7v5h. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: Terminate execution }.
2025-06-30T09:21:59.732504Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=OTZlNzRjOTMtMWU0MDcyNTEtMTQwNGM0YmUtNjU1NmZlMzQ=, ActorId: [1:817:2664], ActorState: ExecuteState, TraceId: 01jz029bzrds1gjjs01r3f7v5h, Create QueryResponse for error on request, msg:
2025-06-30T09:21:59.733063Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz029bzrds1gjjs01r3f7v5h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTZlNzRjOTMtMWU0MDcyNTEtMTQwNGM0YmUtNjU1NmZlMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpConstraints::AddNonColumnDoesnotReturnInternalError [GOOD]
Test command err: Trying to start YDB, gRPC: 27802, MsgBus: 25830
2025-06-30T09:21:13.557981Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670024791277629:2082];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:21:13.558826Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047f4/r3tmp/tmpg3E0UH/pdisk_1.dat
2025-06-30T09:21:13.634511Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257
TServer::EnableGrpc on GrpcPort 27802, node 1
2025-06-30T09:21:13.636725Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:21:13.646966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:21:13.646983Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:21:13.646986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:21:13.647037Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-30T09:21:13.658008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:21:13.658039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:21:13.659047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:25830
TClient is connected to server localhost:25830
WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:13.742543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:13.747904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:21:13.761039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:13.832463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:13.872324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:13.903839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:14.059926Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670029086246468:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.059960Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.142749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.157495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.170858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.191968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.205669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.266105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.288294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:14.326440Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670029086247124:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.326470Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.326808Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670029086247129:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:14.330362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:14.333306Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670029086247131:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:21:14.427704Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670029086247182:3400] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:14.561973Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:14.596061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:21:14.607744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runne ... ... 2025-06-30T09:21:25.894215Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:1680:3277], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:25.894269Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:25.902503Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.105625Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.370227Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.604255Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:26.854154Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.079141Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.367946Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:27.587886Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:2351:3772], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.587920Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.587982Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:2356:3777], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:27.588932Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:27.738269Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:2358:3779], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:27.780693Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:2416:3818] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:27.987837Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:28.375322Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710757:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:21:29.087449Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:3026:4272], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject, At tuple, At function: SqlProjectItem, At lambda
:2:37: Error: At function: Member
:2:37: Error: Member not found: Value3. Did you mean Value? 2025-06-30T09:21:29.088353Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=7&id=ZWQzNzcxMzYtMTgyNzhmMDYtYjQzMjgwMWQtYjlhMTQxNDc=, ActorId: [7:2681:4024], ActorState: ExecuteState, TraceId: 01jz028efta5cjr1nsw7k3ghf2, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: GENERIC_ERROR:
: Error: Type annotation, code: 1030
:2:17: Error: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject, At tuple, At function: SqlProjectItem, At lambda
:2:37: Error: At function: Member
:2:37: Error: Member not found: Value3. Did you mean Value? 2025-06-30T09:21:29.103368Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:3035:4281], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Column is under build operation, write operation is not allowed to column: Value3 for table: /Root/AddNonColumnDoesnotReturnInternalError, code: 2017 2025-06-30T09:21:29.103950Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=7&id=ZWQzNzcxMzYtMTgyNzhmMDYtYjQzMjgwMWQtYjlhMTQxNDc=, ActorId: [7:2681:4024], ActorState: ExecuteState, TraceId: 01jz028egbcjg7eh991xqwgypc, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: BAD_REQUEST:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Column is under build operation, write operation is not allowed to column: Value3 for table: /Root/AddNonColumnDoesnotReturnInternalError, code: 2017 2025-06-30T09:21:29.118985Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:3044:4290], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:2:94: Error: At function: KiUpdateTable!
:2:94: Error: Column 'Value3' is under the build operation '/Root/AddNonColumnDoesnotReturnInternalError'., code: 2017 2025-06-30T09:21:29.119614Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=7&id=ZWQzNzcxMzYtMTgyNzhmMDYtYjQzMjgwMWQtYjlhMTQxNDc=, ActorId: [7:2681:4024], ActorState: ExecuteState, TraceId: 01jz028egvaew3rf02b9egze85, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: BAD_REQUEST:
: Error: Type annotation, code: 1030
:2:94: Error: At function: KiUpdateTable!
:2:94: Error: Column 'Value3' is under the build operation '/Root/AddNonColumnDoesnotReturnInternalError'., code: 2017 2025-06-30T09:21:29.135285Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:3053:4299], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:2:89: Error: At tuple, At tuple, At function: KqlDeleteRows, At function: Map, At function: Filter
:2:29: Error: At lambda
:2:88: Error: At function: ==
:2:82: Error: At function: Member, At function: Member, At function: Member
:2:82: Error: Member not found: Value3. Did you mean Value? 2025-06-30T09:21:29.135834Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=7&id=ZWQzNzcxMzYtMTgyNzhmMDYtYjQzMjgwMWQtYjlhMTQxNDc=, ActorId: [7:2681:4024], ActorState: ExecuteState, TraceId: 01jz028eha1nqrcjqdaeykq90k, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: GENERIC_ERROR:
: Error: Execution, code: 1060
:2:89: Error: At tuple, At tuple, At function: KqlDeleteRows, At function: Map, At function: Filter
:2:29: Error: At lambda
:2:88: Error: At function: ==
:2:82: Error: At function: Member, At function: Member, At function: Member
:2:82: Error: Member not found: Value3. Did you mean Value? 2025-06-30T09:21:29.196535Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) GRpc shutdown warning: left infly: 1, spent: 3.227281 sec GRpc shutdown warning: left infly: 1, spent: 6.538652 sec GRpc shutdown warning: left infly: 1, spent: 9.846805 sec GRpc shutdown warning: left infly: 1, spent: 13.22898 sec GRpc shutdown warning: left infly: 1, spent: 16.638536 sec GRpc shutdown warning: left infly: 1, spent: 19.95072 sec GRpc shutdown warning: left infly: 1, spent: 23.167557 sec GRpc shutdown warning: left infly: 1, spent: 26.4402 sec GRpc shutdown warning: left infly: 1, spent: 29.678815 sec GRpc shutdown warning: failed to shutdown all connections, left infly: 1, spent: 30.010738 sec >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDontExistGroupAttribute >> LdapAuthProviderTest::LdapServerIsUnavailable >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsFromAdLdapServer ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/ut/unittest >> TFlatTest::LargeDatashardReply [GOOD] Test command err: 2025-06-30T09:21:52.871444Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670193213026076:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:52.871515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004dd3/r3tmp/tmpdzbzB2/pdisk_1.dat 2025-06-30T09:21:52.953061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-06-30T09:21:52.953079Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:52.953217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage 2025-06-30T09:21:52.953237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:52.953356Z node 1 :HIVE DEBUG: hive_impl.cpp:2279: HIVE#72057594037968897 Merged config: { MinScatterToBalance: 100 MaxNodeUsageToKick: 100 WarmUpBootWaitingPeriod: 10 MinCounterScatterToBalance: 100 ObjectImbalanceToBalance: 100 } 2025-06-30T09:21:52.954581Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670193213025959:2080] 1751275312869130 != 1751275312869133 2025-06-30T09:21:52.974332Z node 1 :HIVE DEBUG: hive_impl.cpp:145: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [1:7521670193213026094:2098] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } 
TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-06-30T09:21:52.974371Z node 1 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Execute 2025-06-30T09:21:52.974407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:52.974411Z node 1 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-30T09:21:52.974414Z node 1 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-30T09:21:52.974417Z node 1 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-30T09:21:52.974418Z node 1 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-30T09:21:52.974436Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:52.974475Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-30T09:21:52.974484Z node 1 :HIVE DEBUG: hive_impl.cpp:226: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-30T09:21:52.974485Z node 1 :HIVE DEBUG: hive_impl.cpp:229: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-30T09:21:52.974488Z node 1 :HIVE DEBUG: hive_impl.cpp:306: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-30T09:21:52.974536Z node 1 :HIVE DEBUG: hive_impl.cpp:812: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 1 Location DataCenter: "1" Module: "1" Rack: "1" Unit: "1" 2025-06-30T09:21:52.975389Z node 1 :HIVE DEBUG: tx__register_node.cpp:96: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Complete 2025-06-30T09:21:52.975423Z node 1 :HIVE DEBUG: node_info.cpp:373: HIVE#72057594037968897 Node(1) Ping([1:7521670193213026094:2098]) 2025-06-30T09:21:52.975443Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-30T09:21:52.975670Z node 1 :HIVE DEBUG: hive_impl.cpp:741: HIVE#72057594037968897 THive::Handle::TEvSyncTablets 2025-06-30T09:21:52.975684Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:41: HIVE#72057594037968897 THive::TTxSyncTablets([1:7521670193213026094:2098])::Execute 2025-06-30T09:21:52.975688Z node 1 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-30T09:21:52.975707Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:130: HIVE#72057594037968897 THive::TTxSyncTablets([1:7521670193213026094:2098])::Complete 2025-06-30T09:21:52.975748Z node 1 :HIVE DEBUG: hive_impl.cpp:735: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 1: Status: 0 StartTime: 1751275312870728 ResourceMaximum { Memory: 202797637632 } 2025-06-30T09:21:52.975754Z node 1 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(1)::Execute 2025-06-30T09:21:52.975759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting 
-> Connected 2025-06-30T09:21:52.975805Z node 1 :HIVE DEBUG: hive_impl.cpp:2795: HIVE#72057594037968897 AddRegisteredDataCentersNode(1, 1) 2025-06-30T09:21:52.975812Z node 1 :HIVE DEBUG: hive_impl.cpp:365: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-30T09:21:52.975814Z node 1 :HIVE DEBUG: hive_impl.cpp:346: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-30T09:21:52.975844Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-30T09:21:52.975853Z node 1 :HIVE DEBUG: hive_impl.cpp:226: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-30T09:21:52.975855Z node 1 :HIVE DEBUG: hive_impl.cpp:229: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-30T09:21:52.975858Z node 1 :HIVE DEBUG: hive_impl.cpp:306: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-30T09:21:52.979126Z node 1 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(1)::Complete 2025-06-30T09:21:52.979143Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete TClient is connected to server localhost:9546 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-30T09:21:52.986888Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670193213026194:2105] Handle TEvNavigate describe path dc-1 2025-06-30T09:21:52.988653Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670193213026467:2257] HANDLE EvNavigateScheme dc-1 2025-06-30T09:21:52.988975Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7521670193213026467:2257] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-30T09:21:52.997645Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7521670193213026467:2257] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-30T09:21:52.999873Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7521670193213026467:2257] Handle TEvDescribeSchemeResult Forward to# [1:7521670193213026466:2256] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 
MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-30T09:21:53.008308Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521670193213026194:2105] Handle TEvProposeTransaction 2025-06-30T09:21:53.008325Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:327: actor# [1:7521670193213026194:2105] Cookie# 0 userReqId# "" DELAY REQUEST, wait txids from allocator Type# Scheme 2025-06-30T09:21:53.019326Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:7521670193213026194:2105] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:21:53.019599Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:21:53.019620Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521670193213026194:2105] TxId# 281474976715657 ProcessProposeTransaction 2025-06-30T09:21:53.019673Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7521670193213026194:2105] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7521670197507993778:2265] 2025-06-30T09:21:53.034847Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7521670197507993778:2265] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "" PeerName: "" 2025-06-30T09:21:53.034875Z no ... 
_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715674 ready parts: 1/1 2025-06-30T09:21:53.436907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715674:0 progress is 1/1 2025-06-30T09:21:53.436910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715674 ready parts: 1/1 2025-06-30T09:21:53.436913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715674, ready parts: 1/1, is published: true 2025-06-30T09:21:53.436926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:7521670197507994807:2346] message: TxId: 281474976715674 2025-06-30T09:21:53.436930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715674 ready parts: 1/1 2025-06-30T09:21:53.436934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715674:0 2025-06-30T09:21:53.436936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715674:0 2025-06-30T09:21:53.436960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 2 2025-06-30T09:21:53.437915Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037899 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-30T09:21:53.437936Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037899 Initiating switch from PreOffline to Offline state 2025-06-30T09:21:53.438290Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037899 Reporting state Offline to schemeshard 72057594046644480 2025-06-30T09:21:53.438399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5640: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7521670197507994362 RawX2: 4503603922340087 } TabletId: 72075186224037899 State: 4 2025-06-30T09:21:53.438418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037899, state: Offline, at schemeshard: 72057594046644480 2025-06-30T09:21:53.438534Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037899 state Offline 2025-06-30T09:21:53.438542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:12 hive 72057594037968897 at ss 72057594046644480 2025-06-30T09:21:53.438606Z node 1 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594046644480 ShardLocalIdx: 12 TxId_Deprecated: 12 TabletID: 72075186224037899 2025-06-30T09:21:53.438615Z node 1 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute Tablet 72075186224037899 2025-06-30T09:21:53.438640Z node 1 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037899.Leader.1) VolatileState: Running -> Stopped (Node 1) 2025-06-30T09:21:53.438669Z node 1 :HIVE DEBUG: 
tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(DataShard.72075186224037899.Leader.1 gen 1) to node 1 2025-06-30T09:21:53.438692Z node 1 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037968897 TxId_Deprecated: 12 ShardOwnerId: 72057594046644480 ShardLocalIdx: 12 2025-06-30T09:21:53.439463Z node 1 :HIVE DEBUG: tx__delete_tablet.cpp:136: HIVE#72057594037968897 THive::TTxDeleteTablet::Complete() SideEffects: {Notifications: 0x10080003 [1:7521670193213026094:2098] NKikimrLocal.TEvStopTablet TabletId: 72075186224037899 FollowerId: 0 Generation: 1,0x10040206 [1:7521670193213026288:2147] NKikimrHive.TEvDeleteTabletReply Status: OK Origin: 72057594037968897 TxId_Deprecated: 12 ShardOwnerId: 72057594046644480 ShardLocalIdx: 12 Actions: NKikimr::TTabletReqBlockBlobStorage} 2025-06-30T09:21:53.439503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 12 ShardOwnerId: 72057594046644480 ShardLocalIdx: 12, at schemeshard: 72057594046644480 2025-06-30T09:21:53.439545Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037899 reason = ReasonStop 2025-06-30T09:21:53.439560Z node 1 :HIVE DEBUG: tx__block_storage_result.cpp:23: HIVE#72057594037968897 THive::TTxBlockStorageResult::Execute(72075186224037899 OK) 2025-06-30T09:21:53.439574Z node 1 :HIVE DEBUG: tx__block_storage_result.cpp:65: HIVE#72057594037968897 THive::TTxBlockStorageResult::Complete(72075186224037899 OK) 2025-06-30T09:21:53.439581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 1 2025-06-30T09:21:53.439584Z node 1 :HIVE DEBUG: hive_impl.cpp:896: HIVE#72057594037968897 THive::Handle::TEvInitiateDeleteStorage TabletId=72075186224037899 2025-06-30T09:21:53.439641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-30T09:21:53.439645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 6], at schemeshard: 72057594046644480 2025-06-30T09:21:53.439656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-30T09:21:53.439672Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037899 2025-06-30T09:21:53.439705Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037899 2025-06-30T09:21:53.439755Z node 1 :HIVE DEBUG: hive_impl.cpp:484: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus, TabletId: 72075186224037899 2025-06-30T09:21:53.439758Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found 2025-06-30T09:21:53.439764Z node 1 :HIVE DEBUG: tx__delete_tablet_result.cpp:26: HIVE#72057594037968897 THive::TTxDeleteTabletResult::Execute(72075186224037899 OK) 2025-06-30T09:21:53.439818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:12 2025-06-30T09:21:53.439824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted 
shardIdx 72057594046644480:12 tabletId 72075186224037899 2025-06-30T09:21:53.439837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-30T09:21:53.440439Z node 1 :HIVE DEBUG: tx__delete_tablet_result.cpp:72: HIVE#72057594037968897 THive::TTxDeleteTabletResult(72075186224037899)::Complete SideEffects {} test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004dd3/r3tmp/tmpQoRjvB/pdisk_1.dat 2025-06-30T09:21:53.767365Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:53.779617Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:5154 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-30T09:21:53.859914Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:53.859950Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:53.860355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:53.862383Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:53.862544Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:21:53.866528Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:54.756486Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:58.974031Z node 2 :MINIKQL_ENGINE ERROR: datashard__engine_host.cpp:520: Shard %72075186224037888, txid %281474976716360, engine error: Error executing transaction (read-only: 1): Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) 2025-06-30T09:21:58.984818Z node 2 :TX_DATASHARD ERROR: execute_data_tx_unit.cpp:268: Datashard execution error for [0:281474976716360] at 72075186224037888: Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) 2025-06-30T09:21:58.985861Z node 2 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976716360 at tablet 72075186224037888 status: RESULT_UNAVAILABLE errors: REPLY_SIZE_EXCEEDED (Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648)) | 2025-06-30T09:21:58.994905Z node 2 :TX_PROXY ERROR: datareq.cpp:883: Actor# [2:7521670219780448618:5893] txid# 281474976716360 RESPONSE Status# ExecResultUnavailable marker# P13c DataShardErrors: [REPLY_SIZE_EXCEEDED] Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) proxy error code: ExecResultUnavailable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest >> TCdcStreamWithRebootsTests::CreateStream[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:50.663246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:50.663273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:50.663284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:50.663300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:50.663307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:50.663313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: 
OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:50.663324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:50.663352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:50.663498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:50.663576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:50.681870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:50.681897Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:50.682057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:50.686136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:50.687200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:50.687243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:50.693287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:50.693364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:50.693547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:50.693614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:50.694815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:50.694890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:50.695233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:50.695246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:50.695257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:50.695268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 
72057594046678944, LocalPathId: 1] 2025-06-30T09:21:50.695275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:50.695310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:50.697194Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:50.722256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:50.722334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:50.722415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:50.722424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:50.722489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:50.722505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:50.723314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:50.723373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:50.723446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:50.723459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:50.723467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:50.723474Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:50.723990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:50.724004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:50.724011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:50.724406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:50.724419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:50.724427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:50.724436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:50.725237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:50.725709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:50.725779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:50.726043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:50.726074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... 
ard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:21:59.855271Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:21:59.855771Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:2, at schemeshard: 72057594046678944 2025-06-30T09:21:59.855814Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:59.855820Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:21:59.855867Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:21:59.855898Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:59.855902Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [26:210:2210], at schemeshard: 72057594046678944, txId: 1003, path id: 4 2025-06-30T09:21:59.855906Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [26:210:2210], at schemeshard: 72057594046678944, txId: 1003, path id: 5 2025-06-30T09:21:59.855963Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:2, at schemeshard: 72057594046678944 2025-06-30T09:21:59.855969Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:2 ProgressState 2025-06-30T09:21:59.855981Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:2 progress is 3/3 2025-06-30T09:21:59.855986Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-30T09:21:59.855990Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:2 progress is 3/3 2025-06-30T09:21:59.855992Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-30T09:21:59.855995Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 3/3, is published: false 2025-06-30T09:21:59.856000Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-30T09:21:59.856005Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:21:59.856009Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:21:59.856018Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, 
LocalPathId: 4] was 4 2025-06-30T09:21:59.856022Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:1 2025-06-30T09:21:59.856025Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:1 2025-06-30T09:21:59.856037Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:21:59.856040Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:2 2025-06-30T09:21:59.856042Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:2 2025-06-30T09:21:59.856048Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 5 2025-06-30T09:21:59.856054Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 2, subscribers: 0 2025-06-30T09:21:59.856059Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 4], 4 2025-06-30T09:21:59.856062Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 5], 2 2025-06-30T09:21:59.856314Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:59.856327Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:59.856334Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:21:59.856338Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 4 2025-06-30T09:21:59.856342Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:21:59.856536Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:59.856546Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:21:59.856549Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:21:59.856552Z node 26 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2 2025-06-30T09:21:59.856555Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:21:59.856563Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:21:59.857517Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:21:59.857552Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:21:59.858329Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:21:59.858339Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:21:59.858408Z node 26 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:21:59.858423Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:21:59.858427Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [26:665:2581] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:21:59.858491Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:21:59.858545Z node 26 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Stream" took 67us result status StatusSuccess 2025-06-30T09:21:59.858637Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Stream" PathDescription { Self { Name: "Stream" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeCdcStream CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 CdcStreamVersion: 1 } ChildrenExist: true } Children { Name: "streamImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409548 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { 
Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } CdcStreamDescription { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 4 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDontExistGroupAttribute [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserLoginBad >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts >> KqpUserConstraint::KqpReadNull-UploadNull [GOOD] >> StatisticsSaveLoad::Delete [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDontExistGroupAttribute [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserLoginBad >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsDisableRequestToAD >> TConsoleTests::TestRemoveAttributes [GOOD] >> TConsoleTests::TestRemoveAttributesExtSubdomain >> LdapAuthProviderTest::LdapServerIsUnavailable [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyHost >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsFromAdLdapServer [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGood >> SubDomainWithReboots::RootWithStoragePools [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Delete [GOOD] Test command err: 2025-06-30T09:21:57.471329Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:420:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:21:57.471393Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:57.471420Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00328f/r3tmp/tmpM4AjKn/pdisk_1.dat 2025-06-30T09:21:57.622315Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6304, node 1 2025-06-30T09:21:57.743949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:57.743975Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:57.743980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:57.744144Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:57.744800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:57.823648Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:57.823685Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:57.836210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23695 2025-06-30T09:21:58.202154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:21:59.175542Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-30T09:21:59.204222Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:59.204268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:59.275495Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:21:59.283142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:59.427335Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:59.467538Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:59.467774Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:59.467940Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:59.467978Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:59.468030Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:59.468093Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:59.468116Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:59.468135Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:59.468155Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:21:59.637923Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:59.637973Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:59.652309Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:59.694186Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:59.708399Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-30T09:21:59.708442Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-30T09:21:59.717704Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-30T09:21:59.717781Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-30T09:21:59.717804Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-30T09:21:59.717808Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-30T09:21:59.717812Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-30T09:21:59.717817Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-30T09:21:59.717821Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-30T09:21:59.717826Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-30T09:21:59.717941Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-30T09:21:59.733257Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7960: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:21:59.733296Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7990: ConnectToSA(), pipe client id: [2:1799:2565], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:21:59.735841Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1811:2574] 2025-06-30T09:21:59.737597Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1843:2590] 2025-06-30T09:21:59.737931Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1843:2590], schemeshard id = 72075186224037897 2025-06-30T09:21:59.738541Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-30T09:21:59.746559Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-30T09:21:59.746586Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-30T09:21:59.746600Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-30T09:21:59.752677Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:59.754844Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-30T09:21:59.754889Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-30T09:21:59.883233Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-30T09:22:00.003743Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-30T09:22:00.055100Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-30T09:22:00.591579Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:00.592246Z node 1 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-30T09:22:00.592349Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-30T09:22:00.595115Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-30T09:22:00.596184Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2167:3037], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:00.596225Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2183:3042], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:00.596306Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:00.598086Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:00.611156Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2187:3045], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:22:00.819242Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2278:3075] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:00.874442Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2300:3087]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:22:00.874502Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-30T09:22:00.874518Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2302:3089] 2025-06-30T09:22:00.874551Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2302:3089] 2025-06-30T09:22:00.874762Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2303:2800] 2025-06-30T09:22:00.874832Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2302:3089], server id = [2:2303:2800], tablet id = 72075186224037894, status = OK 2025-06-30T09:22:00.874883Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2303:2800], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-30T09:22:00.874903Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-30T09:22:00.874956Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-30T09:22:00.874969Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2300:3087], StatRequests.size() = 1 2025-06-30T09:22:00.926091Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=MzdjZTlkYjAtNzA4Zjg3NS0yOTk0NzJjZi05MmNmNmI2ZA==, TxId: 2025-06-30T09:22:00.926126Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=MzdjZTlkYjAtNzA4Zjg3NS0yOTk0NzJjZi05MmNmNmI2ZA==, TxId: 2025-06-30T09:22:00.926447Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-30T09:22:00.927089Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-30T09:22:00.934217Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2336:3110]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:22:00.934276Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-30T09:22:00.934285Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2336:3110], StatRequests.size() = 1 2025-06-30T09:22:00.969346Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=YmUwZTBiNmMtZDJlN2IwZDgtNjE2MDIwYTEtN2MxYmYzMWU=, TxId: 2025-06-30T09:22:00.969380Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=YmUwZTBiNmMtZDJlN2IwZDgtNjE2MDIwYTEtN2MxYmYzMWU=, TxId: 2025-06-30T09:22:00.969853Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-30T09:22:00.970479Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-30T09:22:00.978595Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:2368:3125]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:22:00.978674Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-30T09:22:00.978683Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:2368:3125], StatRequests.size() = 1 2025-06-30T09:22:01.010277Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=NWIwNGNhOWEtZjc2MzI4Y2UtNDEwYjRkMzQtNTJhMWIyODc=, TxId: 01jz029dn5e6k9xyfdfmmnynjm 2025-06-30T09:22:01.010331Z node 1 :STATISTICS WARN: query_actor.cpp:372: [TQueryBase] Finish with BAD_REQUEST, Issues: {
: Error: No data }, SessionId: ydb://session/3?node_id=1&id=NWIwNGNhOWEtZjc2MzI4Y2UtNDEwYjRkMzQtNTJhMWIyODc=, TxId: 01jz029dn5e6k9xyfdfmmnynjm >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserPasswordBad >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserPasswordBad >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithCustomGroupAttributeGood >> TRestoreWithRebootsTests::ShouldSucceedOnMultipleFrames [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsDisableRequestToAD [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithRemovedUserCredentialsBad >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain_reboots/unittest >> SubDomainWithReboots::RootWithStoragePools [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:52.435790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:52.435816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:52.435823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:52.435829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:52.435842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:52.435847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:52.435856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:52.435871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:52.436018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:52.436157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:52.456543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:52.456574Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:52.456710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:52.460099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:52.461167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:52.461228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:52.463015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:52.463076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:52.463205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:52.463260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:52.464135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:52.464183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:52.464454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:52.464467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:52.464476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:52.464485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:52.464490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:52.464520Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:52.465978Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:52.493437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:52.493539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:52.493611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:52.493620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:52.493687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:52.493703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:52.494678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:52.494756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:52.494821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:52.494834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:52.494841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:52.494847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:52.495532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:21:52.495550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:52.495557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:52.496093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:52.496107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:52.496114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:52.496123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:52.496859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:52.499771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:52.499849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:52.500124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:52.500169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
State no shards to create, do next state 2025-06-30T09:22:01.457129Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1002:0 2 -> 3 2025-06-30T09:22:01.457532Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:01.457545Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1002:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:01.457552Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1002:0 3 -> 128 2025-06-30T09:22:01.457913Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:01.457923Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:01.457930Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1002:0, at tablet# 72057594046678944 2025-06-30T09:22:01.457939Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1002 ready parts: 1/1 2025-06-30T09:22:01.457970Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1002 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:01.458281Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1002:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1002 msg type: 269090816 2025-06-30T09:22:01.458312Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1002, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1002 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1002 at step: 5000003 2025-06-30T09:22:01.458393Z node 31 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:01.458415Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1002 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 133143988335 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:01.458423Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1002:0, at tablet# 72057594046678944 2025-06-30T09:22:01.458483Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1002:0 128 -> 240 2025-06-30T09:22:01.458492Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1002:0, at tablet# 
72057594046678944 2025-06-30T09:22:01.458522Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:01.458538Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1002 2025-06-30T09:22:01.458989Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:01.458999Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:01.459050Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:01.459057Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [31:212:2212], at schemeshard: 72057594046678944, txId: 1002, path id: 1 2025-06-30T09:22:01.459144Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:01.459155Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1002:0 ProgressState 2025-06-30T09:22:01.459170Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1002:0 progress is 1/1 2025-06-30T09:22:01.459175Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:01.459180Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1002:0 progress is 1/1 2025-06-30T09:22:01.459184Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:01.459189Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: false 2025-06-30T09:22:01.459194Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:01.459200Z node 31 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1002:0 2025-06-30T09:22:01.459209Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1002:0 2025-06-30T09:22:01.459222Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:01.459227Z node 31 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1002, publications: 1, subscribers: 1 2025-06-30T09:22:01.459232Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 1], 6 2025-06-30T09:22:01.459319Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: 
Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:01.459331Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:01.459335Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1002 2025-06-30T09:22:01.459341Z node 31 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 6 2025-06-30T09:22:01.459345Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:01.459360Z node 31 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1002, subscribers: 1 2025-06-30T09:22:01.459365Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [31:310:2299] 2025-06-30T09:22:01.459878Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-30T09:22:01.459898Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-30T09:22:01.459904Z node 31 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [31:311:2300] TestWaitNotification: OK eventTxId 1002 2025-06-30T09:22:01.460001Z node 31 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:01.460033Z node 31 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 41us result status StatusSuccess 2025-06-30T09:22:01.460137Z node 31 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 2 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest >> KqpUserConstraint::KqpReadNull-UploadNull [GOOD] Test command err: 2025-06-30T09:22:00.224299Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:00.224374Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:00.224386Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003f3f/r3tmp/tmpI8dBqT/pdisk_1.dat 2025-06-30T09:22:00.331746Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:00.332840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:00.358098Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:00.360627Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275319005705 != 1751275319005709 2025-06-30T09:22:00.407763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:00.407816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:00.419438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:00.506304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:00.810857Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:819:2666], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:00.810892Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:830:2671], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:00.810906Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:00.811917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:00.835085Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:00.961315Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:833:2674], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:22:01.002591Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:903:2713] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:01.084255Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz029dfa9nczs534v6dd67v6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTEzY2Y5YTAtYzU4YmVlYmMtNzM1MTViMDEtZGQ1YWVmMTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> LdapAuthProviderTest::LdapRequestWithEmptyHost [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyBaseDn >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood >> LdapAuthProviderTest_nonSecure::LdapRefreshRemoveUserBad >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithRemovedUserCredentialsBad |81.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest |81.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithRemovedUserCredentialsBad >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithRemovedUserCredentialsBad [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoGood |81.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |81.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |81.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |81.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad >> StatisticsSaveLoad::ForbidAccess >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDontExistGroupAttribute |81.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |81.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> VectorIndexBuildTestReboots::BaseCase+Prefixed[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] 
2025-06-30T09:16:54.389907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:16:54.389933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:54.389938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:16:54.389943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:16:54.389954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:16:54.389958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:16:54.389967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:16:54.389984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:16:54.390107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:54.390177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:16:54.405328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:16:54.405354Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:16:54.405455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:16:54.409752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:16:54.409812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:16:54.409857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:16:54.412035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:16:54.412236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:16:54.412368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:54.412424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:16:54.413164Z node 
1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:54.413206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:16:54.413447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:16:54.413458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:16:54.413490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:16:54.413499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:16:54.413505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:16:54.413542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:16:54.414979Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:16:54.435859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:16:54.435939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:54.435995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:16:54.436003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:16:54.436044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:16:54.436057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:16:54.436854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:54.436895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: 
txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:16:54.436952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:54.436961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:16:54.436967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:16:54.436973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:16:54.437400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:54.437411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:16:54.437417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:16:54.437793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:54.437803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:16:54.437810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:54.437818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:16:54.438445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:16:54.438858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:16:54.438897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:16:54.439071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:16:54.439095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 
2025-06-30T09:16:54.439102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:54.439160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:16:54.439167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:16:54.439199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:16:54.439211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: ... CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 8 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 
ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:32.101770Z node 280 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplPostingTable0build" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:21:32.101816Z node 280 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplPostingTable0build" took 47us result status StatusPathDoesNotExist 2025-06-30T09:21:32.101837Z node 280 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/dir/Table/index1/indexImplPostingTable0build\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/dir/Table/index1\' (id: [OwnerId: 72057594046678944, LocalPathId: 5]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/dir/Table/index1/indexImplPostingTable0build" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/dir/Table/index1" LastExistedPrefixPathId: 5 LastExistedPrefixDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:21:32.101904Z node 280 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplPostingTable1build" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:21:32.101921Z node 280 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplPostingTable1build" took 20us result status StatusPathDoesNotExist 2025-06-30T09:21:32.101938Z node 280 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/dir/Table/index1/indexImplPostingTable1build\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/dir/Table/index1\' (id: [OwnerId: 72057594046678944, LocalPathId: 5]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/dir/Table/index1/indexImplPostingTable1build" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/dir/Table/index1" LastExistedPrefixPathId: 5 LastExistedPrefixDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at 
schemeshard: 72057594046678944 2025-06-30T09:21:32.101998Z node 280 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplPostingTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:21:32.102024Z node 280 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplPostingTable" took 26us result status StatusSuccess 2025-06-30T09:21:32.102109Z node 280 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1/indexImplPostingTable" PathDescription { Self { Name: "indexImplPostingTable" PathId: 7 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplPostingTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 
DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 8 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ... posting table contains 400 rows >> LdapAuthProviderTest::LdapRequestWithEmptyBaseDn [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyBindDn >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithRemovedUserCredentialsBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoGood |81.8%| [TA] $(B)/ydb/core/kqp/ut/data/test-results/unittest/{meta.json ... 
results_accumulator.log} >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithRemovedUserCredentialsBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoGood >> YdbIndexTable::MultiShardTableOneIndexDataColumn [GOOD] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap >> StatisticsSaveLoad::Simple >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDontExistGroupAttribute [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserLoginBad ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_restore/unittest >> TRestoreWithRebootsTests::ShouldSucceedOnMultipleFrames [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:04.414909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:04.414928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.414933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:04.414937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:04.414941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:04.414946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:04.414955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.414968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:04.415085Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:04.415177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:04.426887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:04.426911Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:04.427016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.429573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:04.430411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:04.430444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:04.431950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:04.432007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:04.432146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.432223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:04.433146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.433182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:04.433395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.433403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.433410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:04.433416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:04.433420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:04.433441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 
2025-06-30T09:21:04.434609Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.450562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:04.450622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.450675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:04.450681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:04.450717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:04.450725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:04.451410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.451446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:04.451491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.451499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:04.451502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:04.451507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:04.451997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.452013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:04.452018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:04.452392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.452400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.452405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.452410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:04.452935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:04.453372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:04.453409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:04.453583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.453608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
2:01.915794Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:516: [Import] [s3:1003] GetObject: key# /data_00.csv.zst, range# 28-34 REQUEST: GET /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:5644 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C2D11F8D-916F-4487-B3A2-D1BA390DC036 amz-sdk-request: attempt=1 content-type: application/xml range: bytes=28-34 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /data_00.csv.zst / 60 2025-06-30T09:22:01.916525Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:655: [Import] [s3:1003] Handle NKikimr::NWrappers::NExternalStorage::TEvGetObjectResponse { Key: null Result: e0a029185b0e1ad2f41736bc4b274b81 Body: 7b } 2025-06-30T09:22:01.916537Z node 135 :DATASHARD_RESTORE TRACE: import_s3.cpp:672: [Import] [s3:1003] Content size: processed-bytes# 0, content-length# 60, body-size# 7 2025-06-30T09:22:01.916548Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:516: [Import] [s3:1003] GetObject: key# /data_00.csv.zst, range# 35-41 REQUEST: GET /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:5644 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B6082E40-1512-4DD2-A1A5-0D816669F7D2 amz-sdk-request: attempt=1 content-type: application/xml range: bytes=35-41 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /data_00.csv.zst / 60 2025-06-30T09:22:01.922840Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:655: [Import] [s3:1003] Handle NKikimr::NWrappers::NExternalStorage::TEvGetObjectResponse { Key: null Result: e0a029185b0e1ad2f41736bc4b274b81 Body: 7b } 2025-06-30T09:22:01.922868Z node 135 :DATASHARD_RESTORE TRACE: import_s3.cpp:672: [Import] [s3:1003] Content size: processed-bytes# 0, content-length# 60, body-size# 7 2025-06-30T09:22:01.922916Z node 135 :DATASHARD_RESTORE INFO: import_s3.cpp:805: [Import] [s3:1003] Upload rows: count# 1, size# 34 2025-06-30T09:22:01.923696Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:813: [Import] [s3:1003] Handle NKikimr::TEvDataShard::TEvS3UploadRowsResponse { Record: TabletID: 72075186233409546 Status: 0 Info: { DataETag: e0a029185b0e1ad2f41736bc4b274b81 ProcessedBytes: 37 WrittenBytes: 16 WrittenRows: 2 ChecksumState: DownloadState: } } 2025-06-30T09:22:01.923715Z node 135 :DATASHARD_RESTORE NOTICE: import_s3.cpp:620: [Import] [s3:1003] Process download info at 'UploadResponse': info# { DataETag: e0a029185b0e1ad2f41736bc4b274b81 ProcessedBytes: 37 WrittenBytes: 16 WrittenRows: 2 ChecksumState: DownloadState: } 2025-06-30T09:22:01.923733Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:516: [Import] [s3:1003] GetObject: key# /data_00.csv.zst, range# 42-48 REQUEST: GET /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:5644 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 74BA57C7-E906-4675-A39D-0430897EBD5F amz-sdk-request: attempt=1 content-type: application/xml range: bytes=42-48 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /data_00.csv.zst / 60 2025-06-30T09:22:01.924746Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:655: [Import] [s3:1003] Handle NKikimr::NWrappers::NExternalStorage::TEvGetObjectResponse { Key: null Result: 
e0a029185b0e1ad2f41736bc4b274b81 Body: 7b } 2025-06-30T09:22:01.924762Z node 135 :DATASHARD_RESTORE TRACE: import_s3.cpp:672: [Import] [s3:1003] Content size: processed-bytes# 37, content-length# 60, body-size# 7 2025-06-30T09:22:01.924778Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:516: [Import] [s3:1003] GetObject: key# /data_00.csv.zst, range# 49-55 REQUEST: GET /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:5644 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: EA5C8935-E195-46BE-8FE1-F50FB8BE524A amz-sdk-request: attempt=1 content-type: application/xml range: bytes=49-55 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /data_00.csv.zst / 60 2025-06-30T09:22:01.925462Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:655: [Import] [s3:1003] Handle NKikimr::NWrappers::NExternalStorage::TEvGetObjectResponse { Key: null Result: e0a029185b0e1ad2f41736bc4b274b81 Body: 7b } 2025-06-30T09:22:01.925474Z node 135 :DATASHARD_RESTORE TRACE: import_s3.cpp:672: [Import] [s3:1003] Content size: processed-bytes# 37, content-length# 60, body-size# 7 2025-06-30T09:22:01.925486Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:516: [Import] [s3:1003] GetObject: key# /data_00.csv.zst, range# 56-59 REQUEST: GET /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:5644 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 5AFD0ACB-CB2A-409E-9064-1A92E51F507F amz-sdk-request: attempt=1 content-type: application/xml range: bytes=56-59 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /data_00.csv.zst / 60 2025-06-30T09:22:01.926119Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:655: [Import] [s3:1003] Handle NKikimr::NWrappers::NExternalStorage::TEvGetObjectResponse { Key: null Result: e0a029185b0e1ad2f41736bc4b274b81 Body: 4b } 2025-06-30T09:22:01.926130Z node 135 :DATASHARD_RESTORE TRACE: import_s3.cpp:672: [Import] [s3:1003] Content size: processed-bytes# 37, content-length# 60, body-size# 4 2025-06-30T09:22:01.926165Z node 135 :DATASHARD_RESTORE INFO: import_s3.cpp:805: [Import] [s3:1003] Upload rows: count# 1, size# 34 2025-06-30T09:22:01.926873Z node 135 :DATASHARD_RESTORE DEBUG: import_s3.cpp:813: [Import] [s3:1003] Handle NKikimr::TEvDataShard::TEvS3UploadRowsResponse { Record: TabletID: 72075186233409546 Status: 0 Info: { DataETag: e0a029185b0e1ad2f41736bc4b274b81 ProcessedBytes: 60 WrittenBytes: 24 WrittenRows: 3 ChecksumState: DownloadState: } } 2025-06-30T09:22:01.926889Z node 135 :DATASHARD_RESTORE NOTICE: import_s3.cpp:620: [Import] [s3:1003] Process download info at 'UploadResponse': info# { DataETag: e0a029185b0e1ad2f41736bc4b274b81 ProcessedBytes: 60 WrittenBytes: 24 WrittenRows: 3 ChecksumState: DownloadState: } 2025-06-30T09:22:01.926897Z node 135 :DATASHARD_RESTORE NOTICE: import_s3.cpp:961: [Import] [s3:1003] Finish: success# 1, error# , writtenBytes# 24, writtenRows# 3 2025-06-30T09:22:01.930072Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 338 RawX2: 579820587283 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 24 RowsProcessed: 3 } 2025-06-30T09:22:01.930097Z node 135 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-30T09:22:01.930126Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Source { RawX1: 338 RawX2: 579820587283 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 24 RowsProcessed: 3 } 2025-06-30T09:22:01.930145Z node 135 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TRestore TProposedWaitParts, opId: 1003:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 338 RawX2: 579820587283 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 24 RowsProcessed: 3 } 2025-06-30T09:22:01.930165Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1003:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:01.930171Z node 135 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:01.930178Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:22:01.930189Z node 135 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 129 -> 240 2025-06-30T09:22:01.930234Z node 135 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 1003:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:01.930718Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:01.930785Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:01.930796Z node 135 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:22:01.930813Z node 135 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:01.930818Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:01.930824Z node 135 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:01.930829Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:01.930835Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, 
ready parts: 1/1, is published: true 2025-06-30T09:22:01.930877Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [135:416:2387] message: TxId: 1003 2025-06-30T09:22:01.930890Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:01.930897Z node 135 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:22:01.930902Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:22:01.930936Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:22:01.931534Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:01.931549Z node 135 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [135:453:2423] TestWaitNotification: OK eventTxId 1003 |81.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |81.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |81.8%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/data/test-results/unittest/{meta.json ... results_accumulator.log} |81.8%| [LD] {RESULT} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut >> LdapAuthProviderTest::LdapRequestWithEmptyBindDn [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyBindPassword >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table [GOOD] |81.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest >> TxUsage::Sinks_Oltp_WriteToTopics_3_Query >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad |81.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest >> LdapAuthProviderTest::LdapRequestWithEmptyBindPassword [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-30T09:21:59.001616Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670219315607290:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:59.020466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004725/r3tmp/tmpUAB9Dm/pdisk_1.dat 2025-06-30T09:21:59.209208Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670219315607098:2080] 1751275318987930 != 1751275318987933 2025-06-30T09:21:59.214292Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16210, node 1 
2025-06-30T09:21:59.251867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:59.251881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:59.251883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:59.251932Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:59.263086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:59.263119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:59.271150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:21:59.414135Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:21:59.414297Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:21:59.414304Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:21:59.414960Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:21844, port: 21844 2025-06-30T09:21:59.414987Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:21:59.510957Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:21:59.562895Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:21:59.616784Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****6jbA (53B5C90B) () has now valid token of ldapuser@ldap 2025-06-30T09:21:59.968732Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670224886516946:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:59.968977Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004725/r3tmp/tmpQKga3I/pdisk_1.dat 2025-06-30T09:21:59.993109Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:59.993400Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670224886516919:2080] 1751275319968493 != 1751275319968496 TServer::EnableGrpc on GrpcPort 64666, node 2 2025-06-30T09:22:00.014516Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:00.014531Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:00.014535Z node 2 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:00.014594Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:00.079069Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:00.079104Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:00.082822Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:00.083191Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:00.083608Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:00.083621Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:00.083821Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:22968, port: 22968 2025-06-30T09:22:00.083846Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:00.151079Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:00.198952Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:00.199544Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:00.199568Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:00.243306Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:00.287830Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:00.291630Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****C2aQ (7D28EC4C) () has now valid token of ldapuser@ldap 2025-06-30T09:22:00.613797Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670228511135883:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:00.613818Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004725/r3tmp/tmpBi5Q36/pdisk_1.dat 
2025-06-30T09:22:00.637319Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:00.640270Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670228511135863:2080] 1751275320613653 != 1751275320613656 TServer::EnableGrpc on GrpcPort 12180, node 3 2025-06-30T09:22:00.659704Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:00.659717Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:00.659721Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:00.659767Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:00.719211Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:00.719255Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:00.723357Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:00.758834Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:00.759666Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:00.759680Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:00.759930Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:16690, port: 16690 2025-06-30T09:22:00.759960Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:00.815010Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:00.859374Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****d75Q (C120BADA) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004725/r3tmp/tmpmENAgy/pdisk_1.dat 2025-06-30T09:22:01.366950Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:01.375663Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:01.375896Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521670233515256820:2080] 1751275321347191 != 1751275321347194 TServer::EnableGrpc on GrpcPort 26737, node 4 2025-06-30T09:22:01.399729Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:01.399744Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:01.399748Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:01.399815Z node 4 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:01.459774Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:01.459809Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:01.460314Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:01.618275Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:01.618341Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:01.618344Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:01.618486Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://qqq:9649 ldaps://localhost:9649 ldaps://localhost:11111, port: 9649 2025-06-30T09:22:01.618507Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:01.707007Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:01.754958Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:01.755174Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:01.755195Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.798924Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.846933Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.847408Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****3qJQ (6D609149) () has now valid token of ldapuser@ldap 2025-06-30T09:22:02.060326Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521670235915396736:2166];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:02.060900Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004725/r3tmp/tmp7t1Ei1/pdisk_1.dat 2025-06-30T09:22:02.088198Z node 5 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4228, node 5 2025-06-30T09:22:02.096430Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.096443Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.096446Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.096518Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.165250Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.165281Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.166509Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:02.216839Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.216939Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.216943Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.217141Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:62595, port: 62595 2025-06-30T09:22:02.217177Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:02.286932Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-30T09:22:02.333872Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:02.334090Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:02.334102Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-30T09:22:02.375066Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-30T09:22:02.420341Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-30T09:22:02.420751Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****GngQ (2A0C3717) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004725/r3tmp/tmpK0PBjr/pdisk_1.dat 
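The LDAP_AUTH_PROVIDER traces above show a two-step nested-group strategy: first a single query with the Active Directory matching-rule-in-chain filter (member:1.2.840.113556.1.4.1941:=<user DN>) requesting attribute "1.1" (no attributes, an existence probe), then, when the server does not resolve the chain itself, a breadth-first tree traversal that asks for the parents of the current group set with (|(entryDn=...)(entryDn=...)) filters until no new groups appear. A minimal sketch of the traversal, assuming a hypothetical search(base_dn, filter, attrs) helper that returns the parent-group DNs found:

    AD_CHAIN = "member:1.2.840.113556.1.4.1941:"  # LDAP_MATCHING_RULE_IN_CHAIN

    def resolve_nested_groups(search, base_dn, direct_groups, attr="memberOf"):
        groups = set(direct_groups)
        frontier = sorted(groups)
        while frontier:
            # One level of parents per query, mirroring the
            # (|(entryDn=g1)(entryDn=g2)...) filters in the trace.
            flt = "(|" + "".join(f"(entryDn={dn})" for dn in frontier) + ")"
            parents = search(base_dn, flt, attrs=[attr])
            frontier = sorted(set(parents) - groups)
            groups.update(frontier)
        return groups

Each successive query in the log is exactly one such level, ending with the lone (|(entryDn=cn=people,ou=groups,...)) filter once only the root group remains.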
2025-06-30T09:22:02.636193Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 19371, node 6 2025-06-30T09:22:02.663792Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.663806Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.663808Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.663863Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.664760Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:02.670927Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7521670235729025635:2080] 1751275322626431 != 1751275322626434 2025-06-30T09:22:02.727029Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.727074Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.731196Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:02.794867Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.794967Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.794973Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.795204Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:7868, port: 7868 2025-06-30T09:22:02.795229Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:02.859103Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-30T09:22:02.859135Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter &(uid=ldapuser)() on server ldaps://localhost:7868. Bad search filter 2025-06-30T09:22:02.859377Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****N9sQ (4DB7C8F8) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldaps://localhost:7868. 
Bad search filter)' >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithCustomGroupAttributeGood >> KqpScan::NoTruncate >> KqpScan::AggregateNoColumn >> KqpScan::RightSemiJoinSimple >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD [GOOD] >> KqpScan::PureExpr >> KqpScan::AggregateWithFunction ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD [GOOD] Test command err: 2025-06-30T09:22:00.863117Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670229350781232:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:00.864892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046d6/r3tmp/tmp11VJjn/pdisk_1.dat 2025-06-30T09:22:00.965550Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22175, node 1 2025-06-30T09:22:00.987021Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:00.987034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:00.987037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:00.987086Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:01.018833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:01.018861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:01.020476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:01.147025Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:01.149971Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:01.149988Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:01.150624Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://unavailablehost:26664, port: 26664 2025-06-30T09:22:01.150654Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:01.158342Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:184: Could not start TLS. 
Can't contact LDAP server 2025-06-30T09:22:01.158809Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****bA7A (383CAF2C) () has now retryable error message 'Could not login via LDAP (Could not start TLS. Can't contact LDAP server)' 2025-06-30T09:22:01.158887Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:01.158895Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:01.159188Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://unavailablehost:26664, port: 26664 2025-06-30T09:22:01.159212Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:01.164150Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:184: Could not start TLS. Can't contact LDAP server 2025-06-30T09:22:01.164217Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****bA7A (383CAF2C) () has now retryable error message 'Could not login via LDAP (Could not start TLS. Can't contact LDAP server)' test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046d6/r3tmp/tmpmUk6Y9/pdisk_1.dat 2025-06-30T09:22:01.654877Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 31929, node 2 2025-06-30T09:22:01.663235Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:01.663535Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670232958577622:2080] 1751275321625464 != 1751275321625467 2025-06-30T09:22:01.676693Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:01.676711Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:01.676715Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:01.676784Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:01.727830Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:01.727864Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:01.728510Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:01.774970Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:01.778031Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:01.778050Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:01.778316Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****eU5w (39F249C4) () has now permanent error message 'Could not login via LDAP (List of ldap server hosts is empty)' 2025-06-30T09:22:02.236055Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670237637830910:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:02.236828Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046d6/r3tmp/tmpvvcqam/pdisk_1.dat 2025-06-30T09:22:02.256443Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:02.257154Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670237637830779:2080] 1751275322233782 != 1751275322233785 TServer::EnableGrpc on GrpcPort 18759, node 3 2025-06-30T09:22:02.271443Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.271460Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.271462Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.271512Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.339249Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.339285Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.340275Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:02.435441Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.437322Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.437343Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.437541Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****PjMQ (F202967A) () has now permanent error message 'Could not login via LDAP (Parameter BaseDn is empty)' 2025-06-30T09:22:02.850465Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521670236160371826:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:02.850486Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046d6/r3tmp/tmpY69Mku/pdisk_1.dat 2025-06-30T09:22:02.876705Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521670236160371807:2080] 1751275322850335 != 1751275322850338 2025-06-30T09:22:02.876887Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1462, node 4 2025-06-30T09:22:02.895013Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.895034Z node 4 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.895037Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.895104Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.961003Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.961054Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.962720Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:03.054870Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:03.055525Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:03.055536Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:03.055738Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****n7ag (D906A8AD) () has now permanent error message 'Could not login via LDAP (Parameter BindDn is empty)' 2025-06-30T09:22:03.456783Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521670239843662262:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:03.456807Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046d6/r3tmp/tmpdyYBAb/pdisk_1.dat 2025-06-30T09:22:03.479820Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:03.480170Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7521670239843662243:2080] 1751275323456653 != 1751275323456656 TServer::EnableGrpc on GrpcPort 65101, node 5 2025-06-30T09:22:03.496233Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:03.496251Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:03.496254Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:03.496326Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:03.545055Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:03.546377Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:03.546391Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:03.546609Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****oqkA (F8F231BF) () has now permanent error message 'Could not login via LDAP (Parameter BindPassword is empty)' 2025-06-30T09:22:03.561844Z node 5 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:03.561885Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:03.562958Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:03.963922Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670241620404387:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:03.963946Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046d6/r3tmp/tmpJEsUrT/pdisk_1.dat 2025-06-30T09:22:03.979967Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26991, node 6 2025-06-30T09:22:03.995239Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:03.995261Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:03.995263Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:03.995322Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:04.056253Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:04.058164Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:04.058179Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:04.058402Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:14395, port: 14395 2025-06-30T09:22:04.058421Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:04.068704Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:04.068739Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:04.069814Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:04.111677Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:04.155258Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****HiyA (1A180AB4) () has now valid token of ldapuser@ldap ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] Test command err: 2025-06-30T09:22:00.589579Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670228989362254:2174];send_to=[0:7307199536658146131:7762515]; 
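Note the two error classes in the outputs above: connectivity failures ("Could not start TLS. Can't contact LDAP server") surface as retryable ticket errors, while configuration and credential problems (empty host list, BaseDn, BindDn, or BindPassword; invalid credentials; bad filters) are marked permanent. A rough sketch of that split, assuming message substrings are the only available signal (the real provider classifies at the call site, not by string matching):

    RETRYABLE_MARKERS = ("Can't contact LDAP server",)

    def classify_ldap_error(message: str) -> str:
        # Transient network trouble is worth retrying; bad configuration
        # or credentials will fail the same way every time.
        if any(marker in message for marker in RETRYABLE_MARKERS):
            return "retryable"
        return "permanent"
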
2025-06-30T09:22:00.589787Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046da/r3tmp/tmpGJJwZS/pdisk_1.dat 2025-06-30T09:22:00.684520Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:00.685374Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670228989362118:2080] 1751275320586845 != 1751275320586848 TServer::EnableGrpc on GrpcPort 17719, node 1 2025-06-30T09:22:00.742193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:00.742231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:00.747217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:00.767461Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:00.767478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:00.767481Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:00.767546Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:00.844665Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:00.846853Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:00.846890Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:00.847513Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:24542, port: 24542 2025-06-30T09:22:00.847546Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:00.860022Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:00.907017Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:00.955056Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:00.955266Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:00.955277Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.002899Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: 
subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.046933Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.047615Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****_ddg (843F8236) () has now valid token of ldapuser@ldap 2025-06-30T09:22:01.464898Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670232425204988:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:01.465779Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046da/r3tmp/tmpJfnN1Z/pdisk_1.dat 2025-06-30T09:22:01.494654Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6270, node 2 2025-06-30T09:22:01.514710Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:01.514726Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:01.514729Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:01.514812Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:01.573434Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:01.573471Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:01.575065Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:01.881484Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:01.882602Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:01.882611Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:01.882807Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:10576, port: 10576 2025-06-30T09:22:01.882824Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:01.889083Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:01.931445Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:01.975256Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****vM8Q (0F0C3A10) () has now valid token of ldapuser@ldap 2025-06-30T09:22:02.176192Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670235653733248:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:02.176208Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046da/r3tmp/tmpTYWYDN/pdisk_1.dat 2025-06-30T09:22:02.198682Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:02.204578Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670235653733221:2080] 1751275322175939 != 1751275322175942 TServer::EnableGrpc on GrpcPort 14845, node 3 2025-06-30T09:22:02.217642Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.217656Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.217659Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.217714Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.287310Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.287348Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.288180Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:02.291124Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.293805Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.293823Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.294043Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://qqq:20917 ldap://localhost:20917 ldap://localhost:11111, port: 20917 2025-06-30T09:22:02.294074Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:02.328099Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:02.371153Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:02.418126Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:02.418383Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:02.418400Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: 
(|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:02.463251Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:02.506960Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:02.507449Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****VYug (3C7860F8) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046da/r3tmp/tmpXk9DEb/pdisk_1.dat 2025-06-30T09:22:02.763210Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:02.767027Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3980, node 4 2025-06-30T09:22:02.779068Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.779082Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.779085Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.779151Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.853093Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.853137Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.853945Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:02.880922Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.880996Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.881002Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.881188Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:28710, port: 28710 2025-06-30T09:22:02.881212Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:02.888302Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:02.931348Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-30T09:22:02.975174Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****eUxA (87CD32DF) () has now valid token of ldapuser@ldap 
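For contrast with the successful searches above, the LdapFetchGroupsUseInvalidSearchFilterBad run earlier rejected &(uid=ldapuser)() with "Bad search filter": an RFC 4515 filter must be a single parenthesized expression with no empty components. A rough well-formedness pre-check (illustrative only; the tests rely on the LDAP library's own parser to reject the filter):

    def looks_like_valid_filter(flt: str) -> bool:
        if not (flt.startswith("(") and flt.endswith(")")):
            return False                  # "&(uid=ldapuser)()" fails here
        depth, prev = 0, ""
        for ch in flt:
            if ch == "(":
                depth += 1
            elif ch == ")":
                if prev == "(":           # "()" is an empty component
                    return False
                depth -= 1
                if depth < 0:
                    return False
            prev = ch
        return depth == 0
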
2025-06-30T09:22:03.305290Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521670241046151851:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:03.305332Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046da/r3tmp/tmprzkjzJ/pdisk_1.dat 2025-06-30T09:22:03.328517Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:03.328809Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7521670241046151824:2080] 1751275323305036 != 1751275323305039 TServer::EnableGrpc on GrpcPort 12780, node 5 2025-06-30T09:22:03.352892Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:03.352905Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:03.352907Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:03.352962Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:03.408241Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:03.408275Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:03.409816Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:03.497672Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:03.499103Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:03.499116Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:03.499322Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:3537, port: 3537 2025-06-30T09:22:03.499346Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:03.507184Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:03.555030Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:3537. Invalid credentials 2025-06-30T09:22:03.555294Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****YqDA (10E310E3) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:3537. 
Invalid credentials)' 2025-06-30T09:22:03.882957Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670242124556408:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:03.882980Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046da/r3tmp/tmpuCLiCr/pdisk_1.dat 2025-06-30T09:22:03.922179Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:03.922843Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7521670242124556389:2080] 1751275323882842 != 1751275323882845 TServer::EnableGrpc on GrpcPort 24586, node 6 2025-06-30T09:22:03.938397Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:03.938414Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:03.938431Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:03.938481Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:03.996261Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:03.996301Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:03.997311Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:04.018396Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:04.020536Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:04.020560Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:04.020777Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:25847, port: 25847 2025-06-30T09:22:04.020797Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:04.058568Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:04.103011Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:25847. Invalid credentials 2025-06-30T09:22:04.103304Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****J8uA (F2A456B8) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:25847. 
Invalid credentials)' >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] >> KqpScan::UnionWithPureExpr >> KqpScan::NoTruncate [GOOD] >> KqpScan::MultipleResults >> YdbIndexTable::MultiShardTableOneIndexPkOverlap [GOOD] >> Donor::ConsistentWritesWhenSwitchingToDonorMode [GOOD] >> KqpScan::TaggedScalar >> KqpScan::RightSemiJoinSimple [GOOD] >> KqpScan::SecondaryIndex ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-30T09:22:00.892377Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670228918783738:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:00.893292Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046c0/r3tmp/tmpy81tAv/pdisk_1.dat 2025-06-30T09:22:01.025287Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:01.031319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:01.031361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:01.035233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61660, node 1 2025-06-30T09:22:01.059117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:01.059133Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:01.059136Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:01.059207Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:01.094382Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:01.094484Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:01.094489Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:01.094632Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:12657, port: 12657 2025-06-30T09:22:01.095003Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:01.103442Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:01.150927Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:01.199378Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****X-lg 
(E0CD2844) () has now valid token of ldapuser@ldap 2025-06-30T09:22:01.616626Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670230054040434:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:01.619389Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046c0/r3tmp/tmp4mvBVF/pdisk_1.dat 2025-06-30T09:22:01.653479Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:01.657538Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670230054040335:2080] 1751275321614316 != 1751275321614319 TServer::EnableGrpc on GrpcPort 30494, node 2 2025-06-30T09:22:01.678258Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:01.678282Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:01.678286Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:01.678343Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:01.726780Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:01.726822Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:01.727183Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:01.769002Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:01.771539Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:01.771554Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:01.771733Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:14535, port: 14535 2025-06-30T09:22:01.771754Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:01.795067Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:01.843700Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:01.844003Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:01.844021Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: 
(|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.894864Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.939079Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.939692Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****Riow (C5EC9EA9) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046c0/r3tmp/tmpWebjgo/pdisk_1.dat 2025-06-30T09:22:02.454889Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:02.463043Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16274, node 3 2025-06-30T09:22:02.480398Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.480411Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.480413Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.480470Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.515273Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.515320Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.516376Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:02.538805Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.542857Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.542878Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.543094Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:16952, port: 16952 2025-06-30T09:22:02.543117Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:02.549623Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:02.595036Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****BZjA (D37E5DCA) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046c0/r3tmp/tmpkZHrTS/pdisk_1.dat 
2025-06-30T09:22:03.142759Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:03.148281Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:03.149197Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521670241831234975:2080] 1751275323099709 != 1751275323099712 TServer::EnableGrpc on GrpcPort 24290, node 4 2025-06-30T09:22:03.175313Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:03.175329Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:03.175331Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:03.175389Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:03.223098Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:03.223133Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:03.226718Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:03.518831Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:03.526083Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:03.526102Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:03.526333Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://qqq:1966 ldap://localhost:1966 ldap://localhost:11111, port: 1966 2025-06-30T09:22:03.526357Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:03.548691Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:03.595099Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:03.596764Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:03.596783Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:03.643902Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), 
attributes: memberOf 2025-06-30T09:22:03.688666Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:03.689113Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****oS2A (C6994C7D) () has now valid token of ldapuser@ldap 2025-06-30T09:22:04.017409Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521670243490892916:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:04.017436Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046c0/r3tmp/tmpZEf7AZ/pdisk_1.dat 2025-06-30T09:22:04.034707Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:04.035297Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7521670243490892897:2080] 1751275324017297 != 1751275324017300 TServer::EnableGrpc on GrpcPort 9181, node 5 2025-06-30T09:22:04.050636Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:04.050649Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:04.050651Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:04.050699Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:04.115545Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:04.117604Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:04.117629Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:04.117834Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:10543, port: 10543 2025-06-30T09:22:04.117866Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:04.119689Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-30T09:22:04.122665Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:04.122702Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:04.125307Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:04.166905Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:04.167226Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to 
get nested groups - tree traversal 2025-06-30T09:22:04.167240Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-30T09:22:04.214938Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-30T09:22:04.259866Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-30T09:22:04.260253Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****YRdQ (00234E14) () has now valid token of ldapuser@ldap 2025-06-30T09:22:04.579361Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670242974130306:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:04.579389Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046c0/r3tmp/tmpeCB2Lw/pdisk_1.dat 2025-06-30T09:22:04.603061Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:04.603367Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7521670242974130274:2080] 1751275324579123 != 1751275324579126 TServer::EnableGrpc on GrpcPort 18656, node 6 2025-06-30T09:22:04.613472Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:04.613487Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:04.613489Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:04.613562Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:04.678015Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:04.678046Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:04.678798Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:04.679112Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:04.679697Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:04.679713Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:04.679917Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:4838, 
port: 4838 2025-06-30T09:22:04.679949Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:04.683336Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-30T09:22:04.683368Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:4838. Bad search filter 2025-06-30T09:22:04.683485Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****eT0w (37AED337) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:4838. Bad search filter)' >> KqpScan::AggregateNoColumn [GOOD] >> KqpScan::AggregateNoColumnNoRemaps >> KqpScan::PureExpr [GOOD] >> KqpScan::RestrictSqlV0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneIndexPkOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 27724, MsgBus: 22850 2025-06-30T09:21:41.459861Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670148378733703:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:41.463778Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0033b6/r3tmp/tmpGOblzz/pdisk_1.dat 2025-06-30T09:21:41.543545Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:41.545282Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670148378733512:2080] 1751275301451085 != 1751275301451088 TServer::EnableGrpc on GrpcPort 27724, node 1 2025-06-30T09:21:41.574962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:41.574976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:41.574979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:41.575026Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:41.610996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:41.611046Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:41.615294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22850 TClient is connected to server localhost:22850 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:41.753487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:41.767427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:41.776125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:41.915866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:41.964149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:41.999237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:42.443022Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670152673702407:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.443114Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.451779Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:42.465261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.486585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.508239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.533534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.561054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.608748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.632872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.662282Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670152673703065:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.662318Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.662984Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670152673703070:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.664106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:42.668507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:42.668622Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670152673703075:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:42.758708Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670152673703126:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:43.003507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.227768Z node 1 :KQP_EXECUTER ERROR: kqp_ ... zRjOGYtNDFlMDcwNGYtNzY2YjQxYTctOWVkMzE3Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.631718Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719492. Ctx: { TraceId: 01jz029h6k03qs0h25cdaqb7c1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2RhMTg0MmUtZjg3MmJlYmUtODIwNDA2NWYtMWQyMzBhZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.631843Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719491. Ctx: { TraceId: 01jz029h6kfs9pftbsnq94jgv8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTUwNzNiYTItNTkzY2M5YjUtM2MxMTlmYzctYTJiNmMzMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.633013Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719493. Ctx: { TraceId: 01jz029h6kc60s0c86fz2dsd0g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyMzRjOGYtNDFlMDcwNGYtNzY2YjQxYTctOWVkMzE3Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.633277Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719494. Ctx: { TraceId: 01jz029h6kfs9pftbsnq94jgv8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTUwNzNiYTItNTkzY2M5YjUtM2MxMTlmYzctYTJiNmMzMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.633701Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719495. Ctx: { TraceId: 01jz029h6k03qs0h25cdaqb7c1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2RhMTg0MmUtZjg3MmJlYmUtODIwNDA2NWYtMWQyMzBhZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.634338Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719496. Ctx: { TraceId: 01jz029h6k03qs0h25cdaqb7c1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2RhMTg0MmUtZjg3MmJlYmUtODIwNDA2NWYtMWQyMzBhZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.635981Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719497. 
Ctx: { TraceId: 01jz029h6p69jvae645zepqtyd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTE4MGMwNTktYjA5M2IzZDAtMTg1YmEyZDEtNDAxZGI0Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.637053Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719498. Ctx: { TraceId: 01jz029h6p69jvae645zepqtyd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTE4MGMwNTktYjA5M2IzZDAtMTg1YmEyZDEtNDAxZGI0Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.640289Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719499. Ctx: { TraceId: 01jz029h6t8772g1cqgwx6nbfd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjNhM2ZjNmUtOWQ5OGMxMDEtNmJiMjMyNGEtMTg4YzZiMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.641291Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719500. Ctx: { TraceId: 01jz029h6t8772g1cqgwx6nbfd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjNhM2ZjNmUtOWQ5OGMxMDEtNmJiMjMyNGEtMTg4YzZiMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.644334Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719501. Ctx: { TraceId: 01jz029h73d4x9v8fd74gjmkr0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTAwYWIyZDItNzRjZGQ1M2MtOGFmNDIxZGQtMjRhZmQ4NTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.644682Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719502. Ctx: { TraceId: 01jz029h73btzpg4ne9yc471kr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTE4MGMwNTktYjA5M2IzZDAtMTg1YmEyZDEtNDAxZGI0Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.646213Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719503. Ctx: { TraceId: 01jz029h74apde6mdfpf07043x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTUwNzNiYTItNTkzY2M5YjUtM2MxMTlmYzctYTJiNmMzMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.646712Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719504. Ctx: { TraceId: 01jz029h748sxrb7p9vhtxbh8d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2RhMTg0MmUtZjg3MmJlYmUtODIwNDA2NWYtMWQyMzBhZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.646722Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719505. Ctx: { TraceId: 01jz029h756q3ez7bqghhn2qmt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyMzRjOGYtNDFlMDcwNGYtNzY2YjQxYTctOWVkMzE3Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.647876Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719506. Ctx: { TraceId: 01jz029h74apde6mdfpf07043x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTUwNzNiYTItNTkzY2M5YjUtM2MxMTlmYzctYTJiNmMzMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.648362Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719507. 
Ctx: { TraceId: 01jz029h756q3ez7bqghhn2qmt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyMzRjOGYtNDFlMDcwNGYtNzY2YjQxYTctOWVkMzE3Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.648852Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719508. Ctx: { TraceId: 01jz029h748sxrb7p9vhtxbh8d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2RhMTg0MmUtZjg3MmJlYmUtODIwNDA2NWYtMWQyMzBhZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.649952Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719509. Ctx: { TraceId: 01jz029h781kker8khmzrp2jgk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTE4MGMwNTktYjA5M2IzZDAtMTg1YmEyZDEtNDAxZGI0Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.650788Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719510. Ctx: { TraceId: 01jz029h784t83adyfcastktg6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjNhM2ZjNmUtOWQ5OGMxMDEtNmJiMjMyNGEtMTg4YzZiMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.652852Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719511. Ctx: { TraceId: 01jz029h784t83adyfcastktg6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjNhM2ZjNmUtOWQ5OGMxMDEtNmJiMjMyNGEtMTg4YzZiMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.654498Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719512. Ctx: { TraceId: 01jz029h7debqrq4g0s5cype42, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTE4MGMwNTktYjA5M2IzZDAtMTg1YmEyZDEtNDAxZGI0Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.655112Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719513. Ctx: { TraceId: 01jz029h7dckpyfz9tc88qy2ec, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTAwYWIyZDItNzRjZGQ1M2MtOGFmNDIxZGQtMjRhZmQ4NTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.656078Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719514. Ctx: { TraceId: 01jz029h7eed9dtd476mjks829, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTUwNzNiYTItNTkzY2M5YjUtM2MxMTlmYzctYTJiNmMzMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.656165Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719515. Ctx: { TraceId: 01jz029h7e4d51pa838ewnjn8w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2RhMTg0MmUtZjg3MmJlYmUtODIwNDA2NWYtMWQyMzBhZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.656969Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719516. Ctx: { TraceId: 01jz029h7dckpyfz9tc88qy2ec, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTAwYWIyZDItNzRjZGQ1M2MtOGFmNDIxZGQtMjRhZmQ4NTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.658144Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719517. 
Ctx: { TraceId: 01jz029h7eed9dtd476mjks829, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTUwNzNiYTItNTkzY2M5YjUtM2MxMTlmYzctYTJiNmMzMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.658573Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719519. Ctx: { TraceId: 01jz029h7e4d51pa838ewnjn8w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2RhMTg0MmUtZjg3MmJlYmUtODIwNDA2NWYtMWQyMzBhZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.658773Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719518. Ctx: { TraceId: 01jz029h7gbfx06mk8h39v2gd6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyMzRjOGYtNDFlMDcwNGYtNzY2YjQxYTctOWVkMzE3Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-30T09:22:04.659759Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719520. Ctx: { TraceId: 01jz029h7e4d51pa838ewnjn8w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2RhMTg0MmUtZjg3MmJlYmUtODIwNDA2NWYtMWQyMzBhZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.660737Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719521. Ctx: { TraceId: 01jz029h7jcd86dh49t7ex2e62, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjNhM2ZjNmUtOWQ5OGMxMDEtNmJiMjMyNGEtMTg4YzZiMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:04.666197Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719522. Ctx: { TraceId: 01jz029h7r92r8qwm8ntrn2nj9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTE4MGMwNTktYjA5M2IzZDAtMTg1YmEyZDEtNDAxZGI0Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS finished with status: SUCCESS 2025-06-30T09:22:04.667513Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719523. Ctx: { TraceId: 01jz029h7r92r8qwm8ntrn2nj9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTE4MGMwNTktYjA5M2IzZDAtMTg1YmEyZDEtNDAxZGI0Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS >> KqpScan::MultipleResults [GOOD] >> KqpScan::MiltiExprWithPure >> KqpScan::AggregateWithFunction [GOOD] >> KqpScan::CountDistinct >> KqpScan::UnionWithPureExpr [GOOD] >> TConsoleTests::TestRemoveAttributesExtSubdomain [GOOD] >> KqpScan::YqlTableSample >> TImportWithRebootsTests::ShouldSucceedOnTableWithChecksum [GOOD] >> TImportWithRebootsTests::ShouldSucceedOnSingleView >> KqpScan::DecimalColumn >> KqpScan::UnionBasic >> YdbIndexTable::MultiShardTableOneUniqIndex [GOOD] >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn >> KqpScan::AggregateByColumn >> KqpScan::TaggedScalar [GOOD] >> KqpScan::TooManyComputeActors >> KqpScan::SecondaryIndex [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ConsistentWritesWhenSwitchingToDonorMode [GOOD] Test command err: RandomSeed# 15712138948003526587 Reassign# 1 -- VSlotId { NodeId: 2 PDiskId: 1000 VSlotId: 1000 } GroupId: 2181038080 GroupGeneration: 1 VDiskKind: "Default" FailDomainIdx: 1 VDiskMetrics { SatisfactionRank: 0 VSlotId { NodeId: 2 PDiskId: 1000 VSlotId: 1000 } State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 1000 } Status: "READY" Ready: true Put# [1:1:1:0:0:47:0] Put# [1:1:2:0:0:14:0] 2025-06-30T09:19:03.139699Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:19:03.140133Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 18255868181687490554] 2025-06-30T09:19:03.141294Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:1:0:0:47:5] 2025-06-30T09:19:03.141309Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:2:0:0:14:6] 2025-06-30T09:19:03.141361Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 2 PartsResurrected# 2 Put# [1:1:3:0:0:4:0] Put# [1:1:4:0:0:8:0] Put# [1:1:5:0:0:29:0] Put# [1:1:6:0:0:64:0] Put# [1:1:7:0:0:89:0] Put# [1:1:8:0:0:62:0] Put# [1:1:9:0:0:72:0] Put# [1:1:10:0:0:44:0] Put# [1:1:11:0:0:61:0] Put# [1:1:12:0:0:32:0] Put# [1:1:13:0:0:40:0] Put# [1:1:14:0:0:74:0] Put# [1:1:15:0:0:4:0] Put# [1:1:16:0:0:15:0] Put# [1:1:17:0:0:13:0] Put# [1:1:18:0:0:94:0] Put# [1:1:19:0:0:4:0] Put# [1:1:20:0:0:49:0] Put# [1:1:21:0:0:91:0] Put# [1:1:22:0:0:93:0] Put# [1:1:23:0:0:69:0] Put# [1:1:24:0:0:53:0] Put# [1:1:25:0:0:72:0] Put# [1:1:26:0:0:28:0] Put# [1:1:27:0:0:56:0] Put# [1:1:28:0:0:13:0] Put# [1:1:29:0:0:70:0] Put# [1:1:30:0:0:42:0] Put# [1:1:31:0:0:70:0] Put# [1:1:32:0:0:61:0] Put# [1:1:33:0:0:60:0] Put# [1:1:34:0:0:36:0] Put# [1:1:35:0:0:75:0] Put# [1:1:36:0:0:87:0] Put# [1:1:37:0:0:14:0] Put# [1:1:38:0:0:9:0] Put# [1:1:39:0:0:40:0] Put# [1:1:40:0:0:49:0] Put# [1:1:41:0:0:52:0] Put# [1:1:42:0:0:5:0] Put# [1:1:43:0:0:32:0] Put# [1:1:44:0:0:58:0] Put# [1:1:45:0:0:18:0] Put# [1:1:46:0:0:85:0] Put# [1:1:47:0:0:15:0] Put# [1:1:48:0:0:8:0] Put# [1:1:49:0:0:93:0] Put# [1:1:50:0:0:59:0] Put# [1:1:51:0:0:40:0] Put# [1:1:52:0:0:80:0] Put# [1:1:53:0:0:3:0] Put# [1:1:54:0:0:47:0] Put# [1:1:55:0:0:82:0] Put# [1:1:56:0:0:15:0] Put# [1:1:57:0:0:72:0] Put# [1:1:58:0:0:94:0] Put# [1:1:59:0:0:74:0] Put# [1:1:60:0:0:50:0] Put# 
[1:1:61:0:0:40:0] Put# [1:1:62:0:0:85:0] Put# [1:1:63:0:0:27:0] Put# [1:1:64:0:0:57:0] Put# [1:1:65:0:0:50:0] Put# [1:1:66:0:0:4:0] Put# [1:1:67:0:0:73:0] Put# [1:1:68:0:0:65:0] Put# [1:1:69:0:0:1:0] Put# [1:1:70:0:0:9:0] Put# [1:1:71:0:0:24:0] Put# [1:1:72:0:0:70:0] Put# [1:1:73:0:0:52:0] Put# [1:1:74:0:0:32:0] Put# [1:1:75:0:0:80:0] Put# [1:1:76:0:0:97:0] Put# [1:1:77:0:0:99:0] Put# [1:1:78:0:0:84:0] Put# [1:1:79:0:0:29:0] Put# [1:1:80:0:0:9:0] Put# [1:1:81:0:0:32:0] Put# [1:1:82:0:0:83:0] Put# [1:1:83:0:0:9:0] Put# [1:1:84:0:0:5:0] Put# [1:1:85:0:0:13:0] Put# [1:1:86:0:0:86:0] Put# [1:1:87:0:0:30:0] Put# [1:1:88:0:0:27:0] Put# [1:1:89:0:0:40:0] Put# [1:1:90:0:0:99:0] Put# [1:1:91:0:0:70:0] Put# [1:1:92:0:0:82:0] Put# [1:1:93:0:0:15:0] Put# [1:1:94:0:0:76:0] Put# [1:1:95:0:0:34:0] Put# [1:1:96:0:0:53:0] Put# [1:1:97:0:0:58:0] Put# [1:1:98:0:0:78:0] Put# [1:1:99:0:0:34:0] Put# [1:1:100:0:0:53:0] Put# [1:1:101:0:0:37:0] Put# [1:1:102:0:0:77:0] Put# [1:1:103:0:0:25:0] Put# [1:1:104:0:0:71:0] Put# [1:1:105:0:0:4:0] Put# [1:1:106:0:0:4:0] Put# [1:1:107:0:0:82:0] Put# [1:1:108:0:0:59:0] Put# [1:1:109:0:0:57:0] Put# [1:1:110:0:0:99:0] Put# [1:1:111:0:0:39:0] Put# [1:1:112:0:0:83:0] Put# [1:1:113:0:0:71:0] Put# [1:1:114:0:0:88:0] Put# [1:1:115:0:0:88:0] Put# [1:1:116:0:0:81:0] Put# [1:1:117:0:0:90:0] Put# [1:1:118:0:0:11:0] Put# [1:1:119:0:0:7:0] Put# [1:1:120:0:0:11:0] Put# [1:1:121:0:0:60:0] Put# [1:1:122:0:0:40:0] Put# [1:1:123:0:0:83:0] Put# [1:1:124:0:0:27:0] Put# [1:1:125:0:0:67:0] Put# [1:1:126:0:0:92:0] Put# [1:1:127:0:0:14:0] Put# [1:1:128:0:0:53:0] Put# [1:1:129:0:0:82:0] Put# [1:1:130:0:0:71:0] Put# [1:1:131:0:0:100:0] Put# [1:1:132:0:0:33:0] Put# [1:1:133:0:0:57:0] Put# [1:1:134:0:0:53:0] Put# [1:1:135:0:0:27:0] Put# [1:1:136:0:0:40:0] Put# [1:1:137:0:0:87:0] Put# [1:1:138:0:0:83:0] Put# [1:1:139:0:0:22:0] Put# [1:1:140:0:0:96:0] Put# [1:1:141:0:0:59:0] Put# [1:1:142:0:0:73:0] Put# [1:1:143:0:0:66:0] Put# [1:1:144:0:0:90:0] Put# [1:1:145:0:0:47:0] Put# [1:1:146:0:0:98:0] Put# [1:1:147:0:0:54:0] Put# [1:1:148:0:0:63:0] Put# [1:1:149:0:0:13:0] Put# [1:1:150:0:0:37:0] Put# [1:1:151:0:0:78:0] Put# [1:1:152:0:0:26:0] Put# [1:1:153:0:0:54:0] Put# [1:1:154:0:0:24:0] Put# [1:1:155:0:0:76:0] Put# [1:1:156:0:0:34:0] Put# [1:1:157:0:0:28:0] Put# [1:1:158:0:0:9:0] Put# [1:1:159:0:0:60:0] Put# [1:1:160:0:0:5:0] Put# [1:1:161:0:0:26:0] Put# [1:1:162:0:0:96:0] Put# [1:1:163:0:0:73:0] Put# [1:1:164:0:0:81:0] Put# [1:1:165:0:0:78:0] Put# [1:1:166:0:0:31:0] Put# [1:1:167:0:0:31:0] Put# [1:1:168:0:0:84:0] Put# [1:1:169:0:0:78:0] Put# [1:1:170:0:0:3:0] Put# [1:1:171:0:0:52:0] Put# [1:1:172:0:0:16:0] Put# [1:1:173:0:0:59:0] Put# [1:1:174:0:0:14:0] Put# [1:1:175:0:0:66:0] Put# [1:1:176:0:0:39:0] Put# [1:1:177:0:0:21:0] Put# [1:1:178:0:0:11:0] Put# [1:1:179:0:0:34:0] Put# [1:1:180:0:0:74:0] Put# [1:1:181:0:0:90:0] Put# [1:1:182:0:0:43:0] Put# [1:1:183:0:0:46:0] Put# [1:1:184:0:0:33:0] Put# [1:1:185:0:0:47:0] Put# [1:1:186:0:0:25:0] Put# [1:1:187:0:0:24:0] Put# [1:1:188:0:0:85:0] Put# [1:1:189:0:0:13:0] Put# [1:1:190:0:0:3:0] Put# [1:1:191:0:0:86:0] Put# [1:1:192:0:0:70:0] Put# [1:1:193:0:0:52:0] Put# [1:1:194:0:0:56:0] Put# [1:1:195:0:0:45:0] Put# [1:1:196:0:0:8:0] Put# [1:1:197:0:0:86:0] Put# [1:1:198:0:0:8:0] Put# [1:1:199:0:0:70:0] Put# [1:1:200:0:0:74:0] Put# [1:1:201:0:0:89:0] Put# [1:1:202:0:0:20:0] Put# [1:1:203:0:0:93:0] Put# [1:1:204:0:0:69:0] Put# [1:1:205:0:0:2:0] Put# [1:1:206:0:0:92:0] Put# [1:1:207:0:0:17:0] Put# [1:1:208:0:0:2:0] Put# [1:1:209:0:0:65:0] Put# [1:1:210:0:0:59:0] Put# 
[1:1:211:0:0:63:0] Put# ... Put# [1:21:7130:0:0:6:0] (long run of repetitive blob Put# records from the blob storage test elided)
>> KqpScan::RestrictSqlV0 [GOOD]
>> KqpScan::JoinSimple
>> KqpScan::MiltiExprWithPure [GOOD]
>> KqpScan::AggregateNoColumnNoRemaps [GOOD]
>> KqpScan::AggregateEmptySum
>> StatisticsSaveLoad::Simple [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/console/ut/unittest >> TConsoleTests::TestRemoveAttributesExtSubdomain [GOOD]
Test command err: 2025-06-30T09:20:41.320181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:41.320211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:41.320218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:41.320224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:41.320240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:41.320245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:41.320257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:41.320271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:41.320391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources
configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:41.320486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:41.325803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:41.325827Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:41.334876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:41.336904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:41.336954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046578944 2025-06-30T09:20:41.340407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:41.340526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:41.340614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.340728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: dc-1, pathId: [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:41.341475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:41.341513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:41.341722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:41.341730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:41.341747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:41.341753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046578944, domainId: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:41.341758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:41.341791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.393942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "hdd" } StoragePools { Name: "" Kind: "hdd-3" } StoragePools { Name: "" Kind: "hdd-1" } StoragePools { Name: "" Kind: "hdd-2" } } } TxId: 1 TabletId: 72057594046578944 , at schemeshard: 72057594046578944 2025-06-30T09:20:41.394009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //dc-1, opId: 1:0, at schemeshard: 72057594046578944 
2025-06-30T09:20:41.394069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 0 2025-06-30T09:20:41.394076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046578944, LocalPathId: 1] source path: 2025-06-30T09:20:41.394119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046578944 2025-06-30T09:20:41.394131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:41.396333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046578944 PathId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.396404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //dc-1 2025-06-30T09:20:41.396478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.396491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046578944 2025-06-30T09:20:41.396497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:41.396503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:41.397324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.397343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046578944 2025-06-30T09:20:41.397349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:41.397826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.397852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.397860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.397867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:41.398455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 
message:Transaction { AffectedSet { TabletId: 72057594046578944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:41.399040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046578944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:41.399121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:41.399510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.399520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:20:41.399528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:42.070382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:42.070447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046578944, at schemeshard: 72057594046578944 2025-06-30T09:20:42.070457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:42.070534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:42.070543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:42.070579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 1 2025-06-30T09:20:42.070591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:42.071421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:42.071440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046578944, txId: 1, path id: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:42.07148 ... 
: [OwnerId: 72057594046578944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046578944, LocalPathId: 3], Generation: 2, ActorId:[163:1609:2638], EffectiveACLVersion: 0, SubdomainVersion: 3, UserAttributesVersion: 3, TenantHive: 18446744073709551615, TenantSysViewProcessor: 72075186233409553, TenantStatisticsAggregator: 72075186233409554, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 3, tenantHive: 18446744073709551615, tenantSysViewProcessor: 72075186233409553, at schemeshard: 72057594046578944 2025-06-30T09:22:05.435670Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72075186233409546, cookie: 0 2025-06-30T09:22:05.436201Z node 163 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [163:1011:2309], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1/users/tenant-1 PathId: [OwnerId: 72075186233409546, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "dc-1/users/tenant-1" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 3 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 3 ProcessingParams { Version: 3 PlanResolution: 10 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 Mediators: 72075186233409552 SchemeShard: 72075186233409546 SysViewProcessor: 72075186233409553 StatisticsAggregator: 72075186233409554 } DomainKey { SchemeShard: 72057594046578944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 9 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046578944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { } SecurityState { Audience: "/dc-1/users/tenant-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "name1" Value: "value1" } } PathId: 1 PathOwnerId: 72075186233409546 } 2025-06-30T09:22:05.436297Z node 163 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [163:1011:2309], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1/users/tenant-1 PathId: [OwnerId: 72075186233409546, LocalPathId: 1] DescribeSchemeResult: Status: 
StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "dc-1/users/tenant-1" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 3 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 3 ProcessingParams { Version: 3 PlanResolution: 10 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 Mediators: 72075186233409552 SchemeShard: 72075186233409546 SysViewProcessor: 72075186233409553 StatisticsAggregator: 72075186233409554 } DomainKey { SchemeShard: 72057594046578944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 9 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046578944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { } SecurityState { Audience: "/dc-1/users/tenant-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "name1" Value: "value1" } } PathId: 1 PathOwnerId: 72075186233409546 }, by path# { Subscriber: { Subscriber: [163:1475:2557] DomainOwnerId: 72057594046578944 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72075186233409546, LocalPathId: 1] DomainId: [OwnerId: 72057594046578944, LocalPathId: 3] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# { Subscriber: { Subscriber: [163:1475:2557] DomainOwnerId: 72057594046578944 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72075186233409546, LocalPathId: 1] DomainId: [OwnerId: 72057594046578944, LocalPathId: 3] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 } 2025-06-30T09:22:05.436989Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046578944, cookie: 281474976715661 2025-06-30T09:22:05.437027Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046578944, LocalPathId: 3], at schemeshard: 72057594046578944 Reply: Status: StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "tenant-1" PathId: 3 SchemeshardId: 72057594046578944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976715657 CreateStep: 1000 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 3 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 10 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 Mediators: 72075186233409552 SchemeShard: 72075186233409546 SysViewProcessor: 72075186233409553 StatisticsAggregator: 72075186233409554 } DomainKey { SchemeShard: 72057594046578944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 9 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046578944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "name1" Value: "value1" } } PathId: 3 PathOwnerId: 72057594046578944 Reply: Status: StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "tenant-1" PathId: 3 SchemeshardId: 72057594046578944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976715657 CreateStep: 1000 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 3 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 10 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 Mediators: 72075186233409552 SchemeShard: 72075186233409546 SysViewProcessor: 72075186233409553 StatisticsAggregator: 72075186233409554 } DomainKey { SchemeShard: 72057594046578944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 9 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046578944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 
MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "name1" Value: "value1" } } PathId: 3 PathOwnerId: 72057594046578944
>> SelfHeal::TestOneMaintenanceRequestMirror3dc [GOOD]
>> SelfHeal::TestOneMaintenanceRequest4Plus2Block
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Simple [GOOD]
Test command err: 2025-06-30T09:22:03.810721Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:420:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:03.810791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:03.810804Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003278/r3tmp/tmptQ8NR8/pdisk_1.dat 2025-06-30T09:22:03.917999Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8097, node 1 2025-06-30T09:22:04.021139Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:04.021163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:04.021169Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:04.021267Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:04.021946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:04.113488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:04.113530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:04.126453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29647 2025-06-30T09:22:04.523725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:22:05.464134Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-30T09:22:05.481168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:05.481201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:05.525137Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:22:05.525898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:05.690419Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:05.715470Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.715686Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.715905Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.715958Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.716020Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.716055Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.716076Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.716103Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.716123Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.909580Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:05.909632Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:05.924642Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:06.035185Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:06.051542Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-30T09:22:06.051582Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-30T09:22:06.059953Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-30T09:22:06.060311Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-30T09:22:06.060344Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-30T09:22:06.060351Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-30T09:22:06.060358Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-30T09:22:06.060366Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-30T09:22:06.060373Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-30T09:22:06.060382Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-30T09:22:06.060799Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-30T09:22:06.079922Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7960: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:22:06.079965Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7990: ConnectToSA(), pipe client id: [2:1799:2566], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:22:06.080774Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1804:2570] 2025-06-30T09:22:06.084248Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-30T09:22:06.084878Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1848:2595] 2025-06-30T09:22:06.085372Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1848:2595], schemeshard id = 72075186224037897 2025-06-30T09:22:06.095396Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-30T09:22:06.095423Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-30T09:22:06.095438Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-30T09:22:06.100112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.102814Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-30T09:22:06.102870Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-30T09:22:06.267630Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-30T09:22:06.382418Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-30T09:22:06.446679Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-30T09:22:07.102956Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:07.103709Z node 1 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-30T09:22:07.103853Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-30T09:22:07.106508Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List<Uint32>; DECLARE $data AS List<String>; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-30T09:22:07.107451Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2171:3038], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.107473Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2187:3043], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.107484Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.109176Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:07.123575Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2191:3046], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:22:07.426696Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2285:3079] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:07.514514Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2307:3091]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:22:07.514585Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-30T09:22:07.514598Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2309:3093] 2025-06-30T09:22:07.514627Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2309:3093] 2025-06-30T09:22:07.514844Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2310:2803] 2025-06-30T09:22:07.514923Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2309:3093], server id = [2:2310:2803], tablet id = 72075186224037894, status = OK 2025-06-30T09:22:07.514967Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2310:2803], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-30T09:22:07.514986Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-30T09:22:07.515031Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-30T09:22:07.515042Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2307:3091], StatRequests.size() = 1 2025-06-30T09:22:07.576664Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=YTVkMzBjZTgtYjc1NjkyNzgtNzNhNzYzNTItNWE0MjExOTU=, TxId: 2025-06-30T09:22:07.576700Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=YTVkMzBjZTgtYjc1NjkyNzgtNzNhNzYzNTItNWE0MjExOTU=, TxId: 2025-06-30T09:22:07.577009Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-30T09:22:07.577615Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-30T09:22:07.599215Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2343:3114]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:22:07.599289Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-30T09:22:07.599298Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2343:3114], StatRequests.size() = 1 2025-06-30T09:22:07.654033Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=MTE2YjQyZDgtMjQyYzRmNzYtNTM4ZWM4OWEtMjM4NWZlNzE=, TxId: 01jz029m4r6mnf4c0h912mghqc 2025-06-30T09:22:07.654083Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=MTE2YjQyZDgtMjQyYzRmNzYtNTM4ZWM4OWEtMjM4NWZlNzE=, TxId: 01jz029m4r6mnf4c0h912mghqc 2025-06-30T09:22:07.654701Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-30T09:22:07.655416Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-30T09:22:07.668308Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=NDJhZTcwOTgtNTJkYjY3MS1kMmQ3YjI1ZC00MzI5MGIxZg==, TxId: 01jz029m576bhkjmv81warffjy 2025-06-30T09:22:07.668352Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=NDJhZTcwOTgtNTJkYjY3MS1kMmQ3YjI1ZC00MzI5MGIxZg==, TxId: 01jz029m576bhkjmv81warffjy
>> KqpScan::YqlTableSample [GOOD]
>> KqpScan::DecimalColumn [GOOD]
>> KqpScan::CustomWindow
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::MiltiExprWithPure [GOOD]
Test command err: Trying to start YDB, gRPC: 9982, MsgBus: 27528 2025-06-30T09:22:04.115829Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670247107990644:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:04.116206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002ec3/r3tmp/tmpFNyah6/pdisk_1.dat 2025-06-30T09:22:04.176638Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:04.176904Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670247107990540:2080] 1751275324114087 != 1751275324114090 TServer::EnableGrpc on GrpcPort 9982,
node 1 2025-06-30T09:22:04.190929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:04.190942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:04.190944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:04.190979Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27528 2025-06-30T09:22:04.216411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:04.216445Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:04.218100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27528 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:04.271016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:04.278693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:22:04.291691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:04.320520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.374378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:04.403442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.548431Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670247107992143:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.548470Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.603398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.661490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.672096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.689622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.709627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.725611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.741359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.773593Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670247107992797:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.773637Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.773813Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670247107992802:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.775101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:04.779379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:04.779456Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670247107992804:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:04.848342Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670247107992855:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:05.023492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.117792Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:05.135825Z node 1 :KQP_RESOURCE_MANAGER WARN: ... de: 1060
:3:13: Error: Scan query should have a single result set., code: 2029 Trying to start YDB, gRPC: 14780, MsgBus: 25784 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002ec3/r3tmp/tmp8TM287/pdisk_1.dat 2025-06-30T09:22:06.883265Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:06.883509Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14780, node 3 2025-06-30T09:22:06.899375Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:06.899390Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:06.899392Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:06.899443Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25784 TClient is connected to server localhost:25784 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:06.964619Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:06.964662Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:06.965058Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:06.965783Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:06.998892Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:07.017474Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:07.051864Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.075710Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:07.408065Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670256364400152:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.408091Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.414656Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.490250Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.515045Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.559332Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.589065Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.613652Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.650590Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.683620Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670256364400807:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.683649Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.683935Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670256364400812:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.684964Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:07.688967Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:07.689068Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670256364400814:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:07.781912Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670256364400865:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:07.863441Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:07.963677Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7521670256364401145:2474], status: PRECONDITION_FAILED, issues:
: Error: Execution, code: 1060
:3:13: Error: Scan query should have a single result set., code: 2029
: Error: Execution, code: 1060
:3:13: Error: Scan query should have a single result set., code: 2029 2025-06-30T09:22:07.963770Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=3&id=NzM5YTI5YWItNTgxMTRjYi1jMzQ4OGRhOC1jNjgxY2QwZg==, ActorId: [3:7521670256364401138:2470], ActorState: ExecuteState, TraceId: 01jz029meee17hz2pzypyew8wb, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id:
: Error: Execution, code: 1060
:3:13: Error: Scan query should have a single result set., code: 2029
: Error: Execution, code: 1060
:3:13: Error: Scan query should have a single result set., code: 2029 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::SecondaryIndex [GOOD] Test command err: Trying to start YDB, gRPC: 11579, MsgBus: 7827 2025-06-30T09:22:04.331522Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670245184697063:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:04.331555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002edf/r3tmp/tmpqkgh7u/pdisk_1.dat 2025-06-30T09:22:04.404528Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11579, node 1 2025-06-30T09:22:04.428011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:04.428022Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:04.428024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:04.428074Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:04.430402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:04.430432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:04.431771Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7827 TClient is connected to server localhost:7827 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:22:04.507214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:04.512568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:22:04.520267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:04.546794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:04.578553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:04.597427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:04.813462Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670245184698436:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.813490Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.863101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.879206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.887155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.951050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.971371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.002084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.022429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.053035Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670249479666389:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:05.053064Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:05.053286Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670249479666394:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:05.054463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:05.058525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-30T09:22:05.058605Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670249479666396:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:22:05.154716Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670249479666447:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:05.332445Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:05.478936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:05.612439Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521670249479666999:2494] TxId: 281474976710675. Ctx: { TraceId: 01jz029j2v3vjxjj9aqj25t99c, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node ... r subscription [2:7521670251726807621:2080] 1751275326039496 != 1751275326039499 TClient is connected to server localhost:29632 2025-06-30T09:22:06.151166Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:06.151195Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:06.159120Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29632 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:06.224962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:22:06.226889Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:06.232685Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:06.246549Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:06.276029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:06.289858Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:06.508545Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670251726809216:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.508566Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.516914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.529985Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.542892Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.603045Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.616594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.629766Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.648142Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.666389Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670251726809872:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.666423Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.666529Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670251726809877:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.667565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:06.670159Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670251726809879:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:06.744361Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670251726809930:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:06.907823Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.921607Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.935993Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.045601Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:07.321462Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275327361, txId: 281474976715677] shutting down 2025-06-30T09:22:07.456689Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275327487, txId: 281474976715679] shutting down 2025-06-30T09:22:07.584538Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275327613, txId: 281474976715681] shutting down 2025-06-30T09:22:07.713228Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275327746, txId: 281474976715683] shutting down >> KqpScan::UnionBasic [GOOD] >> KqpScan::UnionMixed >> StatisticsSaveLoad::ForbidAccess [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::RestrictSqlV0 [GOOD] Test command err: Trying to start YDB, gRPC: 23708, MsgBus: 10545 2025-06-30T09:22:04.903006Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670247165802210:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:04.903458Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/002eb3/r3tmp/tmpvvWKTh/pdisk_1.dat 2025-06-30T09:22:04.998274Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:05.000687Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670247165802096:2080] 1751275324900818 != 1751275324900821 TServer::EnableGrpc on GrpcPort 23708, node 1 2025-06-30T09:22:05.009898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:05.009929Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:05.010586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:05.032127Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:05.032154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:05.032157Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:05.032214Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10545 TClient is connected to server localhost:10545 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:05.203944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:05.226871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:05.247721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:05.332615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:05.391337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.416901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:05.615996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670251460771001:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:05.616043Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:05.697765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.720143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.744870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.757490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.772474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.788468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.802895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.832455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670251460771651:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:05.832494Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:05.832583Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670251460771656:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:05.833725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:05.838286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:05.838383Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670251460771658:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:05.892893Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670251460771709:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:05.903600Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:06.171497Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275326167, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 4376, MsgBus: 18558 2025-06-30T09:22:06.519528Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670254968681026:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:06.520398Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002eb3/r3tmp/tmpphyDq3/pdisk_1.dat 2025-06-30T09:22:06.561807Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:06.562818Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670254968681005:2080] 1751275326519243 != 1751275326519246 TServer::EnableGrpc on GrpcPort 4376, node 2 2025-06-30T09:22:06.583426Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:06.583444Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:06.583447Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:06.583503Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18558 2025-06-30T09:22:06.643289Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:06.643325Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:06.647396Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18558 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:22:06.687731Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:06.690049Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:06.699496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:06.760144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:06.783996Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:06.797371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:07.055536Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670259263649899:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.055573Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.062964Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.076590Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.087293Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.103280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.159679Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.175399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.233546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.257010Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670259263650557:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.257039Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.257216Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670259263650562:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.258378Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:07.263201Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:07.263313Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670259263650564:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:07.359902Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670259263650615:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:07.523960Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
:1:0: Error: V0 syntax is disabled 2025-06-30T09:22:07.869392Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521670259263650895:2474], status: GENERIC_ERROR, issues:
:1:0: Error: V0 syntax is disabled 2025-06-30T09:22:07.869503Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=YWU3ZDQyNy04MTg4MTFjYS1hYWE3NThkMi00NzNmMTNiZQ==, ActorId: [2:7521670259263650888:2470], ActorState: ExecuteState, TraceId: 01jz029mbq8xf0e754wn9yset0, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpScan::CountDistinct [GOOD] >> KqpScan::BoolFlag ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::YqlTableSample [GOOD] Test command err: Trying to start YDB, gRPC: 19076, MsgBus: 2900 2025-06-30T09:22:05.503313Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670247309996868:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:05.505135Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e97/r3tmp/tmpbVJ2aK/pdisk_1.dat 2025-06-30T09:22:05.601578Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19076, node 1 2025-06-30T09:22:05.622294Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:05.622309Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:05.622311Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:05.622355Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2900 2025-06-30T09:22:05.667419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:05.667450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:05.668308Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2900 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:22:05.735741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:05.744997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:05.751271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:05.796977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:05.864582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:05.880410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:06.151484Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670251604965705:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.151509Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.244702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.254318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.263202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.279270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.296341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.306011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.320953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.346327Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670251604966356:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.346377Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.346430Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670251604966361:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.347280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:06.349147Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670251604966363:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:06.451121Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670251604966414:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:06.505170Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:06.680491Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521670251604966703:2470] TxId: 281474976715673. Ctx: { TraceId: 01jz029k601dkpmbc4127ag7bd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTAwYzgzMzctNTI4ODNiNTEtMThlNTUwMGEtMzNiNzlhNjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-30T09:22:06.682434Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275326678, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 11412, MsgBus: 64436 2025-06-30T09:22:07.058340Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670259501260523:2245];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e97/r3tmp/tmpCz37Ey/pdisk_1.dat 2025-06-30T09:22:07.067819Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:07.074955Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11412, node 2 2025-06-30T09:22:07.085462Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:07.085476Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:07.085479Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:07.085528Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64436 TClient is connected to server localhost:64436 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:07.162322Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:07.162355Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:07.162753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:07.163381Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:22:07.164878Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:07.166982Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:07.178780Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:07.205134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:07.239996Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:07.633783Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670259501261888:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.633822Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.639611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.660063Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.682761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.708761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.723751Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.757305Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.784456Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.831246Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670259501262541:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.831285Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.831491Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670259501262546:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.832673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:07.837118Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670259501262548:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:07.935981Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670259501262599:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:08.062913Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:08.339974Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521670263796230175:2474], status: UNSUPPORTED, issues:
: Error: Default error
:1:15: Error: ATOM evaluation is not supported in YDB queries., code: 2030 2025-06-30T09:22:08.340522Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=NWExNGY4MTAtMmU3ZGJlZjktZDZhZTJjOC03YmU3NmIzZg==, ActorId: [2:7521670263796230167:2470], ActorState: ExecuteState, TraceId: 01jz029mta8p1adx2ma4w93c2p, ReplyQueryCompileError, status UNSUPPORTED remove tx with tx_id: >> KqpScan::TooManyComputeActors [GOOD] >> KqpScan::AggregateEmptySum [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::ForbidAccess [GOOD] Test command err: 2025-06-30T09:22:03.262766Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:420:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:03.262839Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:03.262867Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003279/r3tmp/tmpERoACe/pdisk_1.dat 2025-06-30T09:22:03.381541Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20142, node 1 2025-06-30T09:22:03.527325Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:03.527352Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:03.527357Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:03.527487Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:03.528185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:03.615208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:03.615255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:03.632306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1732 2025-06-30T09:22:04.016933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:22:04.919095Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-30T09:22:04.935620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:04.935669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:05.012463Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:22:05.014415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:05.227582Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2025-06-30T09:22:05.269870Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.270085Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.270276Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.270316Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.270370Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.270417Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.270444Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.270461Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.270482Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:22:05.487507Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:05.487559Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:05.503726Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:05.588719Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:05.647608Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-30T09:22:05.647660Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-30T09:22:05.675306Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-30T09:22:05.675426Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-30T09:22:05.675461Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-30T09:22:05.675470Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-30T09:22:05.675477Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-30T09:22:05.675485Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-30T09:22:05.675492Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-30T09:22:05.675503Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-30T09:22:05.675675Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-30T09:22:05.694673Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7960: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:22:05.694717Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7990: ConnectToSA(), pipe client id: [2:1799:2565], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:22:05.696587Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1811:2574] 2025-06-30T09:22:05.704276Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1843:2590] 2025-06-30T09:22:05.704803Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1843:2590], schemeshard id = 72075186224037897 2025-06-30T09:22:05.705866Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-30T09:22:05.719300Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-30T09:22:05.719330Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-30T09:22:05.719346Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-30T09:22:05.731955Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.734493Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-30T09:22:05.734545Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-30T09:22:05.892568Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-30T09:22:06.036231Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-30T09:22:06.095186Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-30T09:22:06.647106Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:06.699549Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2147:3023], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.699617Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.705207Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.011069Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2454:3073], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.011249Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.011782Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2459:3077]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:22:07.011837Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-30T09:22:07.011853Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2461:3079] 2025-06-30T09:22:07.011890Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2461:3079] 2025-06-30T09:22:07.012148Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2462:2951] 2025-06-30T09:22:07.012242Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2461:3079], server id = [2:2462:2951], tablet id = 72075186224037894, status = OK 2025-06-30T09:22:07.012309Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2462:2951], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-30T09:22:07.012329Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-30T09:22:07.012380Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-30T09:22:07.012392Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2459:3077], StatRequests.size() = 1 2025-06-30T09:22:07.016374Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2466:3083], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.016419Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.016540Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2471:3088], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.018425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:07.160544Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-30T09:22:07.160591Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-30T09:22:07.218993Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2461:3079], schemeshard count = 1 2025-06-30T09:22:07.549812Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2473:3090], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:22:07.668764Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2584:3160] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:07.671817Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2607:3176]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:22:07.671881Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-30T09:22:07.671889Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2607:3176], StatRequests.size() = 1 2025-06-30T09:22:07.682445Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz029kgtcp516vh4d5rhs7fg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTBjMjEyNDItNGFhZmM4NWEtZDZjMjBhMzItYzU4YTE3MmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:07.735841Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:2686:3206], for# user@builtin, access# DescribeSchema 2025-06-30T09:22:07.735879Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:2686:3206], for# user@builtin, access# DescribeSchema 2025-06-30T09:22:07.743097Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:2676:3202], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/Database/.metadata/_statistics]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:22:07.744538Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NjU0MTcyN2MtMWM1MTEyYzEtZWQ0YjM0OWUtNGZjZDIyYTM=, ActorId: [1:2667:3194], ActorState: ExecuteState, TraceId: 01jz029m7e18rj2y04gc3tj0g5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: >> KqpScan::JoinSimple [GOOD] >> KqpScan::JoinWithParams >> KqpScan::AggregateByColumn [GOOD] >> KqpScan::AggregateCountStar >> TKeyValueTest::TestInlineCopyRangeWorksNewApi [GOOD] |81.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |81.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |81.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |81.9%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpScan::Grep |81.9%| [TA] $(B)/ydb/core/statistics/database/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::TooManyComputeActors [GOOD] >> KqpScan::CustomWindow [GOOD] Test command err: Trying to start YDB, gRPC: 21292, MsgBus: 61845 2025-06-30T09:22:06.085229Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670252059232729:2192];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:06.085277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002ea3/r3tmp/tmp49M7Xw/pdisk_1.dat 2025-06-30T09:22:06.264294Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670252059232573:2080] 1751275326077380 != 1751275326077383 2025-06-30T09:22:06.266165Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21292, node 1 2025-06-30T09:22:06.298961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:06.298978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:06.298981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:06.299035Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61845 2025-06-30T09:22:06.416502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:06.416541Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:06.417681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61845 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success.
2025-06-30T09:22:08.055820Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:08.055858Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting waiting... 
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.926258Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.986405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.015153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.037434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.062290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.083967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.085905Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:07.100728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.115259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.150029Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670256354202136:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.150066Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.150116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670256354202141:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.151301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:07.155855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:07.155947Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670256354202143:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:07.230036Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670256354202194:3405] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:07.583872Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275327581, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 14867, MsgBus: 20887 2025-06-30T09:22:07.943173Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670259651491183:2061];send_to=[0:7307199536658146131:7762515]; 202 ... de 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:07.991801Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:07.991873Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20887 TClient is connected to server localhost:20887 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:08.055820Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:08.055858Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting waiting... 
2025-06-30T09:22:08.056372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:08.057229Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:08.063083Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:08.075614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.109584Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.175613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.208247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.352336Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670263946460057:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.352389Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.363216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.395753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.421845Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.443521Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.468559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.489026Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.505977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.542990Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670263946460710:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.543029Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.543228Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670263946460715:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.544304Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:08.547688Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:08.547790Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670263946460717:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:08.627896Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670263946460768:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:08.906527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.946449Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:09.154449Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=2&id=NmEyNTRhY2UtZmNiMTdmNzEtOWRlMWViNTMtZWIyMjFlNjc=, ActorId: [2:7521670263946461276:2494], ActorState: ExecuteState, TraceId: 01jz029nf21v8146c984svt88r, Create QueryResponse for error on request, msg: 2025-06-30T09:22:09.155908Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275329202, txId: 281474976715674] shutting down
: Warning: Type annotation, code: 1030
:7:13: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:8:18: Warning: At function: AssumeColumnOrderPartial, At function: Aggregate, At function: Filter, At lambda, At function: Coalesce
:9:67: Warning: At function: And
:9:39: Warning: At function: <
:9:46: Warning: At function: -
:9:46: Warning: Integral type implicit bitcast: Optional and Int32, code: 1107
: Error: Requested too many execution units: 21, code: 2029 >> KqpScan::CrossJoinOneColumn >> KqpSplit::AfterResultMultiRange+Ascending |81.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |81.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineCopyRangeWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:53:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:53:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:53:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:53:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! 
new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:52:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:52:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:83:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:83:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:54:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:54:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! 
new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:54:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:54:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:53:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:53:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! 
new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:53:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:53:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:89:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:93:2057] recipient: [11:91:2119] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:95:2057] recipient: [11:91:2119] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:94:2120] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:52:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:52:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 46:81:2112] !Reboot 72057594037927937 (actor [46:59:2099]) rebooted! !Reboot 72057594037927937 (actor [46:59:2099]) tablet resolver refreshed! 
new actor is[46:84:2113] Leader for TabletID 72057594037927937 is [46:84:2113] sender: [46:170:2057] recipient: [46:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [47:57:2057] recipient: [47:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [47:57:2057] recipient: [47:53:2097] Leader for TabletID 72057594037927937 is [47:59:2099] sender: [47:60:2057] recipient: [47:53:2097] Leader for TabletID 72057594037927937 is [47:59:2099] sender: [47:77:2057] recipient: [47:14:2061] !Reboot 72057594037927937 (actor [47:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [47:59:2099] sender: [47:80:2057] recipient: [47:38:2085] Leader for TabletID 72057594037927937 is [47:59:2099] sender: [47:83:2057] recipient: [47:82:2112] Leader for TabletID 72057594037927937 is [47:59:2099] sender: [47:84:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [47:85:2113] sender: [47:86:2057] recipient: [47:82:2112] !Reboot 72057594037927937 (actor [47:59:2099]) rebooted! !Reboot 72057594037927937 (actor [47:59:2099]) tablet resolver refreshed! new actor is[47:85:2113] Leader for TabletID 72057594037927937 is [47:85:2113] sender: [47:171:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:57:2057] recipient: [48:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:57:2057] recipient: [48:54:2097] Leader for TabletID 72057594037927937 is [48:59:2099] sender: [48:60:2057] recipient: [48:54:2097] Leader for TabletID 72057594037927937 is [48:59:2099] sender: [48:77:2057] recipient: [48:14:2061] !Reboot 72057594037927937 (actor [48:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [48:59:2099] sender: [48:83:2057] recipient: [48:38:2085] Leader for TabletID 72057594037927937 is [48:59:2099] sender: [48:86:2057] recipient: [48:14:2061] Leader for TabletID 72057594037927937 is [48:59:2099] sender: [48:87:2057] recipient: [48:85:2115] Leader for TabletID 72057594037927937 is [48:88:2116] sender: [48:89:2057] recipient: [48:85:2115] !Reboot 72057594037927937 (actor [48:59:2099]) rebooted! !Reboot 72057594037927937 (actor [48:59:2099]) tablet resolver refreshed! new actor is[48:88:2116] Leader for TabletID 72057594037927937 is [48:88:2116] sender: [48:174:2057] recipient: [48:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:57:2057] recipient: [49:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:57:2057] recipient: [49:53:2097] Leader for TabletID 72057594037927937 is [49:59:2099] sender: [49:60:2057] recipient: [49:53:2097] Leader for TabletID 72057594037927937 is [49:59:2099] sender: [49:77:2057] recipient: [49:14:2061] !Reboot 72057594037927937 (actor [49:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [49:59:2099] sender: [49:83:2057] recipient: [49:38:2085] Leader for TabletID 72057594037927937 is [49:59:2099] sender: [49:86:2057] recipient: [49:14:2061] Leader for TabletID 72057594037927937 is [49:59:2099] sender: [49:87:2057] recipient: [49:85:2115] Leader for TabletID 72057594037927937 is [49:88:2116] sender: [49:89:2057] recipient: [49:85:2115] !Reboot 72057594037927937 (actor [49:59:2099]) rebooted! !Reboot 72057594037927937 (actor [49:59:2099]) tablet resolver refreshed! 
new actor is[49:88:2116] Leader for TabletID 72057594037927937 is [49:88:2116] sender: [49:174:2057] recipient: [49:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [50:57:2057] recipient: [50:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [50:57:2057] recipient: [50:53:2097] Leader for TabletID 72057594037927937 is [50:59:2099] sender: [50:60:2057] recipient: [50:53:2097] Leader for TabletID 72057594037927937 is [50:59:2099] sender: [50:77:2057] recipient: [50:14:2061] !Reboot 72057594037927937 (actor [50:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [50:59:2099] sender: [50:84:2057] recipient: [50:38:2085] Leader for TabletID 72057594037927937 is [50:59:2099] sender: [50:86:2057] recipient: [50:14:2061] Leader for TabletID 72057594037927937 is [50:59:2099] sender: [50:88:2057] recipient: [50:87:2115] Leader for TabletID 72057594037927937 is [50:89:2116] sender: [50:90:2057] recipient: [50:87:2115] !Reboot 72057594037927937 (actor [50:59:2099]) rebooted! !Reboot 72057594037927937 (actor [50:59:2099]) tablet resolver refreshed! new actor is[50:89:2116] Leader for TabletID 72057594037927937 is [50:89:2116] sender: [50:175:2057] recipient: [50:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [51:57:2057] recipient: [51:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [51:57:2057] recipient: [51:54:2097] Leader for TabletID 72057594037927937 is [51:59:2099] sender: [51:60:2057] recipient: [51:54:2097] Leader for TabletID 72057594037927937 is [51:59:2099] sender: [51:77:2057] recipient: [51:14:2061] !Reboot 72057594037927937 (actor [51:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [51:59:2099] sender: [51:87:2057] recipient: [51:38:2085] Leader for TabletID 72057594037927937 is [51:59:2099] sender: [51:90:2057] recipient: [51:14:2061] Leader for TabletID 72057594037927937 is [51:59:2099] sender: [51:91:2057] recipient: [51:89:2118] Leader for TabletID 72057594037927937 is [51:92:2119] sender: [51:93:2057] recipient: [51:89:2118] !Reboot 72057594037927937 (actor [51:59:2099]) rebooted! !Reboot 72057594037927937 (actor [51:59:2099]) tablet resolver refreshed! new actor is[51:92:2119] Leader for TabletID 72057594037927937 is [51:92:2119] sender: [51:178:2057] recipient: [51:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [52:57:2057] recipient: [52:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [52:57:2057] recipient: [52:53:2097] Leader for TabletID 72057594037927937 is [52:59:2099] sender: [52:60:2057] recipient: [52:53:2097] Leader for TabletID 72057594037927937 is [52:59:2099] sender: [52:77:2057] recipient: [52:14:2061] !Reboot 72057594037927937 (actor [52:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [52:59:2099] sender: [52:87:2057] recipient: [52:38:2085] Leader for TabletID 72057594037927937 is [52:59:2099] sender: [52:90:2057] recipient: [52:14:2061] Leader for TabletID 72057594037927937 is [52:59:2099] sender: [52:91:2057] recipient: [52:89:2118] Leader for TabletID 72057594037927937 is [52:92:2119] sender: [52:93:2057] recipient: [52:89:2118] !Reboot 72057594037927937 (actor [52:59:2099]) rebooted! !Reboot 72057594037927937 (actor [52:59:2099]) tablet resolver refreshed! 
new actor is[52:92:2119] Leader for TabletID 72057594037927937 is [52:92:2119] sender: [52:178:2057] recipient: [52:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:57:2057] recipient: [53:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:57:2057] recipient: [53:54:2097] Leader for TabletID 72057594037927937 is [53:59:2099] sender: [53:60:2057] recipient: [53:54:2097] Leader for TabletID 72057594037927937 is [53:59:2099] sender: [53:77:2057] recipient: [53:14:2061] !Reboot 72057594037927937 (actor [53:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [53:59:2099] sender: [53:88:2057] recipient: [53:38:2085] Leader for TabletID 72057594037927937 is [53:59:2099] sender: [53:91:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [53:59:2099] sender: [53:92:2057] recipient: [53:90:2118] Leader for TabletID 72057594037927937 is [53:93:2119] sender: [53:94:2057] recipient: [53:90:2118] !Reboot 72057594037927937 (actor [53:59:2099]) rebooted! !Reboot 72057594037927937 (actor [53:59:2099]) tablet resolver refreshed! new actor is[53:93:2119] Leader for TabletID 72057594037927937 is [53:93:2119] sender: [53:179:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:57:2057] recipient: [54:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:57:2057] recipient: [54:52:2097] Leader for TabletID 72057594037927937 is [54:59:2099] sender: [54:60:2057] recipient: [54:52:2097] Leader for TabletID 72057594037927937 is [54:59:2099] sender: [54:77:2057] recipient: [54:14:2061] !Reboot 72057594037927937 (actor [54:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [54:59:2099] sender: [54:91:2057] recipient: [54:38:2085] Leader for TabletID 72057594037927937 is [54:59:2099] sender: [54:94:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [54:59:2099] sender: [54:95:2057] recipient: [54:93:2121] Leader for TabletID 72057594037927937 is [54:96:2122] sender: [54:97:2057] recipient: [54:93:2121] !Reboot 72057594037927937 (actor [54:59:2099]) rebooted! !Reboot 72057594037927937 (actor [54:59:2099]) tablet resolver refreshed! new actor is[54:96:2122] Leader for TabletID 72057594037927937 is [54:96:2122] sender: [54:182:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:57:2057] recipient: [55:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:57:2057] recipient: [55:54:2097] Leader for TabletID 72057594037927937 is [55:59:2099] sender: [55:60:2057] recipient: [55:54:2097] Leader for TabletID 72057594037927937 is [55:59:2099] sender: [55:77:2057] recipient: [55:14:2061] !Reboot 72057594037927937 (actor [55:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [55:59:2099] sender: [55:91:2057] recipient: [55:38:2085] Leader for TabletID 72057594037927937 is [55:59:2099] sender: [55:94:2057] recipient: [55:14:2061] Leader for TabletID 72057594037927937 is [55:59:2099] sender: [55:95:2057] recipient: [55:93:2121] Leader for TabletID 72057594037927937 is [55:96:2122] sender: [55:97:2057] recipient: [55:93:2121] !Reboot 72057594037927937 (actor [55:59:2099]) rebooted! !Reboot 72057594037927937 (actor [55:59:2099]) tablet resolver refreshed! 
new actor is[55:96:2122] Leader for TabletID 72057594037927937 is [55:96:2122] sender: [55:182:2057] recipient: [55:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [56:57:2057] recipient: [56:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [56:57:2057] recipient: [56:53:2097] Leader for TabletID 72057594037927937 is [56:59:2099] sender: [56:60:2057] recipient: [56:53:2097] Leader for TabletID 72057594037927937 is [56:59:2099] sender: [56:77:2057] recipient: [56:14:2061] !Reboot 72057594037927937 (actor [56:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [56:59:2099] sender: [56:92:2057] recipient: [56:38:2085] Leader for TabletID 72057594037927937 is [56:59:2099] sender: [56:95:2057] recipient: [56:14:2061] Leader for TabletID 72057594037927937 is [56:59:2099] sender: [56:96:2057] recipient: [56:94:2121] Leader for TabletID 72057594037927937 is [56:97:2122] sender: [56:98:2057] recipient: [56:94:2121] !Reboot 72057594037927937 (actor [56:59:2099]) rebooted! !Reboot 72057594037927937 (actor [56:59:2099]) tablet resolver refreshed! new actor is[56:97:2122] Leader for TabletID 72057594037927937 is [0:0:0] sender: [57:57:2057] recipient: [57:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [57:57:2057] recipient: [57:53:2097] Leader for TabletID 72057594037927937 is [57:59:2099] sender: [57:60:2057] recipient: [57:53:2097] Leader for TabletID 72057594037927937 is [57:59:2099] sender: [57:77:2057] recipient: [57:14:2061] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::AggregateEmptySum [GOOD] Test command err: Trying to start YDB, gRPC: 7901, MsgBus: 31890 2025-06-30T09:22:04.189097Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670245003244467:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:04.189144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002f0a/r3tmp/tmpBOBcEg/pdisk_1.dat 2025-06-30T09:22:04.257788Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:04.260003Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670245003244442:2080] 1751275324188854 != 1751275324188857 TServer::EnableGrpc on GrpcPort 7901, node 1 2025-06-30T09:22:04.274553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:04.274570Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:04.274577Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:04.274627Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:04.292011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:04.292055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:04.293106Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31890 TClient is connected to server localhost:31890 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:04.363634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:04.367341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:04.376008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:04.465017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:04.505940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.527723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:04.618806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670245003246046:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.618849Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.694004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.712006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.782177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.796417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.809765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.824092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.835890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:04.853636Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670245003246701:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.853668Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.853695Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670245003246706:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:04.854665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:04.864501Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670245003246708:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:04.919095Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670245003246759:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:05.194988Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:05.248433Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521670249298214368:2469] TxId: 281474976715673. Ctx: { TraceId: 01jz029hnjbkfqwsd4588zyhk9, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTIzNTZmYmYtN2IwZTU0ZjgtZTYxNjc0ZWUtY2UwNDc3MzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-30T09:22:05.883647Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275325289, txId: 281474976715672] shutting down Trying to start YDB, gRPC ... cted event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:07.915402Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275327389, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 27348, MsgBus: 3166 2025-06-30T09:22:08.272645Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670263448342868:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:08.272754Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002f0a/r3tmp/tmpUL1Mme/pdisk_1.dat 2025-06-30T09:22:08.293008Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:08.294062Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670263448342755:2080] 1751275328268907 != 1751275328268910 TServer::EnableGrpc on GrpcPort 27348, node 3 2025-06-30T09:22:08.307418Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:08.307434Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:08.307437Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:08.307484Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3166 TClient is connected to server localhost:3166 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:08.383370Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:08.383405Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:08.383758Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:08.384344Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:22:08.386307Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:08.395764Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.421426Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.468093Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:22:08.489351Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.751135Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670263448344350:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.751165Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.771691Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.791854Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.805351Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.816696Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.828919Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.841980Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.859286Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.876073Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670263448345000:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.876115Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.876313Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670263448345005:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.877391Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:08.880152Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:08.880330Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670263448345007:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:08.936530Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670263448345058:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:09.254691Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275329209, txId: 281474976715672] shutting down 2025-06-30T09:22:09.273661Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KikimrIcGateway::TestCreateSameExternalTable |81.9%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... results_accumulator.log} |81.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |81.9%| [TA] {RESULT} $(B)/ydb/core/statistics/database/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KikimrProvider::TestFillAuthPropertiesBasic [GOOD] >> KikimrProvider::TestFillAuthPropertiesAws [GOOD] >> KikimrProvider::AlterTableAddIndexWithTableSettings >> KqpScan::BoolFlag [GOOD] >> KqpScan::UnionMixed [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History_Sticky |81.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] |81.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> KqpScan::JoinWithParams [GOOD] >> KqpScan::JoinLeftOnly |81.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::BoolFlag [GOOD] Test command err: Trying to start YDB, gRPC: 9412, MsgBus: 5356 2025-06-30T09:22:04.986317Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670246066819665:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:04.986342Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002eaa/r3tmp/tmpgIhJKn/pdisk_1.dat 2025-06-30T09:22:05.108799Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9412, node 1 2025-06-30T09:22:05.134158Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:05.134174Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:05.134177Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:05.134230Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-30T09:22:05.154183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:05.154221Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:05.155476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5356 TClient is connected to server localhost:5356 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:05.319671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:05.323421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:22:05.328146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:05.428713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:05.478051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:05.501360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:05.767566Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670250361788538:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:05.767619Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:05.905345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.930384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.954542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.977392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:05.994832Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:06.000712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.032857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.060640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:06.099316Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670254656756497:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.099347Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.099477Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670254656756502:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:06.100522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:06.104761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-30T09:22:06.105014Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670254656756504:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:22:06.160129Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670254656756555:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:06.554043Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275326451, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 12225, MsgBus: 1579 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002eaa/r3tmp/tmpvxfPez/pdisk_1.dat 2025-06-30T09:22:06.967090Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 12225, node 2 2025-06-30T09:22:06.988591Z node 2 :IMPORT WARN: ... 670, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 17164, MsgBus: 14250 2025-06-30T09:22:09.133772Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670268028027996:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:09.133819Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002eaa/r3tmp/tmpLCsScc/pdisk_1.dat 2025-06-30T09:22:09.161954Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:09.162210Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670268028027970:2080] 1751275329133607 != 1751275329133610 TServer::EnableGrpc on GrpcPort 17164, node 3 2025-06-30T09:22:09.183501Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:09.183516Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:09.183518Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:09.183584Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14250 TClient is connected to server localhost:14250 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:22:09.238895Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:09.241192Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:22:09.243652Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:09.243683Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:09.245124Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:22:09.247603Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.275088Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:09.303305Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:09.318586Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:09.521713Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670268028029572:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.521758Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.532422Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.544147Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.560217Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.568025Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.582190Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.596945Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.655694Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.715174Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670268028030233:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.715210Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.715291Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670268028030238:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.716098Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:09.721188Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670268028030240:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:22:09.815125Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670268028030291:3397] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:09.987818Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.131032Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275330168, txId: 281474976710674] shutting down 2025-06-30T09:22:10.137523Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::UnionMixed [GOOD] Test command err: Trying to start YDB, gRPC: 13199, MsgBus: 30195 2025-06-30T09:22:07.375401Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670257508775900:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:07.375513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e8f/r3tmp/tmpn9kDKn/pdisk_1.dat 2025-06-30T09:22:07.510934Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:07.512008Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670257508775717:2080] 1751275327372509 != 1751275327372512 TServer::EnableGrpc on GrpcPort 13199, node 1 2025-06-30T09:22:07.558991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:07.559006Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:07.559008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:07.559062Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:07.570864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:07.570899Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:07.579161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30195 TClient is connected to server localhost:30195 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:07.780039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:07.787140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:07.799853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:07.833313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:07.865891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:07.879967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:07.996138Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670257508777315:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.996172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.120212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.131567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.145663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.153943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.172061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.185097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.199901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.223248Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670261803745263:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.223272Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.223340Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670261803745268:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.224348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:08.231336Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670261803745270:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:08.322766Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670261803745321:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:08.374047Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:08.553287Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521670261803745641:2470] TxId: 281474976715673. Ctx: { TraceId: 01jz029mzx3aj4vvvjnrxbdm3n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2ExOTExNjUtZTNlMTEzODctOWRhY2VhYS04NTI4YjBlMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-30T09:22:08.556603Z node 1 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [1:7521670261803745701:2057], tablet: [1:7521670257508776476:2285], scanId: 4, table: /Root/EightShard 2025-06-30T0 ... cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e8f/r3tmp/tmp9UNtEL/pdisk_1.dat 2025-06-30T09:22:09.043680Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:09.046541Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670268010163747:2080] 1751275329019992 != 1751275329019995 TServer::EnableGrpc on GrpcPort 29122, node 2 2025-06-30T09:22:09.059412Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:09.059432Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:09.059435Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:09.059490Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29633 TClient is connected to server localhost:29633 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:22:09.128052Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:09.128099Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:09.129368Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:09.129594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:09.131239Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:09.140094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:09.162899Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:09.240766Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:09.264788Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:09.501423Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670268010165376:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.501460Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.515727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.529858Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.541599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.554747Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.568171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.582219Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.640172Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.665258Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670268010166030:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.665288Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.665372Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670268010166035:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.666404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:09.669296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:09.669361Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670268010166037:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:09.741243Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670268010166088:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:10.007271Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7521670272305133735:2055], tablet: [2:7521670268010164535:2289], scanId: 9, table: /Root/EightShard 2025-06-30T09:22:10.007294Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7521670272305133737:2056], tablet: [2:7521670268010164524:2284], scanId: 7, table: /Root/EightShard 2025-06-30T09:22:10.007302Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7521670272305133739:2057], tablet: [2:7521670268010164536:2290], scanId: 8, table: /Root/EightShard 2025-06-30T09:22:10.008654Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275330042, txId: 281474976715672] shutting down 2025-06-30T09:22:10.027020Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KikimrIcGateway::TestLoadExternalTable >> KikimrIcGateway::TestCreateExternalTable >> KikimrIcGateway::TestCreateSameExternalTable [GOOD] >> KikimrIcGateway::TestDropExternalTable >> KqpScan::AggregateCountStar [GOOD] >> KqpScan::AggregateEmptyCountStar >> LdapAuthProviderTest_LdapsScheme::LdapRefreshRemoveUserBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoWithError |81.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |81.9%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |81.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant >> KqpScan::Grep [GOOD] >> KqpScan::FullFrameWindow >> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata >> KikimrIcGateway::TestCreateExternalTable [GOOD] >> KikimrIcGateway::TestCreateResourcePool >> KikimrIcGateway::TestLoadTableMetadata >> KikimrProvider::TestFillAuthPropertiesNone [GOOD] >> KikimrProvider::TestFillAuthPropertiesServiceAccount [GOOD] >> KikimrProvider::TestFillAuthPropertiesMdbBasic [GOOD] |81.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |81.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |81.9%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut >> TOlapReboots::CreateStandaloneTable [GOOD] >> KikimrIcGateway::TestDropExternalTable [GOOD] >> KikimrIcGateway::TestDropExternalDataSource >> TSchemeShardLoginTest::ChangeAccountLockoutParameters [GOOD] >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb >> KikimrIcGateway::TestListPath >> KqpSplit::AfterResultMultiRange+Ascending [GOOD] >> KqpSplit::AfterResultMultiRange+Descending >> ReadAttributesUtils::AttributesGatheringEmpry [GOOD] >> ReadAttributesUtils::AttributesGatheringFilter [GOOD] >> 
ReadAttributesUtils::AttributesGatheringRecursive [GOOD] >> KqpScan::JoinLeftOnly [GOOD] >> KqpScan::CrossJoinOneColumn [GOOD] >> ReadAttributesUtils::ReplaceAttributesEmpty [GOOD] >> ReadAttributesUtils::ReplaceAttributesFilter [GOOD] >> KikimrIcGateway::TestCreateResourcePool [GOOD] >> KikimrIcGateway::TestALterResourcePool |81.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrProvider::TestFillAuthPropertiesMdbBasic [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::CreateStandaloneTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:45.839187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:45.839214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:45.839221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:45.839227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:45.839241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:45.839246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:45.839256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:45.839270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:45.839388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:45.839474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:45.857070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:45.857094Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:45.857199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:45.860287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:45.861410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:45.861459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:45.863278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:45.863338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:45.863473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:45.863537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:45.864475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:45.864528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:45.864788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:45.864801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:45.864812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:45.864821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:45.864828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:45.864857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:45.866489Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] 
recipient: [1:15:2062] 2025-06-30T09:21:45.891849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:45.891965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:45.892040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:45.892049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:45.892099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:45.892115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:45.892825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:45.892867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:45.892934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:45.892958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:45.892965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:45.892971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:45.893432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:45.893446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:45.893453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:45.893876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:45.893887Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:45.893894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:45.893903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:45.894798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:45.895259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:45.895295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:45.895515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:45.895547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
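The records above show the schemeshard's two-phase operation flow: TTxOperationPropose validates and persists the AlterSubDomain, replies StatusAccepted, and registers the transaction with the coordinator, which (faked in this unit test) plans it at step 5000001 so that TTxOperationPlanStep and TTxOperationProgress can advance the per-part state machine ("Change state for txid 1:0 2 -> 3", "3 -> 128"). A schematic C++ sketch of that split follows; every name in it is invented for illustration and none of it is the real schemeshard code.

    // Schematic sketch (invented names, not the real schemeshard classes) of the
    // propose/plan split visible above.
    #include <cstdint>
    #include <map>

    enum class EPartState { CreateParts = 2, ConfigureParts = 3, Propose = 128, Done = 240 };

    struct TPartSketch {
        EPartState State = EPartState::CreateParts;
    };

    class TSchemeShardSketch {
        std::map<uint64_t, TPartSketch> InFlight; // txId -> operation part

    public:
        // Phase 1: validate, persist, reply StatusAccepted ("TTxOperationPropose").
        bool Propose(uint64_t txId) {
            return InFlight.emplace(txId, TPartSketch{}).second;
        }

        // Phase 2: the coordinator planned the tx at a global step; walk the part
        // state machine, mirroring the "Change state for txid 1:0 2 -> 3 -> 128"
        // lines above, then retire the tx ("RemoveTx for txid").
        void PlanStep(uint64_t txId, uint64_t /*step, e.g. 5000001*/) {
            auto it = InFlight.find(txId);
            if (it == InFlight.end())
                return;
            it->second.State = EPartState::ConfigureParts; // 2 -> 3: no shards to create
            it->second.State = EPartState::Propose;        // 3 -> 128: wait for the plan
            it->second.State = EPartState::Done;           // parts acked, operation done
            InFlight.erase(it);
        }
    };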
sTx::Execute;tx_current=0;tx_id=1002;fline=tx_controller.cpp:215;event=finished_tx;tx_id=1002; FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-30T09:22:11.755074Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:11.755096Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:11.755167Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:22:11.755221Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:11.755230Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:210:2210], at schemeshard: 72057594046678944, txId: 1002, path id: 1 2025-06-30T09:22:11.755238Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:210:2210], at schemeshard: 72057594046678944, txId: 1002, path id: 3 2025-06-30T09:22:11.755375Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:11.755388Z node 68 :FLAT_TX_SCHEMESHARD INFO: create_table.cpp:459: TCreateColumnTable TProposedWaitParts operationId# 1002:0 ProgressState at tablet: 72057594046678944 2025-06-30T09:22:11.755403Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: create_table.cpp:485: TCreateColumnTable TProposedWaitParts operationId# 1002:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-30T09:22:11.755712Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:11.755739Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:11.755746Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1002 2025-06-30T09:22:11.755753Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-30T09:22:11.755762Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:11.756120Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:11.756141Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:11.756147Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1002 2025-06-30T09:22:11.756154Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-30T09:22:11.756160Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:22:11.756180Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1002, ready parts: 0/1, is published: true 2025-06-30T09:22:11.757033Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1002:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-30T09:22:11.757250Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-30T09:22:11.757594Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-30T09:22:11.771579Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6237: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1002 2025-06-30T09:22:11.771603Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409546, partId: 0 2025-06-30T09:22:11.771631Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1002 FAKE_COORDINATOR: Erasing txId 1002 2025-06-30T09:22:11.772160Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:11.772211Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:11.772220Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1002:0 ProgressState 2025-06-30T09:22:11.772241Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1002:0 progress is 1/1 2025-06-30T09:22:11.772248Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:11.772255Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1002:0 progress is 1/1 2025-06-30T09:22:11.772260Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:11.772266Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: 
TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: true 2025-06-30T09:22:11.772284Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [68:368:2344] message: TxId: 1002 2025-06-30T09:22:11.772293Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:11.772300Z node 68 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1002:0 2025-06-30T09:22:11.772306Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1002:0 2025-06-30T09:22:11.772344Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:22:11.772849Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-30T09:22:11.772864Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [68:369:2345] TestWaitNotification: OK eventTxId 1002 2025-06-30T09:22:11.772995Z node 68 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ColumnTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:11.773077Z node 68 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ColumnTable" took 92us result status StatusSuccess 2025-06-30T09:22:11.773258Z node 68 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ColumnTable" PathDescription { Self { Name: "ColumnTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 
UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "ColumnTable" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } ColumnShardCount: 1 Sharding { ColumnShards: 72075186233409546 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "timestamp" } } StorageConfig { DataChannelCount: 64 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |81.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> ReadAttributesUtils::AttributesGatheringRecursive [GOOD] >> KikimrIcGateway::TestLoadExternalTable [GOOD] >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata >> KikimrIcGateway::TestDropExternalDataSource [GOOD] |81.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |81.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TOlapReboots::CreateStore [GOOD] >> KqpScan::AggregateEmptyCountStar [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::JoinLeftOnly [GOOD] Test command err: Trying to start YDB, gRPC: 61433, MsgBus: 18832 2025-06-30T09:22:08.203389Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670263027840390:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:08.203407Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e8c/r3tmp/tmp6T4gWj/pdisk_1.dat 2025-06-30T09:22:08.306775Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:08.306865Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670263027840362:2080] 1751275328203068 != 1751275328203071 2025-06-30T09:22:08.316947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:08.316967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:08.317641Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61433, node 1 2025-06-30T09:22:08.354995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken 
or outdated, will use file: (empty maybe) 2025-06-30T09:22:08.355012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:08.355015Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:08.355062Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18832 TClient is connected to server localhost:18832 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:08.481337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:08.495271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:08.498857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.580920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.621248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:08.654376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.747352Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670263027841960:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.747382Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.800278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.857121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.918682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.943085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.960419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.973353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.989767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:09.007457Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670267322809910:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.007486Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.007654Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670267322809915:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:09.008854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:09.014572Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670267322809917:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:09.100995Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670267322809968:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:09.205691Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:09.238112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:09.374932Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521670267322810521:2494] TxId: 281474976715675. Ctx: { TraceId: 01jz029nrfa4ae9gek7xnpfwg7, Database: /Root, DatabaseId: /Root, SessionId: ydb ... base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e8c/r3tmp/tmpxvxm1m/pdisk_1.dat 2025-06-30T09:22:10.885905Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:10.886137Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670271413102512:2080] 1751275330866663 != 1751275330866666 TServer::EnableGrpc on GrpcPort 26801, node 3 2025-06-30T09:22:10.895455Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:10.895467Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:10.895469Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:10.895506Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21577 TClient is connected to server localhost:21577 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
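The NOT_FOUND warnings, the TPoolCreatorActor's "Scheduled retry ... doublechecking", and the TX_PROXY "path exist, request accepts it" error above are all one pattern: concurrent test sessions racing to create the default resource pool under /Root/.metadata/workload_manager/pools, where losing the race is treated as success. A hedged sketch of such a create-if-absent loop, with invented helper names standing in for the actual actor logic:

    // Illustrative create-if-absent loop; all names are invented stand-ins,
    // not the actual TPoolCreatorActor implementation.
    #include <string>

    enum class EStatus { Ok, NotFound, AlreadyExists, Retryable };

    static bool g_poolExists = false; // stand-in for the scheme state

    EStatus FetchPool(const std::string&) {
        return g_poolExists ? EStatus::Ok : EStatus::NotFound; // "Failed to fetch pool"
    }

    EStatus CreatePool(const std::string&) {
        if (g_poolExists)
            return EStatus::AlreadyExists; // "path exist, request accepts it"
        g_poolExists = true;
        return EStatus::Ok;
    }

    // Ensure the default pool exists, tolerating a lost creation race.
    bool EnsureDefaultPool(const std::string& path, int maxAttempts = 5) {
        for (int attempt = 0; attempt < maxAttempts; ++attempt) {
            if (FetchPool(path) == EStatus::Ok)
                return true;                   // another session created it first
            switch (CreatePool(path)) {
                case EStatus::Ok:
                case EStatus::AlreadyExists:   // losing the race is still success
                    return true;
                case EStatus::Retryable:       // "Scheduled retry for error: ..."
                    continue;                  // doublecheck on the next attempt
                default:
                    return false;
            }
        }
        return false;
    }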
2025-06-30T09:22:10.974650Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:10.974680Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:10.975080Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:10.975532Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:10.978453Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:10.989714Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.018082Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.057861Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.131785Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.422153Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670275708071406:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.422210Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.426545Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.442339Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.457242Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.472522Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.485719Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.544735Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.562496Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.600935Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670275708072059:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.601016Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.601438Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670275708072067:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.602810Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:11.608780Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:11.608973Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670275708072069:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:11.690194Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670275708072120:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:11.874338Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:11.979996Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:12.129484Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275332156, txId: 281474976715674] shutting down 2025-06-30T09:22:12.213059Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275332240, txId: 281474976715676] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::CrossJoinOneColumn [GOOD] Test command err: Trying to start YDB, gRPC: 31997, MsgBus: 3440 2025-06-30T09:22:07.312576Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670256544893552:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:07.314469Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e93/r3tmp/tmpJiKuMu/pdisk_1.dat 2025-06-30T09:22:07.411807Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31997, node 1 2025-06-30T09:22:07.459054Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:07.459070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:07.459073Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:07.459131Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:07.470974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:07.471012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:07.475232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3440 TClient is connected to server localhost:3440 WaitRootIsUp 'Root'... 
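"WaitRootIsUp 'Root'..." marks the harness polling the scheme root with Ls requests until it answers SUCCESS, as the next lines show. A minimal sketch of that kind of wait loop, assuming a caller-supplied probe; the real helper lives in the test client (test_client.cpp), not here:

    // Minimal polling wait, assuming the caller passes a probe that issues
    // TClient::Ls("/Root") and reports whether it returned SUCCESS.
    #include <chrono>
    #include <functional>
    #include <thread>

    bool WaitRootIsUp(const std::function<bool()>& lsRootSucceeds,
                      std::chrono::seconds deadline = std::chrono::seconds(30)) {
        const auto until = std::chrono::steady_clock::now() + deadline;
        while (std::chrono::steady_clock::now() < until) {
            if (lsRootSucceeds())              // Ls answered SUCCESS
                return true;                   // "WaitRootIsUp 'Root' success."
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
        return false;                          // deadline exceeded, fail fast
    }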
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:07.646629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:07.654997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:07.671009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:07.751889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:07.783086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:07.796885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.895315Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670256544895121:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.895346Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:07.935710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.946199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.960516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.977419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:07.988066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.002100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.014014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.029750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670260839863064:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.029775Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.029785Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670260839863069:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.030761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:08.034288Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670260839863071:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:08.113423Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670260839863125:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:08.290146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.313016Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:08.529184Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275328467, txId: 281474976715674] shutting down 2025-06-30T09:22:08.662203Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275328621, txId: 281474976715676] shutting down Trying to start YDB, gRPC: 28612, MsgBus: ... tor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 29849, MsgBus: 12386 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e93/r3tmp/tmpqOgVx8/pdisk_1.dat 2025-06-30T09:22:10.119477Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:10.153192Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670268669008715:2080] 1751275330100963 != 1751275330100966 2025-06-30T09:22:10.154968Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29849, node 3 2025-06-30T09:22:10.162934Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:10.162948Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:10.162950Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:10.163008Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:10.201067Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:10.201114Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:10.201985Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12386 TClient is connected to server localhost:12386 WaitRootIsUp 'Root'... 
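The HIVE warnings in this block trace a node's registration lifecycle: VolatileState moves Unknown -> Disconnected -> Connecting -> Connected as the node joins. A tiny transition check capturing those edges; the names are invented (the real bookkeeping is in node_info.cpp), and the Connected -> Disconnected edge is an assumption for node loss:

    // Invented-name sketch of the node state edges the warnings above print.
    #include <cassert>

    enum class ENodeState { Unknown, Disconnected, Connecting, Connected };

    // True for the edges observed in the log, plus an assumed node-loss edge.
    bool IsValidTransition(ENodeState from, ENodeState to) {
        switch (from) {
            case ENodeState::Unknown:      return to == ENodeState::Disconnected;
            case ENodeState::Disconnected: return to == ENodeState::Connecting;
            case ENodeState::Connecting:   return to == ENodeState::Connected;
            case ENodeState::Connected:    return to == ENodeState::Disconnected;
        }
        return false;
    }

    int main() {
        assert(IsValidTransition(ENodeState::Unknown, ENodeState::Disconnected));
        assert(!IsValidTransition(ENodeState::Unknown, ENodeState::Connected));
        return 0;
    }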
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:10.315680Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:10.319264Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:10.339521Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:10.364066Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:10.390425Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:10.405008Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:10.623574Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670268669010321:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.623601Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.630987Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.641121Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.656442Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.669207Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.685211Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.696325Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.709722Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.727426Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670268669010971:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.727459Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.727469Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670268669010976:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.728395Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:10.735551Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670268669010978:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:10.828985Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670268669011029:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:11.020263Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.102525Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:11.474230Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275331260, txId: 281474976715674] shutting down 2025-06-30T09:22:12.232133Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275331638, txId: 281474976715677] shutting down >> LdapAuthProviderTest_nonSecure::LdapRefreshRemoveUserBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoWithError |81.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> ReadAttributesUtils::ReplaceAttributesFilter [GOOD] >> KikimrIcGateway::TestALterResourcePool [GOOD] >> KikimrIcGateway::TestLoadTableMetadata [GOOD] >> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata >> KikimrIcGateway::TestListPath [GOOD] >> KikimrIcGateway::TestDropTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestDropExternalDataSource [GOOD] Test command err: Trying to start YDB, gRPC: 11314, MsgBus: 5592 2025-06-30T09:22:10.542639Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670270085798557:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:10.542658Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003657/r3tmp/tmpvweZXh/pdisk_1.dat 2025-06-30T09:22:10.627567Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:10.636390Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 11314, node 1 2025-06-30T09:22:10.645684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:10.645720Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:10.647323Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> 
Connected 2025-06-30T09:22:10.653496Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:10.653512Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:10.653514Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:10.653563Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5592 TClient is connected to server localhost:5592 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:22:10.743108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:10.748970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:10.759855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:22:10.777129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:352) 2025-06-30T09:22:10.779170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:22:10.781541Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670270085799166:2337] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/f1/f2/external_table\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], 
type: EPathTypeExternalTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:133" severity: 1 }
: Error: Scheme operation failed, status: ExecComplete, reason: Check failed: path: '/Root/f1/f2/external_table', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeExternalTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:133 2025-06-30T09:22:10.781663Z node 1 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715660, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/f1/f2/external_table', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeExternalTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:133 Trying to start YDB, gRPC: 12934, MsgBus: 29859 2025-06-30T09:22:11.304342Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670274593205553:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:11.306369Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003657/r3tmp/tmppfWmZ2/pdisk_1.dat 2025-06-30T09:22:11.328033Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12934, node 2 2025-06-30T09:22:11.379002Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:11.379020Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:11.379025Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:11.379083Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29859 2025-06-30T09:22:11.411887Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:11.411946Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:11.413273Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29859 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:22:11.449760Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:11.450982Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:11.457917Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:22:11.468867Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:352) Trying to start YDB, gRPC: 23272, MsgBus: 7877 2025-06-30T09:22:12.067792Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670280280643191:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:12.069298Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003657/r3tmp/tmplcDwJt/pdisk_1.dat 2025-06-30T09:22:12.119635Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23272, node 3 2025-06-30T09:22:12.143210Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:12.143226Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:12.143230Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:12.143290Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7877 TClient is connected to server localhost:7877 2025-06-30T09:22:12.189978Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:12.190010Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:22:12.190998Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:12.195442Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:12.197080Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:22:12.204462Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::CreateStore [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:46.234339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 
2025-06-30T09:21:46.234369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.234374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:46.234379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:46.234392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:46.234397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:46.234406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.234423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:46.234542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:46.234640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:46.251076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:46.251111Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:46.251253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.259497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:46.260651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:46.260703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:46.280044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:46.280125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:46.280287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.280361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:46.281599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.281664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:46.281976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:46.281993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.282004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:46.282015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:46.282022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:46.282049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:46.283760Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.309598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:46.309701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.309792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:46.309805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:46.309854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:46.309873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:46.310871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 
72057594046678944 2025-06-30T09:21:46.310927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:46.311005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.311032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:46.311040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:46.311046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:46.311642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.311658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:46.311665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:46.312118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.312132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.312139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:46.312148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:46.312975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:46.313485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:46.313530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:46.313751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.313800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep 
Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 57594046678944, LocalPathId: 3] was 3 2025-06-30T09:22:12.645160Z node 68 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186233409546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=1002;fline=tx_controller.cpp:215;event=finished_tx;tx_id=1002; FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-30T09:22:12.645339Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:12.645348Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:12.645393Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:22:12.645432Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:12.645439Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:212:2212], at schemeshard: 72057594046678944, txId: 1002, path id: 1 2025-06-30T09:22:12.645446Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:212:2212], at schemeshard: 72057594046678944, txId: 1002, path id: 3 2025-06-30T09:22:12.645531Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:12.645540Z node 68 :FLAT_TX_SCHEMESHARD INFO: create_store.cpp:245: TCreateOlapStore TProposedWaitParts operationId# 1002:0 ProgressState at tablet: 72057594046678944 2025-06-30T09:22:12.645549Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: create_store.cpp:268: TCreateOlapStore TProposedWaitParts operationId# 1002:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-30T09:22:12.645767Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:12.645786Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:12.645792Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1002 2025-06-30T09:22:12.645799Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-30T09:22:12.645806Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:12.646109Z node 68 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:12.646129Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:12.646134Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1002 2025-06-30T09:22:12.646141Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-30T09:22:12.646146Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:22:12.646164Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1002, ready parts: 0/1, is published: true 2025-06-30T09:22:12.647136Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1002:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-30T09:22:12.647351Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-30T09:22:12.647777Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-30T09:22:12.658832Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6237: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1002 2025-06-30T09:22:12.658861Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409546, partId: 0 2025-06-30T09:22:12.658892Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1002 FAKE_COORDINATOR: Erasing txId 1002 2025-06-30T09:22:12.659408Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:12.659449Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:12.659458Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1002:0 ProgressState 2025-06-30T09:22:12.659475Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1002:0 progress is 1/1 2025-06-30T09:22:12.659481Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:12.659487Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1002:0 progress 
is 1/1 2025-06-30T09:22:12.659491Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:12.659497Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: true 2025-06-30T09:22:12.659513Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [68:366:2342] message: TxId: 1002 2025-06-30T09:22:12.659522Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:12.659529Z node 68 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1002:0 2025-06-30T09:22:12.659534Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1002:0 2025-06-30T09:22:12.659574Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:22:12.660036Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-30T09:22:12.660052Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [68:367:2343] TestWaitNotification: OK eventTxId 1002 2025-06-30T09:22:12.660165Z node 68 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:12.660230Z node 68 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore" took 74us result status StatusSuccess 2025-06-30T09:22:12.660427Z node 68 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/OlapStore" PathDescription { Self { Name: "OlapStore" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnStore CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnStoreVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 
ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnStoreDescription { Name: "OlapStore" ColumnShardCount: 1 ColumnShards: 72075186233409546 SchemaPresets { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } NextColumnFamilyId: 1 } } NextSchemaPresetId: 2 NextTtlSettingsPresetId: 1 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::AggregateEmptyCountStar [GOOD] Test command err: Trying to start YDB, gRPC: 15921, MsgBus: 14177 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e8e/r3tmp/tmp9NSUaj/pdisk_1.dat 2025-06-30T09:22:07.869065Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:07.911394Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670259139748322:2080] 1751275327715508 != 1751275327715511 2025-06-30T09:22:07.912488Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15921, node 1 2025-06-30T09:22:07.947243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:07.947279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:07.951268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:07.951614Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:07.951617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:07.951620Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:07.951670Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14177 TClient is connected to server localhost:14177 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:08.176764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:08.183292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:08.191760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.311926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:08.391860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.438011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:08.602641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670263434717246:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.602677Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.675006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.687825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.701440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.716246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.731916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.743775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.753280Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:08.763519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:08.786901Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670263434717899:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.786929Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.786971Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670263434717904:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:08.787937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:08.790578Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670263434717906:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:08.858508Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670263434717957:3407] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:09.307667Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275329160, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 8105, MsgBus: 16231 2025-06-30T09:22:09.680866Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670268161285019:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:09.682754Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e8e/r3tmp/tmpiZFUdv/pdisk_1.dat 2025-06-30T09:22:09.713165Z node 2 :IMPORT WARN: schemeshard_import.cpp:305 ... /core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:10.683038Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:10.878781Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275330791, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 5246, MsgBus: 19651 2025-06-30T09:22:11.314527Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670273705502173:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:11.314863Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e8e/r3tmp/tmpCMVpHV/pdisk_1.dat 2025-06-30T09:22:11.389320Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:11.390836Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670273705502031:2080] 1751275331312440 != 1751275331312443 TServer::EnableGrpc on GrpcPort 5246, node 3 2025-06-30T09:22:11.414825Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:11.414840Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:11.414844Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:11.414896Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19651 2025-06-30T09:22:11.441165Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: 
Unknown -> Disconnected 2025-06-30T09:22:11.441198Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:11.442192Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19651 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:11.487994Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:11.494183Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.515640Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.542543Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.556575Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:11.863224Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670273705503645:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.863272Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.872976Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.948114Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.965481Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.981377Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.039360Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.065512Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.097088Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.134831Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670278000471600:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.134875Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.135101Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670278000471605:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.136355Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:12.139639Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:12.139678Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670278000471607:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:12.228379Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670278000471658:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:12.315004Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:12.530992Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275332492, txId: 281474976715672] shutting down >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoDisableNestedGroupsGood ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestALterResourcePool [GOOD] Test command err: Trying to start YDB, gRPC: 15734, MsgBus: 23543 2025-06-30T09:22:11.099761Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670273386887513:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:11.099800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0035a0/r3tmp/tmp46X8P2/pdisk_1.dat 2025-06-30T09:22:11.190725Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:11.191004Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670273386887490:2080] 1751275331099355 != 1751275331099358 TServer::EnableGrpc on GrpcPort 15734, node 1 2025-06-30T09:22:11.261845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:11.261878Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:11.267237Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:11.267601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:11.267604Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:11.267607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:11.267662Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23543 TClient is connected to server localhost:23543 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:11.395765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:11.400340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:11.408990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:22:11.413473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:352) Trying to start YDB, gRPC: 29162, MsgBus: 12094 2025-06-30T09:22:11.935295Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670273236774141:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:11.935505Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0035a0/r3tmp/tmp3ATYIX/pdisk_1.dat 2025-06-30T09:22:11.980756Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:11.982871Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670273236774120:2080] 1751275331935061 != 1751275331935064 TServer::EnableGrpc on GrpcPort 29162, node 2 2025-06-30T09:22:11.997194Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-30T09:22:11.997210Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:11.997213Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:11.997271Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12094 2025-06-30T09:22:12.051279Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:12.051316Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:12.055410Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12094 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:12.103491Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:22:12.107566Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:12.118077Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) Trying to start YDB, gRPC: 31193, MsgBus: 4001 2025-06-30T09:22:12.521994Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670281464085309:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:12.522038Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0035a0/r3tmp/tmpOLlptr/pdisk_1.dat 2025-06-30T09:22:12.537631Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:12.538044Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670281464085289:2080] 1751275332521869 != 1751275332521872 TServer::EnableGrpc on GrpcPort 31193, node 3 2025-06-30T09:22:12.547474Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:12.547486Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:12.547488Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:12.547532Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4001 TClient is connected to server localhost:4001 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:22:12.626519Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:12.626567Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:12.626948Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:12.627692Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:22:12.628526Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:12.641552Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:12.648929Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterResourcePool, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp:155) >> KqpSplit::AfterResultMultiRange+Descending [GOOD] >> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood >> KikimrIcGateway::TestDropTable [GOOD] >> KikimrIcGateway::TestDropResourcePool |81.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |81.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> VectorIndexBuildTest::PrefixedDuplicates |81.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpSplit::AfterResultMultiRange+Descending [GOOD] Test command err: Trying to start YDB, gRPC: 20537, MsgBus: 25165 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e7f/r3tmp/tmp6P6FJ2/pdisk_1.dat 2025-06-30T09:22:10.281629Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:10.304659Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:10.307728Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7521670270313367644:2080] 1751275330233649 != 1751275330233652 TServer::EnableGrpc on GrpcPort 20537, node 1 2025-06-30T09:22:10.323100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:10.323113Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:10.323115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:10.323161Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25165 2025-06-30T09:22:10.380620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:10.380649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:25165 2025-06-30T09:22:10.383112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:10.404319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:10.414810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:22:10.484619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.516799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:10.543959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:10.667309Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670270313369244:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.667338Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.719297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.728117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.736216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.751058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.766453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.785678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.842981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.867996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670270313369904:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.868026Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.868166Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670270313369909:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.869523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:10.874146Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670270313369911:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:10.974383Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670270313369962:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:11.232368Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:11.302124Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521670274608337559:2468] TxId: 281474976715673. Ctx: { TraceId: 01jz029qkq2w2k48f3fgrdy9h5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGVlNzAxYzAtYzVlZjVkYi04NmEyOGQ0YS02YzQ5ZDJiNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database 2025-06-30T09:22:11.302255Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jz029qkq2w2k48f3fgrdy9h5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGVlNzAxYzAtYzVlZjVkYi04NmEyOGQ0YS02YzQ5ZDJiNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 captured evreadresult --------------- ... iscarding snapshot; our snapshot: [step: 1751275331344, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 10109, MsgBus: 2378 2025-06-30T09:22:12.217702Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670279983764580:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:12.217717Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e7f/r3tmp/tmpbAEUet/pdisk_1.dat 2025-06-30T09:22:12.237112Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10109, node 2 2025-06-30T09:22:12.247614Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:12.247628Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:12.247633Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:12.247676Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2378 TClient is connected to server localhost:2378 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:12.322117Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:12.322150Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:12.322619Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:12.323337Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:12.326705Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:12.341508Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:12.365848Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:12.378459Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:12.652550Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670279983766165:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.652575Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.662257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.671552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.692616Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.701134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.712964Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.725525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.739371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.759364Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670279983766817:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.759409Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670279983766822:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.759409Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.760294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:12.765577Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670279983766824:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:12.844416Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670279983766875:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:13.049278Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jz029sbva1j8wn89nkv5jq0p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjM4ZmIzMTktYjVjOWYzZjItZDQ0M2Q1ZWQtNjlhZGFi, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 captured evreadresult ----------------------------------------------------------- resume evread ----------------------------------------------------------- 2025-06-30T09:22:13.219429Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:13.511507Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275333094, txId: 281474976715672] shutting down >> TDataShardTrace::TestTraceDistributedUpsert+UseSink >> KqpScan::FullFrameWindow [GOOD] >> KqpScan::EmptySet >> TDataShardTrace::TestTraceDistributedUpsert-UseSink >> TDataShardTrace::TestTraceDistributedSelectViaReadActors >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap [GOOD] >> TDataShardTrace::TestTraceWriteImmediateOnShard >> KikimrIcGateway::TestDropResourcePool [GOOD] >> YdbIndexTable::MultiShardTableUniqAndNonUniqIndex [GOOD] >> YdbIndexTable::MultiShardTableTwoIndexes >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata >> VectorIndexBuildTest::TTxReply_DoExecute_Throws >> TDataShardTrace::TestTraceDistributedSelect >> IndexBuildTest::RejectsCreate >> SystemView::ShowCreateTablePartitionAtKeys >> RetryPolicy::RetryWithBatching [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestDropResourcePool [GOOD] Test command err: Trying to start YDB, gRPC: 1818, MsgBus: 15251 2025-06-30T09:22:12.223964Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670279873867489:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:12.223981Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003516/r3tmp/tmpSIj6K6/pdisk_1.dat 2025-06-30T09:22:12.293155Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1818, node 1 2025-06-30T09:22:12.305391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable 
config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:12.305407Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:12.305410Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:12.305462Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15251 TClient is connected to server localhost:15251 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:22:12.371972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:12.372009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:12.373253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:12.379187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:12.628942Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670279873868129:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.628978Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.679777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.742527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.751594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.761590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.823889Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670279873868443:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.823916Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670279873868448:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.823924Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.824694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:12.829151Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670279873868450:2328], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715664 completed, doublechecking } 2025-06-30T09:22:12.881351Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670279873868501:2556] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 24364, MsgBus: 20716 2025-06-30T09:22:13.151803Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670284959643535:2178];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:13.151879Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003516/r3tmp/tmpsp0r1K/pdisk_1.dat 2025-06-30T09:22:13.172854Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24364, node 2 2025-06-30T09:22:13.178808Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670284959643366:2080] 1751275333149532 != 1751275333149535 2025-06-30T09:22:13.183974Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:13.183988Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:13.183994Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:13.184051Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20716 TClient is connected to server localhost:20716 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:22:13.255718Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:13.255758Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:13.256278Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:13.256929Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:22:13.261418Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:13.272771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:22:13.275926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:22:13.600434Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670284959644032:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:13.600474Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:13.613767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:13.633267Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:13.643134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:13.656282Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:13.694121Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670284959644342:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:13.694164Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:13.698860Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670284959644347:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:13.700082Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:13.703610Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715664, at schemeshard: 72057594046644480 2025-06-30T09:22:13.703738Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670284959644349:2328], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715664 completed, doublechecking } 2025-06-30T09:22:13.757979Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670284959644400:2554] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
: Info: Success, code: 4 2025-06-30T09:22:13.809666Z node 2 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found Trying to start YDB, gRPC: 13115, MsgBus: 4448 2025-06-30T09:22:14.086291Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670288310564103:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:14.086315Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003516/r3tmp/tmpovWauv/pdisk_1.dat 2025-06-30T09:22:14.106078Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13115, node 3 2025-06-30T09:22:14.125915Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:14.125933Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:14.125936Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:14.125999Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4448 TClient is connected to server localhost:4448 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:14.191406Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:14.191459Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:14.191901Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:14.192377Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:22:14.197459Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 61152, MsgBus: 19453 2025-06-30T09:21:40.398777Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670143615456200:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:40.399878Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003421/r3tmp/tmps15lwF/pdisk_1.dat 2025-06-30T09:21:40.455220Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:40.457121Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670143615456041:2080] 1751275300394146 != 1751275300394149 TServer::EnableGrpc on GrpcPort 61152, node 1 2025-06-30T09:21:40.491007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:40.491026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:40.491029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:40.491086Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:40.493264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:40.493299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:40.495328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19453 TClient is connected to server localhost:19453 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:40.583815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:40.591784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:40.602646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:40.641952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:40.713820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:40.730240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:40.947497Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670143615457650:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:40.947535Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:41.010703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:41.070173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:41.085125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:41.105374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:41.143925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:41.169222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:41.195126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:41.235089Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670147910425601:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:41.235128Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:41.235393Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670147910425606:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:41.236456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:41.249934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:41.250100Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670147910425608:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:41.337997Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670147910425659:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:41.394016Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:41.607802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:41.951026Z node 1 :KQP_EXECUTER ERROR: kqp_ ... zRiZDgtY2NhYmM2NGMtYWE5YzU0ZDYtNjYzYzI5MDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.878823Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719386. Ctx: { TraceId: 01jz029t7a8x3jgbj5sp5f1fxg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzZhYjViZTctMTdmZmU2YzQtYjFkOTQ0NWMtYWYxOThhMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.881213Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719387. Ctx: { TraceId: 01jz029t7q3pwfamt2vvbt0w6z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjE1MDJmMmUtMzg3ZjUxZTctYzQ1OTU1YmYtZDdiOTYzNjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.882431Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719388. Ctx: { TraceId: 01jz029t7qahh5qt5rfbakqkyc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzhhOTUzNWItZDZmMjMwYzMtYjhlMmEyMGQtMjVlNjMyMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.884151Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719389. Ctx: { TraceId: 01jz029t7p578kpqzheh0c4td7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=M2EyZDc2OWItZGQ4MmRmYmUtNTNjOWNkZjAtZDVhOTNiMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.887523Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719390. Ctx: { TraceId: 01jz029t7p578kpqzheh0c4td7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=M2EyZDc2OWItZGQ4MmRmYmUtNTNjOWNkZjAtZDVhOTNiMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.894998Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719391. Ctx: { TraceId: 01jz029t841qgmsp8cxapgm5s7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODU5YmE2MWUtOWY1NGEyOTctM2ZjNGY3MWUtMmI3N2M4Njg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.896790Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719392. 
Ctx: { TraceId: 01jz029t841qgmsp8cxapgm5s7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODU5YmE2MWUtOWY1NGEyOTctM2ZjNGY3MWUtMmI3N2M4Njg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.901597Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719393. Ctx: { TraceId: 01jz029t8b9em3e0hn53zweqmv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzAzNzRiZDgtY2NhYmM2NGMtYWE5YzU0ZDYtNjYzYzI5MDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.901784Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719394. Ctx: { TraceId: 01jz029t8c8j2w66jj50j5ab9d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjE1MDJmMmUtMzg3ZjUxZTctYzQ1OTU1YmYtZDdiOTYzNjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.906183Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719395. Ctx: { TraceId: 01jz029t8c0fb6e2syrc49wmm2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzZhYjViZTctMTdmZmU2YzQtYjFkOTQ0NWMtYWYxOThhMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.906378Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719396. Ctx: { TraceId: 01jz029t8c3dbstrdjzfs81rpw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzhhOTUzNWItZDZmMjMwYzMtYjhlMmEyMGQtMjVlNjMyMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.909422Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719397. Ctx: { TraceId: 01jz029t8c3dbstrdjzfs81rpw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzhhOTUzNWItZDZmMjMwYzMtYjhlMmEyMGQtMjVlNjMyMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.911414Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719398. Ctx: { TraceId: 01jz029t8c0fb6e2syrc49wmm2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzZhYjViZTctMTdmZmU2YzQtYjFkOTQ0NWMtYWYxOThhMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.918330Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719399. Ctx: { TraceId: 01jz029t8w89qdgwhqq63fq2nn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzAzNzRiZDgtY2NhYmM2NGMtYWE5YzU0ZDYtNjYzYzI5MDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.918799Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719401. Ctx: { TraceId: 01jz029t8v2j3adwjgw7gmyxh4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODU5YmE2MWUtOWY1NGEyOTctM2ZjNGY3MWUtMmI3N2M4Njg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.919557Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719400. Ctx: { TraceId: 01jz029t8vc6psbgmdvn10c1nh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=M2EyZDc2OWItZGQ4MmRmYmUtNTNjOWNkZjAtZDVhOTNiMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.923680Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719402. 
Ctx: { TraceId: 01jz029t8v2j3adwjgw7gmyxh4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODU5YmE2MWUtOWY1NGEyOTctM2ZjNGY3MWUtMmI3N2M4Njg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.931576Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719403. Ctx: { TraceId: 01jz029t99avjwm5wa6f1cfv54, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzZhYjViZTctMTdmZmU2YzQtYjFkOTQ0NWMtYWYxOThhMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.932106Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719404. Ctx: { TraceId: 01jz029t960dyhmmj56dfvg740, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjI4YjlhMmYtODZiOTg3ZWQtZDVlYjU0NmMtZDIzNjI2NDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.932257Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719405. Ctx: { TraceId: 01jz029t967tdhkhnjyphym523, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzhhOTUzNWItZDZmMjMwYzMtYjhlMmEyMGQtMjVlNjMyMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.934924Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719406. Ctx: { TraceId: 01jz029t967tdhkhnjyphym523, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzhhOTUzNWItZDZmMjMwYzMtYjhlMmEyMGQtMjVlNjMyMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.936249Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719407. Ctx: { TraceId: 01jz029t960dyhmmj56dfvg740, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjI4YjlhMmYtODZiOTg3ZWQtZDVlYjU0NmMtZDIzNjI2NDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.936892Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719408. Ctx: { TraceId: 01jz029t967tdhkhnjyphym523, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzhhOTUzNWItZDZmMjMwYzMtYjhlMmEyMGQtMjVlNjMyMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.938212Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719409. Ctx: { TraceId: 01jz029t9d7b7nkyzj7evbmakr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODU5YmE2MWUtOWY1NGEyOTctM2ZjNGY3MWUtMmI3N2M4Njg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.938259Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719410. Ctx: { TraceId: 01jz029t9d8d2j2ycmxzcbdnt6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzAzNzRiZDgtY2NhYmM2NGMtYWE5YzU0ZDYtNjYzYzI5MDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.939944Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719411. Ctx: { TraceId: 01jz029t9d7b7nkyzj7evbmakr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODU5YmE2MWUtOWY1NGEyOTctM2ZjNGY3MWUtMmI3N2M4Njg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.940466Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719412. 
Ctx: { TraceId: 01jz029t9d8d2j2ycmxzcbdnt6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzAzNzRiZDgtY2NhYmM2NGMtYWE5YzU0ZDYtNjYzYzI5MDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.940912Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719413. Ctx: { TraceId: 01jz029t9d7b7nkyzj7evbmakr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODU5YmE2MWUtOWY1NGEyOTctM2ZjNGY3MWUtMmI3N2M4Njg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.943911Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719414. Ctx: { TraceId: 01jz029t9m5wn4qtqdsvacyafe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=M2EyZDc2OWItZGQ4MmRmYmUtNTNjOWNkZjAtZDVhOTNiMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-30T09:22:13.945467Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719415. Ctx: { TraceId: 01jz029t9m5wn4qtqdsvacyafe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=M2EyZDc2OWItZGQ4MmRmYmUtNTNjOWNkZjAtZDVhOTNiMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-30T09:22:13.949816Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719417. Ctx: { TraceId: 01jz029t9vfaj6dvvefxz65hna, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjE1MDJmMmUtMzg3ZjUxZTctYzQ1OTU1YmYtZDdiOTYzNjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-30T09:22:13.950017Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719416. Ctx: { TraceId: 01jz029t9v9jc895h1sbsymn9a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzZhYjViZTctMTdmZmU2YzQtYjFkOTQ0NWMtYWYxOThhMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:13.950873Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719418. Ctx: { TraceId: 01jz029t9vfaj6dvvefxz65hna, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjE1MDJmMmUtMzg3ZjUxZTctYzQ1OTU1YmYtZDdiOTYzNjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS >> SystemView::AuthGroups_Access >> KqpScan::EmptySet [GOOD] >> SystemView::CollectPreparedQueries >> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestSecretsExistingValidation >> SystemView::StoragePoolsRanges >> SystemView::PartitionStatsOneSchemeShard >> VectorIndexBuildTest::TTxReply_DoExecute_Throws [GOOD] >> VectorIndexBuildTest::TTxProgress_Throws >> IndexBuildTest::RejectsCreate [GOOD] >> IndexBuildTest::RejectsDropIndex ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::EmptySet [GOOD] Test command err: Trying to start YDB, gRPC: 14415, MsgBus: 9588 2025-06-30T09:22:09.915974Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670265605741256:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:09.916817Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e88/r3tmp/tmp56opbO/pdisk_1.dat 2025-06-30T09:22:10.013881Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14415, node 1 2025-06-30T09:22:10.045833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:10.045847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:10.045849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:10.045896Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9588 2025-06-30T09:22:10.087280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:10.087314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:10.095192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9588 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:22:10.218040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:10.223335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:10.239634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:10.288911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:10.332063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:10.397648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:10.569783Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670269900710110:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.569815Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.634658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.648671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.659759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.675973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.693462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.703537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.717509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:10.786469Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670269900710770:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.786501Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.786844Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670269900710775:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:10.787975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:10.792839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:10.793036Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670269900710777:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:10.863359Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670269900710828:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:10.917370Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:11.097557Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275331134, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 7002, MsgBus: 5063 2025-06-30T09:22:11.575332Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670274408871151:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:11.577131Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cach ... e 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670278703840740:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:12.582809Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:14.079084Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275332772, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 8187, MsgBus: 2031 2025-06-30T09:22:14.413639Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670287046746350:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:14.414418Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e88/r3tmp/tmpqwqx6i/pdisk_1.dat 2025-06-30T09:22:14.427896Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:14.428056Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670287046746249:2080] 1751275334412366 != 1751275334412369 TServer::EnableGrpc on GrpcPort 8187, node 3 2025-06-30T09:22:14.441211Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:14.441229Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:14.441232Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2025-06-30T09:22:14.441285Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2031 TClient is connected to server localhost:2031 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:14.517912Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:14.517963Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:14.518339Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:14.518913Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:14.523644Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:14.535317Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:14.559723Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:14.572008Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:14.821570Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670287046747845:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:14.821608Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:14.824363Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:14.832936Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:14.846253Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:14.861609Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:14.874388Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:14.887885Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:14.904248Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:14.919668Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670287046748496:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:14.919700Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:14.919778Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670287046748501:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:14.920575Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:14.930170Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670287046748503:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:14.999517Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670287046748554:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:15.157276Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275335156, txId: 281474976715672] shutting down
>> IndexBuildTest::RejectsDropIndex [GOOD]
>> IndexBuildTest::RejectsCancel
>> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata [GOOD]
>> KikimrIcGateway::TestLoadDataSourceProperties
>> SystemView::VSlotsFields
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> RetryPolicy::RetryWithBatching [GOOD]
Test command err: 2025-06-30T09:18:09.339635Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:09.339645Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:09.339649Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-30T09:18:09.339765Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-30T09:18:09.339780Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:09.339784Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:09.339806Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008959s 2025-06-30T09:18:09.339946Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-30T09:18:09.339957Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:09.339960Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:09.339977Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008924s 2025-06-30T09:18:09.340067Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR.
Description: 2025-06-30T09:18:09.340073Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:09.340076Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-30T09:18:09.340089Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.006244s 2025-06-30T09:18:09.412058Z :TWriteSession_TestPolicy INFO: Random seed for debugging is 1751275089412050 2025-06-30T09:18:09.535118Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669235973068287:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:09.535143Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:18:09.539192Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521669236902132206:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:18:09.539215Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000f9f/r3tmp/tmpqJxrih/pdisk_1.dat 2025-06-30T09:18:09.577988Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:09.585504Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:18:09.606021Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:18:09.616668Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 28976, node 1 2025-06-30T09:18:09.629736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/000f9f/r3tmp/yandexxPDiAL.tmp 2025-06-30T09:18:09.629748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/000f9f/r3tmp/yandexxPDiAL.tmp 2025-06-30T09:18:09.629811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/000f9f/r3tmp/yandexxPDiAL.tmp 2025-06-30T09:18:09.629858Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:18:09.635396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:09.635423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:18:09.635636Z INFO: TTestServer started on Port 2166 GrpcPort 28976 2025-06-30T09:18:09.637026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2166 PQClient connected to localhost:28976 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:18:09.678330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:18:09.678353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:18:09.679918Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:18:09.680249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:18:09.683475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-30T09:18:09.996873Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669236902132432:2267], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:09.996899Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521669236902132479:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:09.996915Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:18:10.000008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:18:10.007581Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521669236902132484:2272], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-30T09:18:10.006797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-30T09:18:10.088096Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521669241197099808:2134] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:18:10.160100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:18:10.163523Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521669241197099815:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:10.164933Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=NTUzMzEzMTktYWMyOWE1MmYtZGZiNGFjODYtYTA0ZjZmMTI=, ActorId: [2:7521669236902132428:2265], ActorState: ExecuteState, TraceId: 01jz022c2970hes4hn7daepe8q, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:18:10.165057Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:18:10.163881Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669240268036542:2303], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:18:10.164425Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=OGU5NjA1N2EtNDQ2MGQ1YjQtNjBiYjIwNjAtNzMwMTdjYjg=, ActorId: [1:7521669240268036488:2293], ActorState: ExecuteState, TraceId: 01jz022c624yejzn8jmqk2s7g8, ReplyQueryCompileError, status SCHEME_ERROR remove tx ... ng for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-06-30T09:22:14.549909Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:22:14.549916Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-06-30T09:22:14.549920Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:22:14.549930Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-30T09:22:14.549934Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:22:14.549939Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-06-30T09:22:14.549943Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:22:14.549948Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 5, partNo: 0, Offset: 4 is stored on disk 2025-06-30T09:22:14.549951Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:22:14.549958Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-06-30T09:22:14.549961Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-30T09:22:14.549967Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 6 is stored on disk 2025-06-30T09:22:14.549970Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:22:14.549975Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 7 is stored on disk 2025-06-30T09:22:14.549978Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:22:14.549984Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 8 is stored on disk 2025-06-30T09:22:14.549987Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-30T09:22:14.549992Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 9 is stored on disk 2025-06-30T09:22:14.549995Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-30T09:22:14.550013Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-30T09:22:14.550035Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:22:14.550041Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-30T09:22:14.550341Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session got write response: sequence_numbers: 1 sequence_numbers: 2 sequence_numbers: 3 sequence_numbers: 4 sequence_numbers: 5 sequence_numbers: 6 sequence_numbers: 7 sequence_numbers: 8 sequence_numbers: 9 sequence_numbers: 10 offsets: 0 offsets: 1 offsets: 2 offsets: 3 offsets: 4 offsets: 5 offsets: 6 offsets: 7 offsets: 8 offsets: 9 already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false write_statistics { persist_duration_ms: 1 } 2025-06-30T09:22:14.550376Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: acknoledged message 1 2025-06-30T09:22:14.550055Z node 
17 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=1208, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:22:14.550389Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: acknoledged message 2 2025-06-30T09:22:14.550087Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 Topic 'rt3.dc1--test-topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 10 max time lag 0ms effective offset 0 2025-06-30T09:22:14.550106Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 1208 count 10 last offset 0, current partition end offset: 10 2025-06-30T09:22:14.550111Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-30T09:22:14.550123Z node 17 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 10 parts_count 0 source 1 size 1208 accessed 0 times before, last time 2025-06-30T09:22:14.000000Z 2025-06-30T09:22:14.550446Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: acknoledged message 3 2025-06-30T09:22:14.550133Z node 17 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-30T09:22:14.550455Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: acknoledged message 4 2025-06-30T09:22:14.550459Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: acknoledged message 5 2025-06-30T09:22:14.550147Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-30T09:22:14.550462Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: acknoledged message 6 2025-06-30T09:22:14.550186Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 10 count 10 size 1188 from pos 0 cbcount 10 2025-06-30T09:22:14.550467Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: acknoledged message 7 2025-06-30T09:22:14.550479Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: acknoledged message 8 2025-06-30T09:22:14.550209Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp done, result 1751275334547 queuesize 0 startOffset 0 2025-06-30T09:22:14.550485Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: acknoledged message 9 2025-06-30T09:22:14.550329Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 0 offset 0 partno 0 count 10 parts 0 suffix '63' 2025-06-30T09:22:14.550488Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: acknoledged message 10 2025-06-30T09:22:14.550610Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: close. Timeout = 0 ms 2025-06-30T09:22:14.550619Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session will now close 2025-06-30T09:22:14.550624Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: aborting 2025-06-30T09:22:14.550794Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:22:14.550804Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0] Write session: destroy 2025-06-30T09:22:14.550964Z node 17 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0 grpc read done: success: 0 data: 2025-06-30T09:22:14.550974Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0 grpc read failed 2025-06-30T09:22:14.550982Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0 grpc closed 2025-06-30T09:22:14.550987Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message-group-id|1fb99619-fb7c14b9-19b6a334-bcc76b6b_0 is DEAD 2025-06-30T09:22:14.551284Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:22:14.551342Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [17:7521670286989354131:2607] destroyed 2025-06-30T09:22:14.551353Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
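
Editor's note: the RetryPolicy::RetryWithBatching dump above opens with a reconnect loop in which each INTERNAL_ERROR is followed by "Reconnecting session to cluster cluster in 0.000000s / 0.008959s / 0.008924s / 0.006244s", i.e. small randomized delays rather than a fixed interval. As a rough sketch of that idea only (the actual YDB C++ SDK retry policy types are not visible in this log, so every name below is illustrative), a jittered, capped exponential backoff looks like this:

#include <algorithm>
#include <chrono>
#include <cstdio>
#include <random>
#include <thread>

// Illustrative backoff schedule (not the actual SDK policy): exponential
// growth from a small base, capped, with +/-50% jitter so many sessions
// do not reconnect in lockstep.
std::chrono::milliseconds NextDelay(unsigned attempt,
                                    std::chrono::milliseconds base = std::chrono::milliseconds(5),
                                    std::chrono::milliseconds cap = std::chrono::milliseconds(30000)) {
    static thread_local std::mt19937 rng{std::random_device{}()};
    auto grown = base * (1u << std::min(attempt, 20u));   // 5ms, 10ms, 20ms, ...
    auto capped = std::min(grown, cap);
    std::uniform_real_distribution<double> jitter(0.5, 1.5);
    return std::chrono::milliseconds(static_cast<long long>(capped.count() * jitter(rng)));
}

// Retries tryOnce() until it succeeds or maxAttempts is exhausted,
// sleeping a jittered backoff between attempts.
template <typename TryFn>
bool RetryWithBackoff(TryFn tryOnce, unsigned maxAttempts = 10) {
    for (unsigned attempt = 0; attempt < maxAttempts; ++attempt) {
        if (tryOnce())
            return true;
        std::this_thread::sleep_for(NextDelay(attempt));
    }
    return false;
}

int main() {
    unsigned tries = 0;
    bool ok = RetryWithBackoff([&] { return ++tries == 4; });  // succeeds on the 4th try
    std::printf("ok=%d after %u tries\n", ok, tries);
}

With a 5 ms base the first few sleeps fall in the same few-millisecond range as the delays logged above; the cap bounds the worst case, and the jitter spreads mass reconnects.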
>> SystemView::CollectPreparedQueries [GOOD]
>> SystemView::CollectScanQueries
>> ShowCreateView::WithTablePathPrefix
>> KikimrIcGateway::TestSecretsExistingValidation [GOOD]
>> VectorIndexBuildTest::TTxProgress_Throws [GOOD]
>> VectorIndexBuildTest::TTxInit_Throws
>> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestSecretsExistingValidation [GOOD]
Test command err: Trying to start YDB, gRPC: 6560, MsgBus: 9995 2025-06-30T09:22:12.010720Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670280396217128:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:12.010797Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003536/r3tmp/tmpcedntV/pdisk_1.dat 2025-06-30T09:22:12.131649Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:12.132089Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670280396216947:2080] 1751275332007870 != 1751275332007873 TServer::EnableGrpc on GrpcPort 6560, node 1 2025-06-30T09:22:12.146750Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:12.146766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:12.146769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:12.146822Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9995 TClient is connected to server localhost:9995 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:22:12.203709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:12.203745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:12.206580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success.
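
Editor's note, before the node 1 dump continues below: the pattern that dominates every "Test command err" block here is that [WorkloadService] fetches /Root/.metadata/workload_manager/pools/default, gets NOT_FOUND, schedules a create, and a second actor racing to the same create loses with "Check failed: path ... error: path exist, request accepts it", which is treated as success. A minimal stand-alone illustration of that create-if-missing race (EStatus, FetchPool, and CreatePool are invented names, not YDB APIs):

#include <iostream>
#include <mutex>
#include <thread>

enum class EStatus { Ok, NotFound, AlreadyExists };

std::mutex g_m;
bool g_exists = false;  // stand-in for the default resource pool path

EStatus FetchPool() {
    std::lock_guard<std::mutex> l(g_m);
    return g_exists ? EStatus::Ok : EStatus::NotFound;
}

EStatus CreatePool() {
    std::lock_guard<std::mutex> l(g_m);
    if (g_exists) return EStatus::AlreadyExists;  // "path exist, request accepts it"
    g_exists = true;
    return EStatus::Ok;
}

// Both actors may see NotFound and both may try to create; the loser's
// AlreadyExists is a benign outcome, exactly as in the log above.
void EnsureDefaultPool(int actor) {
    if (FetchPool() == EStatus::Ok) return;
    EStatus st = CreatePool();
    std::cout << "actor " << actor
              << (st == EStatus::Ok ? " created pool\n" : " lost race, pool exists\n");
}

int main() {
    std::thread a(EnsureDefaultPool, 1), b(EnsureDefaultPool, 2);
    a.join();
    b.join();
}

The key design point mirrored from the log is that AlreadyExists is not an error: whichever actor loses the race still observes the pool it wanted.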
2025-06-30T09:22:12.215144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:12.217844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:22:12.524267Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670280396217613:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.524297Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.585575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.658224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.669809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.684122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.699479Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670280396217927:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.699516Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.699520Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670280396217932:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.700428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:12.710447Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670280396217934:2328], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2025-06-30T09:22:12.800643Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670280396217985:2556] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 8317, MsgBus: 10050 2025-06-30T09:22:13.149188Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670282600851393:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:13.149229Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003536/r3tmp/tmpPxoTIO/pdisk_1.dat 2025-06-30T09:22:13.165330Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:13.165815Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670282600851360:2080] 1751275333148975 != 1751275333148978 TServer::EnableGrpc on GrpcPort 8317, node 2 2025-06-30T09:22:13.179007Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:13.179022Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:13.179025Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:13.179079Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10050 TClient is connected to server localhost:10050 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
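
Editor's note: each fixture in these dumps starts its own server on fresh ports ("Trying to start YDB, gRPC: 6560, MsgBus: 9995", then 8317/10050, and 23696/29569 below), which is what lets many unit-test binaries run concurrently on one host. The log does not show how the harness picks those ports; one common technique, offered purely as an assumption about how such harnesses work, is to bind to port 0 and read back the kernel's choice:

#include <arpa/inet.h>
#include <cstdio>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

// Illustrative only: ask the kernel for a free TCP port by binding to port 0
// on loopback, then reading back what was assigned. Whether ydb's TServer
// fixture does exactly this is not shown in the log.
int GrabFreePort() {
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0) return -1;
    sockaddr_in addr{};
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = 0;  // 0 = let the kernel choose
    if (bind(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) != 0) {
        close(fd);
        return -1;
    }
    socklen_t len = sizeof(addr);
    getsockname(fd, reinterpret_cast<sockaddr*>(&addr), &len);
    int port = ntohs(addr.sin_port);
    close(fd);  // note: the port may be reclaimed by another process after this
    return port;
}

int main() { std::printf("free port: %d\n", GrabFreePort()); }

The close() before returning leaves a small window in which another process could claim the port, which is why harnesses using this trick typically retry server startup on bind failure.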
2025-06-30T09:22:13.251149Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:13.251185Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:13.251687Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:13.252345Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:13.258760Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, a ... RD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:22:15.032809Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) Trying to start YDB, gRPC: 23696, MsgBus: 29569 2025-06-30T09:22:15.358076Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670290898294745:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:15.358112Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003536/r3tmp/tmpQ6HQG7/pdisk_1.dat 2025-06-30T09:22:15.374472Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:15.374725Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670290898294725:2080] 1751275335357968 != 1751275335357971 TServer::EnableGrpc on GrpcPort 23696, node 3 2025-06-30T09:22:15.381544Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:15.381558Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:15.381560Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:15.381615Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29569 TClient is connected to server localhost:29569 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:15.467500Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.467538Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.468006Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:15.468587Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:22:15.475749Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:15.484222Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:15.512161Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:15.549037Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:15.612256Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:15.786314Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670290898296323:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.786355Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.791550Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.806874Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.862723Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.874597Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.888996Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.903388Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.917158Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.933128Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670290898296980:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.933148Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670290898296985:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.933160Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.933903Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:15.936324Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670290898296987:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:15.998870Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670290898297038:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
>> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata [GOOD]
>> SystemView::ShowCreateTableDefaultLiteral
>> SystemView::CollectScanQueries [GOOD]
>> SystemView::AuthUsers
>> TDataShardTrace::TestTraceWriteImmediateOnShard [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:21:54.366129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:54.366160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:54.366167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:54.366173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:54.366188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:54.366193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:54.366205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:54.366233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:54.366345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:54.366429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:54.382820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:21:54.382851Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:54.387347Z node 1
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:54.387430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:54.387466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:54.396544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:54.396650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:54.396799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:54.396910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:54.397738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:54.397806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:54.398058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:54.398065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:54.398085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:54.398094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:54.398098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:54.398122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.401970Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:21:54.423875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:54.423985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.424070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:54.424080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: 
CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:54.424133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:54.424149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:54.425196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:54.425257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:54.425334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.425347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:54.425354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:54.425361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:54.426010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.426028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:54.426040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:54.426541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.426557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:54.426565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:54.426573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:54.427348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:54.427954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: 
Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:54.428009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:54.428241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:54.428278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:21:54.428287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:54.428358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:21:54.428367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:54.428406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:21:54.428420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:21:54.428919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:54.428930Z node 1 :FLAT_TX_SCHEMESHARD ... 
at schemeshard: 72057594046678944 2025-06-30T09:22:14.395535Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395545Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395595Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395605Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-30T09:22:14.395628Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395636Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395642Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395652Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395658Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395669Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395692Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395701Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395741Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395750Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395769Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395779Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395793Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395820Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395827Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395841Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395862Z node 5 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395868Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4706: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395872Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4765: KMeansTreeCluster records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395883Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4854: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395887Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4881: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.395891Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4908: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.397526Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:14.398210Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:14.398234Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:14.398277Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:14.398289Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:14.398296Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:14.398865Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [5:382:2349] sender: [5:441:2058] recipient: [5:15:2062] 2025-06-30T09:22:14.450757Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-30T09:22:14.450783Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:101: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-30T09:22:14.543173Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:96: TTxLogin Complete, with error: User user1 login denied: too many failed password attempts, at schemeshard: 72057594046678944 2025-06-30T09:22:14.543219Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:14.543228Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:14.543285Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:14.543292Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:432:2388], at schemeshard: 72057594046678944, txId: 0, path id: 1 
2025-06-30T09:22:14.543451Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 0 2025-06-30T09:22:16.544414Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-30T09:22:16.547810Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login_finalize.cpp:28: TTxLoginFinalize Execute at schemeshard: 72057594046678944 2025-06-30T09:22:16.563103Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:96: TTxLogin Complete, with no errors, at schemeshard: 72057594046678944 2025-06-30T09:22:16.563306Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login_finalize.cpp:57: TTxLoginFinalize Completed, with no errors at schemeshard: 72057594046678944 2025-06-30T09:22:16.563496Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:16.563571Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 90us result status StatusSuccess 2025-06-30T09:22:16.563742Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1hKYkyie8S5Ssepb2vpB\nX0yBfStA1mZIqT8GA0N7MWeNtmBJuYlHcqTIJIc0h+TnEcDqKxEH9drVx/QwC/gi\nQAh4ZqSs7/3eg8nANGCB3Azyxfyx9DHGh0CLOxuIaMRm2FS0uclJB1PuDxbgRjZA\ntaX0fKpiElNjmMF1m5lf5MrBAkFVdi7bpkGabz9wHVUGHGZHtl7NxbT012wg9WZ+\n2hyBFv9OyD8n3Lua4Ex+bTXVkuabtHPWW/frX89Ts1MUbqCs2fZYmD9d1Vuh8QAl\njxpSfozcqR7sTf0Uds8DiWtzOMTnCSM6XTHOrJ06tWpzwNeoiqlgf9V4VtP+AIAw\nAQIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1751361732276 } PublicKeys { KeyId: 2 KeyDataPEM: "-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwPzBnpDv+scs6xZtYiBh\nQIsQSWOO+75+RgyvGb5Cd0NOf1gzMS9asm7MzZV2KZVi758P4aJUjqnxhkD2wlrr\nevEPUyzvQW9Ia3jShB0CqpQq86hBzJRUvxDZTxTomFqxREOSonZZtUmKCHUk5Pwa\nsD9DjpRVTqxhB/btrqg+Y5hECGsF+bkzodQNwvGv9zoeZV1hr1ZUrKrrO/8EDOrm\n+bcLCqJmvAdtq063YjM0KsYK0oIuX5t8W+GwzIj8Sv8ibBxaaw1dAc2nGAHfEQQC\nig67/cco6mD9oEfyn6898EFwMd2LNqt5w/qmBFvwTYkWCWhnabPvjZHAnRnbRxnV\n+QIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1751361732376 } PublicKeys { KeyId: 3 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyrt5CvVqjSk1K8IgzApf\nygFxmixx8qfbP16DzhD2N3sn2LKqosiK4mrbrAJGCsY5TT5TKvknq0uWW2P4vqbE\nzxvLZmkRD9KH+SgAlvRcO0Bu6UKGGVuKrjWMdt0nleqWB3PCgUQ0J0L8hJjFhG6H\njS4wsEH7AQFTjcgMmwYvmJnCh4I5PG2jMY0gu68sUgI61YuMxLKb3ai1rqCh6oXD\n/yaSSDd9uNhO+BokK5LHxxNfTdqQ9kWME7t2NaFHpw7pGUVLWLLvhi9c/hhnM/hT\nWp+PGMpYYotr4PD4iieC7COTChhRzw3EtfzSWVD850td6XeqksJGojs0frZfJhrK\nTQIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1751361734542 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TDataShardTrace::TestTraceDistributedUpsert+UseSink [GOOD] >> TDataShardTrace::TestTraceDistributedSelectViaReadActors [GOOD] >> TDataShardTrace::TestTraceDistributedUpsert-UseSink [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata [GOOD] Test command err: Trying to start YDB, gRPC: 18947, MsgBus: 14763 2025-06-30T09:22:11.026474Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670277215122427:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:11.028234Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003587/r3tmp/tmpapgnLz/pdisk_1.dat 2025-06-30T09:22:11.126973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:11.127002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:11.128103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:11.128841Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18947, node 1 2025-06-30T09:22:11.189827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:11.189856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:11.189859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize 
from file: (empty maybe) 2025-06-30T09:22:11.189917Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14763 TClient is connected to server localhost:14763 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:11.315916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:11.325491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:11.340262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.418587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.458139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.473207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:11.614853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670277215123996:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.614884Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.678439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.691971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.750383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.760633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.775428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.788433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.813970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.892601Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670277215124658:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.892625Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.892769Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670277215124663:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:11.893965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:11.897032Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670277215124665:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:11.997247Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670277215124716:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:12.027045Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:12.249092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:22:12.251249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table ... } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:14.875285Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:14.888813Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:14.901436Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:14.962360Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:15.021428Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:15.191545Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670293179404453:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.191577Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.201896Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.212381Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.223904Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.238874Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.253879Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.266280Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.280795Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.297025Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670293179405103:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.297061Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670293179405108:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.297066Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.297828Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:15.306552Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670293179405110:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:15.367198Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670293179405161:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:15.761371Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:15.763135Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.863329Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:22:15.927201Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:16.050098Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:16.176581Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:22:16.241652Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:22:16.321623Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:16.338500Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:22:16.865554Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715711:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) >> DbCounters::TabletsSimple ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceWriteImmediateOnShard [GOOD] Test command err: 2025-06-30T09:22:15.015209Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:15.015301Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:15.015320Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0033ae/r3tmp/tmpiHnEUd/pdisk_1.dat 2025-06-30T09:22:15.114117Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:15.115069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:15.132140Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:15.133179Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275334583684 != 1751275334583688 2025-06-30T09:22:15.175494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.175539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.186173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:15.259361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.514941Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> SystemView::PartitionStatsOneSchemeShard [GOOD] >> SystemView::PartitionStatsOneSchemeShardDataQuery ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedUpsert-UseSink [GOOD] Test command err: 2025-06-30T09:22:14.890986Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:14.891084Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:14.891102Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0033f0/r3tmp/tmp30KDOo/pdisk_1.dat 2025-06-30T09:22:14.997981Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:14.998808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:15.015022Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:15.016223Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275334412411 != 1751275334412415 2025-06-30T09:22:15.058202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.058236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.068761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:15.142431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.387102Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:16.957219Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:892:2734], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.957257Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:903:2739], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.957273Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.958305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:16.971709Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-30T09:22:17.117930Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:906:2742], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:22:17.155594Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:968:2784] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:17.224286Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz029x7w7tp6y58jx24yt3e5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzVmYjY3OTItNTEyZWYxZi1lZGUwMTViNi1jY2ZlNjllMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Trace: (Session.query.QUERY_ACTION_EXECUTE -> [(CompileService -> [(CompileActor)]) , (LiteralExecuter) , (DataExecuter -> [(WaitForTableResolve) , (RunTasks) , (Datashard.Transaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendResult)]) , (Datashard.Transaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendResult)])])]) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedUpsert+UseSink [GOOD] Test command err: 2025-06-30T09:22:14.877121Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:14.877228Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:14.877248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0033bb/r3tmp/tmpH2mmoK/pdisk_1.dat 2025-06-30T09:22:14.991770Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:14.992825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:15.011351Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:15.012646Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275334361572 != 1751275334361576 2025-06-30T09:22:15.055149Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.055198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.065891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:15.139925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.390995Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:16.952019Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:892:2734], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.952058Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:903:2739], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.952074Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.953257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:16.966827Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-30T09:22:17.113173Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:906:2742], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:22:17.151924Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:968:2784] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:17.209707Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz029x7qbm1t2ajswb06sweb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWRhMjRlMzEtNWUwY2IyMC02NGMxYjg0OS1iZjhhYjU3Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Trace: (Session.query.QUERY_ACTION_EXECUTE -> [(CompileService -> [(CompileActor)]) , (DataExecuter -> [(WaitForTableResolve) , (ComputeActor -> [(ForwardWriteActor)]) , (RunTasks) , (Commit -> [(Datashard.WriteTransaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWriteResult)]) , (Datashard.WriteTransaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWriteResult)])])])]) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedSelectViaReadActors [GOOD] Test command err: 2025-06-30T09:22:14.811643Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:14.811733Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:14.811751Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0033d2/r3tmp/tmp5MFgKL/pdisk_1.dat 2025-06-30T09:22:14.912625Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:14.914049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:14.932076Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:14.933214Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275334398197 != 1751275334398201 2025-06-30T09:22:14.975292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:14.975335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:14.985919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:15.059303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.302941Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:16.859394Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:892:2734], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.859433Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:903:2739], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.859453Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.860531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:16.873318Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-30T09:22:17.023403Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:906:2742], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:22:17.060275Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:968:2784] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:17.118363Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz029x4tft61tjyf6vq86x5g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTE3NTUyZjYtZjEwY2QxMWItNDVjY2I3OTQtZGUwZjk5NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:17.146447Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz029xde4af15j52cgq2gpp5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGNjZGRmZWYtOWI0NDEyYTMtZTFiNDJjNTgtODYzZjczYmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:17.190462Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz029xe77jwg1kf7sb3946ap, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzEzZWUyNGQtNGFlYTgwZTEtZjE5YmVlODItY2E3ZDVlMmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TDataShardTrace::TestTraceDistributedSelect [GOOD] >> VectorIndexBuildTest::TTxInit_Throws [GOOD] >> SystemView::AuthGroups_Access [GOOD] >> SystemView::AuthGroups_ResultOrder >> SystemView::Nodes ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedSelect [GOOD] Test command err: 2025-06-30T09:22:15.252705Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:15.252802Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:15.252821Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0033a1/r3tmp/tmpBlKQqT/pdisk_1.dat 2025-06-30T09:22:15.364866Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:15.365909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:15.382961Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:15.384202Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275334789051 != 1751275334789055 2025-06-30T09:22:15.426637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.426686Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.439330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:15.517303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:15.767514Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:17.337702Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:892:2734], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:17.337744Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:903:2739], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:17.337755Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:17.338630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:17.352019Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-30T09:22:17.497951Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:906:2742], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:22:17.535967Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:968:2784] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:17.597477Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz029xks2erxs7346nmqjr3d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzRhOGFiYzctMzZkMDdmYTctZjc3ODI2NjItZGE4MTBhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:17.629290Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz029xwd7nqcq44jsejtdb72, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzU1MmIzOWEtODM2YWFiYjEtOTdjMjY4YWItZjA2MWUzYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:17.842450Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz029y14c84ndd0g2pjy00q9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mjc1NjkyYzUtMjBkYmI5NTktYWQ3NjQ2YTEtNTFmZjA3OTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TStorageTenantTest::LsLs >> KikimrIcGateway::TestLoadDataSourceProperties [GOOD] >> TStorageTenantTest::CreateSolomonInsideSubDomain ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::TTxInit_Throws [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:14.915983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:14.916015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:14.916021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:14.916028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:14.916043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:14.916048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:14.916060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 
1.000000s, InflightLimit# 10 2025-06-30T09:22:14.916076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:14.916212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:14.916302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:14.929768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:14.929800Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:14.933609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:14.933682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:14.933749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:14.935357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:14.935441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:14.935554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:14.935637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:14.936269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:14.936319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:14.936614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:14.936626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:14.936650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:14.936659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:14.936665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:14.936688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.938096Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: 
[1:15:2062] 2025-06-30T09:22:14.958682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:14.958784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.958845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:14.958852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:14.958891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:14.958904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:14.959661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:14.959703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:14.959754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.959764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:14.959770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:14.959774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:14.960236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.960248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:14.960254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:14.960740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.960759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.960767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:14.960786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:14.961553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:14.962095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:14.962132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:14.962316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:14.962349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:14.962366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:14.962437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:14.962446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:14.962473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:14.962485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:14.963190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:14.963205Z node 1 :FLAT_TX_SCHEMESHARD ... 
LAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:18.135706Z node 3 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186233409552 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-30T09:22:18.135773Z node 3 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186233409552 Initiating switch from PreOffline to Offline state 2025-06-30T09:22:18.136144Z node 3 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186233409553 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-30T09:22:18.136191Z node 3 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186233409553 Initiating switch from PreOffline to Offline state 2025-06-30T09:22:18.137285Z node 3 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186233409552 Reporting state Offline to schemeshard 72057594046678944 2025-06-30T09:22:18.137357Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:1228:3135], Recipient [3:1238:3143]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-30T09:22:18.137423Z node 3 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186233409553 Reporting state Offline to schemeshard 72057594046678944 2025-06-30T09:22:18.137462Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:1645:3540], Recipient [3:1655:3548]: NKikimr::TEvTablet::TEvFollowerGcApplied Leader for TabletID 72057594046678944 is [3:2754:4631] sender: [3:2821:2058] recipient: [3:15:2062] 2025-06-30T09:22:18.137559Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [3:2820:4686], Recipient [3:1238:3143]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [3:2823:4688] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-30T09:22:18.137565Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-30T09:22:18.137574Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [3:2822:4687], Recipient [3:1655:3548]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [3:2824:4689] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-30T09:22:18.137578Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-30T09:22:18.137621Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5640: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 1238 RawX2: 12884905031 } TabletId: 72075186233409552 State: 4 2025-06-30T09:22:18.137637Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409552, state: Offline, at schemeshard: 72057594046678944 2025-06-30T09:22:18.137706Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5640: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 1655 RawX2: 12884905436 } TabletId: 72075186233409553 State: 4 2025-06-30T09:22:18.137713Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: 
TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409553, state: Offline, at schemeshard: 72057594046678944 2025-06-30T09:22:18.138285Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:7 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:22:18.138335Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [3:2754:4631], Recipient [3:1238:3143]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-30T09:22:18.138342Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-30T09:22:18.138348Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409552 state Offline 2025-06-30T09:22:18.138364Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:8 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:22:18.138395Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:2820:4686], Recipient [3:1238:3143]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [3:2820:4686] ServerId: [3:2823:4688] } 2025-06-30T09:22:18.138401Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-30T09:22:18.138414Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [3:2754:4631], Recipient [3:1655:3548]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-30T09:22:18.138418Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-30T09:22:18.138423Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409553 state Offline 2025-06-30T09:22:18.138448Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:2822:4687], Recipient [3:1655:3548]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [3:2822:4687] ServerId: [3:2824:4689] } 2025-06-30T09:22:18.138453Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-30T09:22:18.138506Z node 3 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 7 TxId_Deprecated: 7 TabletID: 72075186233409552 2025-06-30T09:22:18.138588Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:1228:3135], Recipient [3:1238:3143]: NKikimr::TEvTablet::TEvTabletDead 2025-06-30T09:22:18.138649Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409552 2025-06-30T09:22:18.138682Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409552 2025-06-30T09:22:18.139113Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-06-30T09:22:18.139207Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 1 2025-06-30T09:22:18.139291Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:18.139298Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 9], at schemeshard: 72057594046678944 2025-06-30T09:22:18.139312Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 Forgetting tablet 72075186233409552 2025-06-30T09:22:18.139456Z node 3 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 8 TxId_Deprecated: 8 TabletID: 72075186233409553 2025-06-30T09:22:18.139541Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:1645:3540], Recipient [3:1655:3548]: NKikimr::TEvTablet::TEvTabletDead 2025-06-30T09:22:18.139594Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409553 2025-06-30T09:22:18.139616Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409553 2025-06-30T09:22:18.140098Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 8 ShardOwnerId: 72057594046678944 ShardLocalIdx: 8, at schemeshard: 72057594046678944 2025-06-30T09:22:18.140145Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 1 Forgetting tablet 72075186233409553 2025-06-30T09:22:18.142190Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-30T09:22:18.142211Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-30T09:22:18.142561Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-30T09:22:18.142599Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:18.142607Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 10], at schemeshard: 72057594046678944 2025-06-30T09:22:18.142629Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:22:18.142686Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:8 2025-06-30T09:22:18.142696Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:8 tabletId 72075186233409553 2025-06-30T09:22:18.143118Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:22:18.163784Z node 3 :BUILD_INDEX DEBUG: 
schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-30T09:22:18.163892Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:103: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 Issues { message: "Init IndexBuild unhandled exception ydb/core/tx/schemeshard/schemeshard_info_types.h:3661: Condition violated: `creationConfig.ParseFromString(row.template GetValue())\'" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/vectors" index { name: "by_embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 Issues { message: "Init IndexBuild unhandled exception ydb/core/tx/schemeshard/schemeshard_info_types.h:3661: Condition violated: `creationConfig.ParseFromString(row.template GetValue())\'" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/vectors" index { name: "by_embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 } >> TStorageTenantTest::GenericCases |82.0%| [TA] $(B)/ydb/core/tx/datashard/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} >> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadDataSourceProperties [GOOD] Test command err: Trying to start YDB, gRPC: 3127, MsgBus: 3009 2025-06-30T09:22:11.594095Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670276425036801:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:11.595333Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003552/r3tmp/tmpBxpzDH/pdisk_1.dat 2025-06-30T09:22:11.704170Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:11.705366Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670276425036614:2080] 1751275331591559 != 1751275331591562 2025-06-30T09:22:11.707778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:11.707807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:11.711334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3127, node 1 2025-06-30T09:22:11.726883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:11.726900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:11.726903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:11.726966Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3009 TClient is connected to server localhost:3009 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:11.821151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:11.824676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:22:11.863565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:11.940751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:11.987653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:12.007102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:12.127327Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670280720005511:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.127358Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.182892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.192880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.202165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.214428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.232838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.243073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.255761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.272416Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670280720006162:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.272433Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670280720006167:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.272445Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:12.273256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:12.282838Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670280720006169:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:12.334508Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670280720006220:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:12.591653Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:12.597111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:12.703471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 7205759404664448 ... . 2025-06-30T09:22:16.475228Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:16.479230Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:16.499692Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:16.534362Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:16.562257Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:16.580454Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:16.787987Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670297895944474:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.788019Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.799089Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:16.807465Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:16.819643Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:16.834308Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:16.847754Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:16.861836Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:16.877007Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:16.892952Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670297895945127:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.892964Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670297895945132:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.892973Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.893886Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:16.903535Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670297895945134:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:16.958610Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670297895945185:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:17.232927Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:17.234460Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:17.331759Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:22:17.408102Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:17.499150Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:17.589667Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:22:17.672523Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:22:17.760475Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:17.779542Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:22:18.369559Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715711:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet >> TStorageTenantTest::CreateTableInsideSubDomain2 >> TOlapReboots::DropMultipleTables [GOOD] >> TOlapReboots::CreateDropTable [GOOD] >> TOlapReboots::CreateDropStore >> SystemView::AuthUsers [GOOD] >> SystemView::AuthUsers_LockUnlock >> TStorageTenantTest::DeclareAndDefine >> IndexBuildTest::RejectsCancel [GOOD] |82.0%| [TA] $(B)/ydb/core/kqp/provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TStorageTenantTest::LsLs [GOOD] >> TOlapReboots::DropTableThenStore [GOOD] >> SystemView::PartitionStatsOneSchemeShardDataQuery [GOOD] >> SystemView::PgTablesOneSchemeShardDataQuery >> TStorageTenantTest::CreateTableInsideSubDomain ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::LsLs [GOOD] Test command err: 2025-06-30T09:22:18.721312Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670303037169746:2193];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:18.721416Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:18.724632Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670306512452567:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:18.724656Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00498a/r3tmp/tmpUuUYuM/pdisk_1.dat 2025-06-30T09:22:18.796476Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:18.818561Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:18.819023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:18.819050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:18.820778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> 
Connected TClient is connected to server localhost:14586 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-30T09:22:18.839117Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670303037169803:2141] Handle TEvNavigate describe path dc-1 2025-06-30T09:22:18.841421Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670303037170190:2404] HANDLE EvNavigateScheme dc-1 2025-06-30T09:22:18.841477Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670303037169828:2155], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:18.841490Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521670303037169828:2155], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:22:18.841549Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521670303037170191:2405][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:18.842061Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670303037169486:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670303037170195:2405] 2025-06-30T09:22:18.842086Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670303037169486:2052] Subscribe: subscriber# [1:7521670303037170195:2405], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:18.842103Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670303037169489:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670303037170196:2405] 2025-06-30T09:22:18.842107Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670303037169489:2055] Subscribe: subscriber# [1:7521670303037170196:2405], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:18.842113Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670303037169492:2058] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670303037170197:2405] 2025-06-30T09:22:18.842117Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670303037169492:2058] Subscribe: subscriber# [1:7521670303037170197:2405], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:18.842132Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670303037170195:2405][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670303037169486:2052] 2025-06-30T09:22:18.842138Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670303037170196:2405][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670303037169489:2055] 2025-06-30T09:22:18.842143Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670303037170197:2405][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670303037169492:2058] 2025-06-30T09:22:18.842151Z 
node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670303037170191:2405][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670303037170192:2405] 2025-06-30T09:22:18.842159Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670303037170191:2405][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670303037170193:2405] 2025-06-30T09:22:18.842183Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521670303037170191:2405][/dc-1] Set up state: owner# [1:7521670303037169828:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:18.842238Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670303037170191:2405][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670303037170194:2405] 2025-06-30T09:22:18.842262Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521670303037170191:2405][/dc-1] Path was already updated: owner# [1:7521670303037169828:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:18.842273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670303037170195:2405][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670303037170192:2405], cookie# 1 2025-06-30T09:22:18.842276Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670303037170196:2405][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670303037170193:2405], cookie# 1 2025-06-30T09:22:18.842280Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670303037170197:2405][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670303037170194:2405], cookie# 1 2025-06-30T09:22:18.842286Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670303037169486:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670303037170195:2405] 2025-06-30T09:22:18.842291Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670303037169486:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670303037170195:2405], cookie# 1 2025-06-30T09:22:18.842296Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670303037169489:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670303037170196:2405] 2025-06-30T09:22:18.842299Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670303037169489:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670303037170196:2405], cookie# 1 2025-06-30T09:22:18.842303Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670303037169492:2058] Handle 
NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670303037170197:2405] 2025-06-30T09:22:18.842306Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670303037169492:2058] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670303037170197:2405], cookie# 1 2025-06-30T09:22:18.843414Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670303037170195:2405][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670303037169486:2052], cookie# 1 2025-06-30T09:22:18.843429Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670303037170196:2405][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670303037169489:2055], cookie# 1 2025-06-30T09:22:18.843434Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670303037170197:2405][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670303037169492:2058], cookie# 1 2025-06-30T09:22:18.843442Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670303037170191:2405][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670303037170192:2405], cookie# 1 2025-06-30T09:22:18.843452Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670303037170191:2405][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:22:18.843457Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670303037170191:2405][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670303037170193:2405], cookie# 1 2025-06-30T09:22:18.843459Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670303037170191:2405][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:22:18.843463Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670303037170191:2405][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670303037170194:2405], cookie# 1 2025-06-30T09:22:18.843469Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521670303037170191:2405][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:22:18.852268Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521670303037169828:2155], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ... 
-30T09:22:19.356173Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [2:7521670306512452746:2108], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1 PathId: Strong: 0 }, by path# { Subscriber: { Subscriber: [2:7521670310807420066:2112] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:22:19.356200Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7521670306512452746:2108], cacheItem# { Subscriber: { Subscriber: [2:7521670310807420066:2112] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:19.356265Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7521670310807420073:2113], recipient# [2:7521670310807420064:2111], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.356278Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7521670306512452746:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.356290Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7521670310807420075:2114], recipient# [2:7521670310807420052:2257], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.358378Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:19.358535Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7521670306512452746:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: 
dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.358576Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7521670310807420076:2115], recipient# [2:7521670310807420063:2266], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.358836Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7521670310807420063:2266], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:19.425290Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7521670306512452746:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.425357Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7521670310807420077:2116], recipient# [2:7521670310807420063:2266], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.429012Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7521670310807420063:2266], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:19.611637Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7521670306512452746:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.611707Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7521670310807420079:2117], recipient# [2:7521670310807420063:2266], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.612882Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7521670310807420063:2266], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:19.726608Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7521670306512452746:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.726688Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7521670310807420082:2118], recipient# [2:7521670310807420081:2267], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.726849Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:19.730883Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:19.815232Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7521670306512452746:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.815291Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7521670310807420083:2119], recipient# [2:7521670310807420063:2266], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.815475Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7521670310807420063:2266], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } >> SystemView::AuthGroups_ResultOrder [GOOD] >> SystemView::AuthGroups_TableRange ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::RejectsCancel [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:14.958973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:14.958999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:14.959005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:14.959012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:14.959027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:14.959030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:14.959038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:14.959049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:14.959144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:14.959202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:14.973425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:14.973449Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:14.977458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:14.977533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:14.977585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:14.979609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:14.979700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 
2025-06-30T09:22:14.979793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:14.979851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:14.980525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:14.980592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:14.980872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:14.980880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:14.980901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:14.980907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:14.980911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:14.980924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.982019Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:14.997666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:14.997790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.997887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:14.997896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:14.997951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:14.997970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:14.998973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:14.999032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:14.999101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.999114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:14.999120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:14.999127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:14.999860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.999877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:14.999886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:15.000322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:15.000331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:15.000339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:15.000361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:15.000948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:15.001411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:15.001450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:15.001652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:15.001687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:15.001704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:15.001820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:15.001830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:15.001871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:15.001886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:15.002389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:15.002399Z node 1 :FLAT_TX_SCHEMESHARD ... ings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { seconds: 30 } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 Issues { message: "TShardStatus { ShardIdx: 72057594046678944:2 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:3 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:4 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:5 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:6 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:7 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:8 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:9 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:10 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { seconds: 30 } } 2025-06-30T09:22:19.739542Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:19.739620Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 81us result status StatusSuccess 2025-06-30T09:22:19.739753Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 
MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:19.740035Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:22:19.740089Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/index1" took 56us result status StatusSuccess 2025-06-30T09:22:19.740278Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/index1" PathDescription { Self { Name: "index1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 
InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet [GOOD] >> TStorageTenantTest::Boot >> TStorageTenantTest::CreateSolomonInsideSubDomain [GOOD] >> TStorageTenantTest::GenericCases [GOOD] >> SystemView::Nodes [GOOD] >> SystemView::PartitionStatsFields >> ShowCreateView::WithTablePathPrefix [GOOD] >> ShowCreateView::WithSingleQuotedTablePathPrefix >> TStorageTenantTest::DeclareAndDefine [GOOD] >> TStorageTenantTest::Boot [GOOD] >> TStorageTenantTest::CopyTableAndConcurrentSplit >> SystemView::ShowCreateTablePartitionAtKeys [GOOD] >> SystemView::ShowCreateTablePartitionByHash >> YdbIndexTable::OnlineBuild [GOOD] >> YdbIndexTable::OnlineBuildWithDataColumn ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet [GOOD] Test command err: 2025-06-30T09:22:19.441007Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670310436360071:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:19.442801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004948/r3tmp/tmpjM1AQS/pdisk_1.dat 2025-06-30T09:22:19.534928Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:26359 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-30T09:22:19.578689Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670310436360237:2140] Handle TEvNavigate describe path dc-1 2025-06-30T09:22:19.580536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:19.580581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:19.580937Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:7521670310436360237:2140] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:22:19.580949Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670310436360678:2435] HANDLE EvNavigateScheme dc-1 2025-06-30T09:22:19.581046Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670310436360267:2156], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.581064Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521670310436360267:2156], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:22:19.581154Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:22:19.581168Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521670310436360679:2436][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:19.581682Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670310436359932:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670310436360683:2436] 2025-06-30T09:22:19.581705Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670310436359932:2051] Subscribe: subscriber# [1:7521670310436360683:2436], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.581751Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670310436359938:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670310436360685:2436] 2025-06-30T09:22:19.581755Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670310436359938:2057] Subscribe: subscriber# [1:7521670310436360685:2436], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.581786Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670310436360683:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670310436359932:2051] 2025-06-30T09:22:19.581791Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: 
[replica][1:7521670310436360685:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670310436359938:2057] 2025-06-30T09:22:19.581798Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670310436360679:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670310436360680:2436] 2025-06-30T09:22:19.581807Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670310436360679:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670310436360682:2436] 2025-06-30T09:22:19.581820Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521670310436360679:2436][/dc-1] Set up state: owner# [1:7521670310436360267:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.581859Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670310436360683:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670310436360680:2436], cookie# 1 2025-06-30T09:22:19.581864Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670310436360684:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670310436360681:2436], cookie# 1 2025-06-30T09:22:19.581867Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670310436360685:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670310436360682:2436], cookie# 1 2025-06-30T09:22:19.581887Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670310436359932:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670310436360683:2436] 2025-06-30T09:22:19.581892Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670310436359932:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670310436360683:2436], cookie# 1 2025-06-30T09:22:19.581897Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670310436359938:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670310436360685:2436] 2025-06-30T09:22:19.581900Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670310436359938:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670310436360685:2436], cookie# 1 2025-06-30T09:22:19.581903Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670310436359935:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670310436360684:2436] 2025-06-30T09:22:19.581930Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670310436359935:2054] Subscribe: subscriber# [1:7521670310436360684:2436], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.581953Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670310436359935:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670310436360684:2436], cookie# 1 2025-06-30T09:22:19.582020Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: 
[replica][1:7521670310436360683:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670310436359932:2051], cookie# 1 2025-06-30T09:22:19.582050Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670310436360685:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670310436359938:2057], cookie# 1 2025-06-30T09:22:19.582059Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670310436360684:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670310436359935:2054] 2025-06-30T09:22:19.582096Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670310436360684:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670310436359935:2054], cookie# 1 2025-06-30T09:22:19.582124Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670310436360679:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670310436360680:2436], cookie# 1 2025-06-30T09:22:19.582134Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670310436360679:2436][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:22:19.582138Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670310436360679:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670310436360682:2436], cookie# 1 2025-06-30T09:22:19.582141Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670310436360679:2436][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:22:19.582148Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670310436360679:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670310436360681:2436] 2025-06-30T09:22:19.582184Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521670310436360679:2436][/dc-1] Path was already updated: owner# [1:7521670310436360267:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.582195Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670310436360679:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670310436360681:2436], cookie# 1 2025-06-30T09:22:19.582202Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521670310436360679:2436][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:22:19.582213Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670310436359935:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670310436360684:2436] 2025-06-30T09:22:19.583963Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:19.594313Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521670310436360267:2156], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 7205759 ... : 281474976715660 2025-06-30T09:22:19.791908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715660 2025-06-30T09:22:19.795125Z node 1 :HIVE WARN: hive_impl.cpp:1959: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) 2025-06-30T09:22:19.803257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-30T09:22:19.804355Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037889 2025-06-30T09:22:19.804381Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037891 2025-06-30T09:22:19.804386Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037888 2025-06-30T09:22:19.804391Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037890 2025-06-30T09:22:19.806931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-06-30T09:22:19.807070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-30T09:22:19.807118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-06-30T09:22:19.807137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-30T09:22:19.807156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 
2025-06-30T09:22:19.807172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-30T09:22:19.807190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-30T09:22:19.807210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-30T09:22:19.807214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-30T09:22:19.807247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-30T09:22:19.807271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-30T09:22:19.807275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-30T09:22:19.807287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-30T09:22:19.811688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-30T09:22:19.811697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-30T09:22:19.811711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-30T09:22:19.811713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-30T09:22:19.811719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-30T09:22:19.811722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-30T09:22:19.811726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-30T09:22:19.811731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-30T09:22:19.811740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-30T09:22:19.811747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-30T09:22:19.780984Z node 3 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED 
RangeBegin# 281474976725656 RangeEnd# 281474976730656 txAllocator# 72057594046447617 TabletID: 72075186224037888 Status: OK Info { TabletID: 72075186224037888 Channels { Channel: 0 ChannelType: 0 History { FromGeneration: 0 GroupID: 2181038081 } StoragePool: "name_USER_0_kind_storage-pool-number-2" } Channels { Channel: 1 ChannelType: 0 History { FromGeneration: 0 GroupID: 2181038081 } StoragePool: "name_USER_0_kind_storage-pool-number-2" } Channels { Channel: 2 ChannelType: 0 History { FromGeneration: 0 GroupID: 2181038081 } StoragePool: "name_USER_0_kind_storage-pool-number-2" } TabletType: Coordinator Version: 1 TenantIdOwner: 72057594046644480 TenantIdLocalId: 2 } 2025-06-30T09:22:19.790953Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7521670310672024968:2228][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:7521670310436359932:2051] 2025-06-30T09:22:19.790965Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7521670310672024969:2228][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:7521670310436359935:2054] 2025-06-30T09:22:19.866086Z node 1 :HIVE WARN: hive_impl.cpp:1959: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) 2025-06-30T09:22:19.790973Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7521670310672024970:2228][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:7521670310436359938:2057] 2025-06-30T09:22:19.790986Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:7521670310672024961:2228][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Version: 18446744073709551615 }: sender# [3:7521670310672024965:2228] 2025-06-30T09:22:19.791009Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][3:7521670310672024961:2228][/dc-1/USER_0] Path was updated to new version: owner# [3:7521670310672024568:2106], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 4) DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.791017Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:7521670310672024961:2228][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Version: 18446744073709551615 }: sender# [3:7521670310672024966:2228] 2025-06-30T09:22:19.791026Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][3:7521670310672024961:2228][/dc-1/USER_0] Path was already updated: owner# [3:7521670310672024568:2106], state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: 
AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.791030Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:7521670310672024961:2228][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Version: 18446744073709551615 }: sender# [3:7521670310672024967:2228] 2025-06-30T09:22:19.791037Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][3:7521670310672024961:2228][/dc-1/USER_0] Path was already updated: owner# [3:7521670310672024568:2106], state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.791069Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7521670310672024568:2106], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Strong: 1 } 2025-06-30T09:22:19.791093Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7521670310672024568:2106], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7521670310672024961:2228] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1751275339695 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# { Subscriber: { Subscriber: [3:7521670310672024961:2228] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1751275339695 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateSolomonInsideSubDomain [GOOD] Test command err: 2025-06-30T09:22:18.977470Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670304366816904:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:18.977497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00495c/r3tmp/tmpyl0w2c/pdisk_1.dat 2025-06-30T09:22:19.057507Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:19.081825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:19.081861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:19.092039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:19.108941Z node 1 :BS_CONTROLLER WARN: 
{BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TClient is connected to server localhost:25678 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-30T09:22:19.128007Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670304366817092:2142] Handle TEvNavigate describe path dc-1 2025-06-30T09:22:19.130819Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670308661784784:2414] HANDLE EvNavigateScheme dc-1 2025-06-30T09:22:19.130901Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670308661784411:2155], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.130913Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521670308661784411:2155], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:22:19.131029Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521670308661784808:2429][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:19.131476Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670304366816771:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670308661784813:2429] 2025-06-30T09:22:19.131498Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670304366816771:2051] Subscribe: subscriber# [1:7521670308661784813:2429], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.131517Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670304366816774:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670308661784814:2429] 2025-06-30T09:22:19.131520Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670304366816774:2054] Subscribe: subscriber# [1:7521670308661784814:2429], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.131526Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670304366816777:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670308661784815:2429] 2025-06-30T09:22:19.131530Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670304366816777:2057] Subscribe: subscriber# [1:7521670308661784815:2429], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.131548Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670308661784813:2429][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670304366816771:2051] 2025-06-30T09:22:19.131553Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670308661784814:2429][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670304366816774:2054] 2025-06-30T09:22:19.131557Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670308661784815:2429][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] 
Version: 2 }: sender# [1:7521670304366816777:2057] 2025-06-30T09:22:19.131564Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670308661784808:2429][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670308661784810:2429] 2025-06-30T09:22:19.131572Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670308661784808:2429][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670308661784811:2429] 2025-06-30T09:22:19.131584Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521670308661784808:2429][/dc-1] Set up state: owner# [1:7521670308661784411:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.131623Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670308661784808:2429][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670308661784812:2429] 2025-06-30T09:22:19.131647Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521670308661784808:2429][/dc-1] Path was already updated: owner# [1:7521670308661784411:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.131655Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670308661784813:2429][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308661784810:2429], cookie# 1 2025-06-30T09:22:19.131659Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670308661784814:2429][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308661784811:2429], cookie# 1 2025-06-30T09:22:19.131662Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670308661784815:2429][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308661784812:2429], cookie# 1 2025-06-30T09:22:19.131677Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670304366816771:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670308661784813:2429] 2025-06-30T09:22:19.131681Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670304366816771:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308661784813:2429], cookie# 1 2025-06-30T09:22:19.131686Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670304366816774:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670308661784814:2429] 2025-06-30T09:22:19.131688Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670304366816774:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308661784814:2429], cookie# 1 2025-06-30T09:22:19.131692Z node 1 :SCHEME_BOARD_REPLICA DEBUG: 
replica.cpp:1089: [1:7521670304366816777:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670308661784815:2429] 2025-06-30T09:22:19.131695Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670304366816777:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308661784815:2429], cookie# 1 2025-06-30T09:22:19.133144Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670308661784813:2429][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670304366816771:2051], cookie# 1 2025-06-30T09:22:19.133156Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670308661784814:2429][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670304366816774:2054], cookie# 1 2025-06-30T09:22:19.133159Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670308661784815:2429][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670304366816777:2057], cookie# 1 2025-06-30T09:22:19.133166Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670308661784808:2429][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670308661784810:2429], cookie# 1 2025-06-30T09:22:19.133174Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670308661784808:2429][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:22:19.133177Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670308661784808:2429][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670308661784811:2429], cookie# 1 2025-06-30T09:22:19.133180Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670308661784808:2429][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:22:19.133184Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670308661784808:2429][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670308661784812:2429], cookie# 1 2025-06-30T09:22:19.133198Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521670308661784808:2429][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:22:19.146001Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521670308661784411:2155], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 
72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: ... Subscriber: [3:7521670309041991778:2113] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1751275339226 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 } waiting... 2025-06-30T09:22:20.178031Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670304366816771:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 18446744073709551615 }: sender# [3:7521670309041991791:2113] 2025-06-30T09:22:20.178049Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670304366816774:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 18446744073709551615 }: sender# [3:7521670309041991792:2113] 2025-06-30T09:22:20.178057Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670304366816777:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 18446744073709551615 }: sender# [3:7521670309041991793:2113] 2025-06-30T09:22:20.178704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:22:20.184759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-30T09:22:20.184775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:7 hive 72057594037968897 at ss 72057594046644480 2025-06-30T09:22:20.184779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-30T09:22:20.184782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-30T09:22:20.184785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:6 hive 72057594037968897 at ss 72057594046644480 2025-06-30T09:22:20.184787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-30T09:22:20.184789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:8 hive 72057594037968897 at ss 72057594046644480 2025-06-30T09:22:20.184791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:5 hive 72057594037968897 at ss 72057594046644480 2025-06-30T09:22:20.184832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715661 2025-06-30T09:22:20.184842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715661 2025-06-30T09:22:20.184851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715661 2025-06-30T09:22:20.186971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 
72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-30T09:22:20.187080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 6 2025-06-30T09:22:20.187151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046644480 ShardLocalIdx: 7, at schemeshard: 72057594046644480 2025-06-30T09:22:20.187177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-30T09:22:20.187195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-30T09:22:20.187212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-06-30T09:22:20.187229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-30T09:22:20.187246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-06-30T09:22:20.187262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046644480 ShardLocalIdx: 6, at schemeshard: 72057594046644480 2025-06-30T09:22:20.187279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-30T09:22:20.187296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-30T09:22:20.187312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-30T09:22:20.187329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 8 ShardOwnerId: 72057594046644480 ShardLocalIdx: 8, at schemeshard: 72057594046644480 2025-06-30T09:22:20.187346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-30T09:22:20.187367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-30T09:22:20.187386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-30T09:22:20.187403Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-30T09:22:20.187407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-30T09:22:20.187421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-30T09:22:20.187438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-30T09:22:20.187441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-30T09:22:20.187472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-30T09:22:20.191465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-30T09:22:20.191482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-30T09:22:20.191504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:7 2025-06-30T09:22:20.191507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:7 tabletId 72075186224037894 2025-06-30T09:22:20.191513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-30T09:22:20.191514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-30T09:22:20.191520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-30T09:22:20.191522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-30T09:22:20.191528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-06-30T09:22:20.191530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:6 tabletId 72075186224037893 2025-06-30T09:22:20.191536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-30T09:22:20.191538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-30T09:22:20.191542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:8 2025-06-30T09:22:20.191545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:8 tabletId 72075186224037895 2025-06-30T09:22:20.191550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: 
Deleted shardIdx 72057594046644480:5 2025-06-30T09:22:20.191554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-30T09:22:20.191564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046644480 2025-06-30T09:22:20.191577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-30T09:22:20.191585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-30T09:22:20.191590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-30T09:22:20.191616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-30T09:22:20.193017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::DropMultipleTables [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:31.875737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:31.875767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:31.875773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:31.875778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:31.875790Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:31.875795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:31.875805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:31.875824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:31.875957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:31.876054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:31.892113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:31.892143Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:31.892276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:31.895961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:31.897124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:31.897187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:31.899017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:31.899081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:31.899240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:31.899313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:31.900174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:31.900233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:31.900516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:31.900528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:31.900537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:31.900546Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:31.900552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:31.900582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:31.902160Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:31.926575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:31.926683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:31.926828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:31.926838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:31.926891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:31.926909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:31.927868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:31.927918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:31.927993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:31.928019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:31.928025Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:31.928031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:31.928721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:31.928737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:31.928744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:31.929154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:31.929166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:31.929174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:31.929182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:31.930005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:31.930469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:31.930520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:31.930787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:31.930822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
78944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.299584Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.299590Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:22:19.299596Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-30T09:22:19.299604Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-30T09:22:19.299755Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.299767Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.299783Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:22:19.299788Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-30T09:22:19.299792Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:19.303269Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 11 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.303315Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 11 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.303323Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:22:19.303330Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 11 2025-06-30T09:22:19.303338Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:22:19.303382Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 0/1, is 
published: true 2025-06-30T09:22:19.303800Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1005:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-30T09:22:19.304429Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.304490Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.304977Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.316657Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6237: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1005 2025-06-30T09:22:19.316685Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1005, tablet: 72075186233409546, partId: 0 2025-06-30T09:22:19.316714Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1005:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1005 2025-06-30T09:22:19.316734Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1005:0 129 -> 130 FAKE_COORDINATOR: Erasing txId 1005 2025-06-30T09:22:19.317560Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:22:19.317654Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:22:19.317666Z node 83 :FLAT_TX_SCHEMESHARD INFO: drop_table.cpp:315: TDropColumnTable TProposedDeleteParts operationId# 1005:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:19.317701Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:22:19.317734Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:22:19.317741Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:19.317748Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:22:19.317752Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:19.317759Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: true 2025-06-30T09:22:19.317781Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [83:368:2344] message: TxId: 1005 2025-06-30T09:22:19.317789Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:19.317795Z node 83 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1005:0 2025-06-30T09:22:19.317800Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1005:0 2025-06-30T09:22:19.317838Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-30T09:22:19.317954Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:19.317963Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:22:19.317980Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:22:19.320384Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-30T09:22:19.320407Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [83:527:2495] 2025-06-30T09:22:19.320611Z node 83 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1005 2025-06-30T09:22:19.320766Z node 83 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore/ColumnTable1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:19.320836Z node 83 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore/ColumnTable1" took 83us result status StatusPathDoesNotExist 2025-06-30T09:22:19.320890Z node 83 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/OlapStore/ColumnTable1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/OlapStore\' (id: [OwnerId: 72057594046678944, LocalPathId: 3]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/OlapStore/ColumnTable1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/OlapStore" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "OlapStore" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnStore CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:22:19.321021Z node 83 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore/ColumnTable2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at 
schemeshard: 72057594046678944 2025-06-30T09:22:19.321044Z node 83 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore/ColumnTable2" took 26us result status StatusPathDoesNotExist 2025-06-30T09:22:19.321065Z node 83 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/OlapStore/ColumnTable2\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/OlapStore\' (id: [OwnerId: 72057594046678944, LocalPathId: 3]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/OlapStore/ColumnTable2" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/OlapStore" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "OlapStore" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnStore CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::GenericCases [GOOD] Test command err: 2025-06-30T09:22:19.224306Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670311174032139:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:19.224343Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004953/r3tmp/tmp5iQPNY/pdisk_1.dat 2025-06-30T09:22:19.305577Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:19.329186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:19.329221Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:19.333762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4955 WaitRootIsUp 'dc-1'... 
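The StatusPathDoesNotExist replies above show how the describer reports the nearest resolved prefix (LastExistedPrefixPath "/MyRoot/OlapStore") once a dropped path can no longer be resolved. As a minimal sketch of that longest-existing-prefix walk — assuming a flat set of known paths; TResolveResult and ResolvePrefix are invented names for illustration, not YDB APIs:

    // Hypothetical sketch: walk the path component by component and remember
    // the deepest prefix that still exists, like the describe reply above.
    #include <iostream>
    #include <set>
    #include <sstream>
    #include <string>

    struct TResolveResult {
        bool Found = false;            // whole path exists
        std::string LastExistedPrefix; // deepest existing ancestor otherwise
    };

    TResolveResult ResolvePrefix(const std::set<std::string>& existing,
                                 const std::string& path) {
        std::istringstream in(path);
        std::string part, prefix;
        while (std::getline(in, part, '/')) {
            if (part.empty()) {
                continue; // skip the empty token before the leading '/'
            }
            const std::string candidate = prefix + "/" + part;
            if (!existing.count(candidate)) {
                return {false, prefix.empty() ? "/" : prefix};
            }
            prefix = candidate;
        }
        return {true, prefix};
    }

    int main() {
        const std::set<std::string> existing = {"/MyRoot", "/MyRoot/OlapStore"};
        const auto r = ResolvePrefix(existing, "/MyRoot/OlapStore/ColumnTable1");
        // Mirrors the log: unresolved, nearest resolved path '/MyRoot/OlapStore'.
        std::cout << "found=" << r.Found
                  << " nearest=" << r.LastExistedPrefix << "\n";
        return 0;
    }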
TClient::Ls request: dc-1 2025-06-30T09:22:19.346033Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670311174032335:2142] Handle TEvNavigate describe path dc-1 2025-06-30T09:22:19.348036Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670311174032758:2432] HANDLE EvNavigateScheme dc-1 2025-06-30T09:22:19.348093Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670311174032360:2156], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.348105Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521670311174032360:2156], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:22:19.348162Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521670311174032759:2433][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:19.348615Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311174032011:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670311174032763:2433] 2025-06-30T09:22:19.348644Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311174032011:2050] Subscribe: subscriber# [1:7521670311174032763:2433], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.348662Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311174032014:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670311174032764:2433] 2025-06-30T09:22:19.348666Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311174032014:2053] Subscribe: subscriber# [1:7521670311174032764:2433], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.348671Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311174032017:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670311174032765:2433] 2025-06-30T09:22:19.348675Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311174032017:2056] Subscribe: subscriber# [1:7521670311174032765:2433], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.348689Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670311174032763:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311174032011:2050] 2025-06-30T09:22:19.348694Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670311174032764:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311174032014:2053] 2025-06-30T09:22:19.348698Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670311174032765:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311174032017:2056] 2025-06-30T09:22:19.348705Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: 
[main][1:7521670311174032759:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311174032760:2433] 2025-06-30T09:22:19.348714Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670311174032759:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311174032761:2433] 2025-06-30T09:22:19.348728Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521670311174032759:2433][/dc-1] Set up state: owner# [1:7521670311174032360:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.348781Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670311174032759:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311174032762:2433] 2025-06-30T09:22:19.348802Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521670311174032759:2433][/dc-1] Path was already updated: owner# [1:7521670311174032360:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.348811Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670311174032763:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311174032760:2433], cookie# 1 2025-06-30T09:22:19.348814Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670311174032764:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311174032761:2433], cookie# 1 2025-06-30T09:22:19.348818Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670311174032765:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311174032762:2433], cookie# 1 2025-06-30T09:22:19.348825Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311174032011:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670311174032763:2433] 2025-06-30T09:22:19.348829Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670311174032011:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311174032763:2433], cookie# 1 2025-06-30T09:22:19.348834Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311174032014:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670311174032764:2433] 2025-06-30T09:22:19.348837Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670311174032014:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311174032764:2433], cookie# 1 2025-06-30T09:22:19.348839Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311174032017:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670311174032765:2433] 
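The subscriber traces around this point show path state replicated across three scheme board replicas and synced per cookie ("size# 3, half# 1, successes# N"). A rough model of that bookkeeping — "done" once every replica has replied (an assumption read off this trace, where sync stays in progress at successes# 2 and completes at successes# 3), "succeeded" on a strict majority; TSyncQuorum is an invented name, not the actual subscriber.cpp logic:

    // Simplified model only: the real subscriber tracks per-replica state,
    // ring groups and partial syncs; here we just count replies for one cookie.
    #include <cassert>
    #include <cstddef>

    class TSyncQuorum {
    public:
        explicit TSyncQuorum(std::size_t ringSize)
            : Size(ringSize), Half(ringSize / 2) {}

        // One replica's TEvSyncVersionResponse; ok=false for a failed reply.
        // Returns true once all replies for this cookie have arrived.
        bool HandleResponse(bool ok) {
            if (ok) { ++Successes; } else { ++Failures; }
            return Successes + Failures == Size;
        }

        // "half# 1" with size# 3: succeeded on a strict majority of successes.
        bool Succeeded() const { return Successes > Half; }

    private:
        std::size_t Size;
        std::size_t Half;
        std::size_t Successes = 0;
        std::size_t Failures = 0;
    };

    int main() {
        TSyncQuorum q(3);
        assert(!q.HandleResponse(true)); // successes# 1 -> in progress
        assert(!q.HandleResponse(true)); // successes# 2 -> in progress
        assert(q.HandleResponse(true));  // successes# 3 -> sync is done
        assert(q.Succeeded());
        return 0;
    }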
2025-06-30T09:22:19.348843Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670311174032017:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311174032765:2433], cookie# 1 2025-06-30T09:22:19.350853Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670311174032763:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311174032011:2050], cookie# 1 2025-06-30T09:22:19.350862Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670311174032764:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311174032014:2053], cookie# 1 2025-06-30T09:22:19.350865Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670311174032765:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311174032017:2056], cookie# 1 2025-06-30T09:22:19.350872Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670311174032759:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311174032760:2433], cookie# 1 2025-06-30T09:22:19.350880Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670311174032759:2433][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:22:19.350884Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670311174032759:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311174032761:2433], cookie# 1 2025-06-30T09:22:19.350887Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670311174032759:2433][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:22:19.350892Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670311174032759:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311174032762:2433], cookie# 1 2025-06-30T09:22:19.350904Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521670311174032759:2433][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:22:19.359727Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521670311174032360:2156], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... 360:2156] TSchemeCache with 2 scheme entries. DataReq marker# P2 2025-06-30T09:22:20.527265Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2823: Handle TEvTxProxySchemeCache::TEvResolveKeySet: self# [1:7521670311174032360:2156], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 8] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo Point: (Uint64 : 42) },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 7] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo Point: (Uint64 : 42) }] } 2025-06-30T09:22:20.527281Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2062: FillEntry for TResolve: self# [1:7521670311174032360:2156], cacheItem# { Subscriber: { Subscriber: [1:7521670315469000919:3098] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1751275340550 PathId: [OwnerId: 72057594046644480, LocalPathId: 8] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 8] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:20.527298Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2062: FillEntry for TResolve: self# [1:7521670311174032360:2156], cacheItem# { Subscriber: { Subscriber: [1:7521670315469000799:2989] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1751275340500 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 7] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:20.527348Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7521670315469000963:3113], recipient# [1:7521670315469000961:3111], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 8] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Point: (Uint64 : 42) },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 7] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } 
ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Point: (Uint64 : 42) }] } 2025-06-30T09:22:20.527366Z node 1 :TX_PROXY TRACE: datareq.cpp:499: StateWaitResolve, received event# 269746178, Sender [1:7521670315469000963:3113], Recipient [1:7521670315469000961:3111]: NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult 2025-06-30T09:22:20.527373Z node 1 :TX_PROXY TRACE: datareq.cpp:503: StateWaitResolve, processing event TEvTxProxySchemeCache::TEvResolveKeySetResult 2025-06-30T09:22:20.527377Z node 1 :TX_PROXY DEBUG: datareq.cpp:1620: Actor# [1:7521670315469000961:3111] txid# 281474976715668 HANDLE EvResolveKeySetResult TDataReq marker# P3 ErrorCount# 0 2025-06-30T09:22:20.527513Z node 1 :TX_PROXY DEBUG: datareq.cpp:1115: Actor# [1:7521670315469000961:3111] txid# 281474976715668 SEND TEvProposeTransaction to datashard 72075186224037892 with 327 bytes program affected shards 2 followers disallowed marker# P4 2025-06-30T09:22:20.527562Z node 1 :TX_PROXY DEBUG: datareq.cpp:1115: Actor# [1:7521670315469000961:3111] txid# 281474976715668 SEND TEvProposeTransaction to datashard 72075186224037894 with 327 bytes program affected shards 2 followers disallowed marker# P4 2025-06-30T09:22:20.529952Z node 1 :TX_PROXY TRACE: datareq.cpp:531: StateWaitPrepare, received event# 269550080, Sender [2:7521670313884491208:2284], Recipient [1:7521670315469000961:3111] 2025-06-30T09:22:20.529964Z node 1 :TX_PROXY TRACE: datareq.cpp:535: StateWaitPrepare, processing event TEvDataShard::TEvProposeTransactionResult 2025-06-30T09:22:20.529990Z node 1 :TX_PROXY DEBUG: datareq.cpp:1873: Actor# [1:7521670315469000961:3111] txid# 281474976715668 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# PREPARED shard id 72075186224037892 read size 0 out readset size 0 marker# P6 2025-06-30T09:22:20.529998Z node 1 :TX_PROXY TRACE: datareq.cpp:531: StateWaitPrepare, received event# 269550080, Sender [2:7521670313884491384:2297], Recipient [1:7521670315469000961:3111] 2025-06-30T09:22:20.530000Z node 1 :TX_PROXY TRACE: datareq.cpp:535: StateWaitPrepare, processing event TEvDataShard::TEvProposeTransactionResult 2025-06-30T09:22:20.530005Z node 1 :TX_PROXY DEBUG: datareq.cpp:1873: Actor# [1:7521670315469000961:3111] txid# 281474976715668 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# PREPARED shard id 72075186224037894 read size 0 out readset size 0 marker# P6 2025-06-30T09:22:20.530025Z node 1 :TX_PROXY DEBUG: datareq.cpp:2921: Actor# [1:7521670315469000961:3111] txid# 281474976715668 SEND EvProposeTransaction to# 72075186224037888 Coordinator marker# P7 2025-06-30T09:22:20.531049Z node 1 :TX_PROXY TRACE: datareq.cpp:563: StateWaitPlan, received event# 269091328, Sender [2:7521670309589523703:2256], Recipient [1:7521670315469000961:3111] 2025-06-30T09:22:20.531061Z node 1 :TX_PROXY TRACE: datareq.cpp:567: StateWaitPlan, processing event TEvTxProxy::TEvProposeTransactionStatus 2025-06-30T09:22:20.531069Z node 1 :TX_PROXY DEBUG: datareq.cpp:2111: Actor# [1:7521670315469000961:3111] txid# 281474976715668 HANDLE TEvProposeTransactionStatus TDataReq marker# P11 Status# 16 2025-06-30T09:22:20.551810Z node 1 :TX_PROXY TRACE: datareq.cpp:563: StateWaitPlan, received event# 269091328, Sender [2:7521670309589523703:2256], Recipient [1:7521670315469000961:3111] 2025-06-30T09:22:20.551824Z node 1 :TX_PROXY TRACE: datareq.cpp:567: StateWaitPlan, processing event TEvTxProxy::TEvProposeTransactionStatus 2025-06-30T09:22:20.551835Z node 1 :TX_PROXY 
DEBUG: datareq.cpp:2135: Actor# [1:7521670315469000961:3111] txid# 281474976715668 HANDLE TEvProposeTransactionStatus TDataReq marker# P10 Status# 17 2025-06-30T09:22:20.554362Z node 1 :TX_PROXY TRACE: datareq.cpp:563: StateWaitPlan, received event# 269550080, Sender [2:7521670313884491384:2297], Recipient [1:7521670315469000961:3111] 2025-06-30T09:22:20.554376Z node 1 :TX_PROXY TRACE: datareq.cpp:568: StateWaitPlan, processing event TEvDataShard::TEvProposeTransactionResult 2025-06-30T09:22:20.554399Z node 1 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [1:7521670315469000961:3111] txid# 281474976715668 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037894 marker# P12 2025-06-30T09:22:20.554423Z node 1 :TX_PROXY TRACE: datareq.cpp:563: StateWaitPlan, received event# 269550080, Sender [2:7521670313884491208:2284], Recipient [1:7521670315469000961:3111] 2025-06-30T09:22:20.554429Z node 1 :TX_PROXY TRACE: datareq.cpp:568: StateWaitPlan, processing event TEvDataShard::TEvProposeTransactionResult 2025-06-30T09:22:20.554435Z node 1 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [1:7521670315469000961:3111] txid# 281474976715668 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037892 marker# P12 2025-06-30T09:22:20.554595Z node 1 :TX_PROXY DEBUG: datareq.cpp:2691: Actor# [1:7521670315469000961:3111] txid# 281474976715668 MergeResult ExecComplete TDataReq marker# P17 2025-06-30T09:22:20.554636Z node 1 :TX_PROXY INFO: datareq.cpp:834: Actor# [1:7521670315469000961:3111] txid# 281474976715668 RESPONSE Status# ExecComplete prepare time: 0.002877s execute time: 0.024621s total time: 0.027498s marker# P13 2025-06-30T09:22:20.561131Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 2 2025-06-30T09:22:20.561250Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670311174032011:2050] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7521670309589523664:2114] 2025-06-30T09:22:20.561268Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670311174032011:2050] Unsubscribe: subscriber# [2:7521670309589523664:2114], path# /dc-1/USER_0 2025-06-30T09:22:20.561276Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670311174032014:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7521670309589523665:2114] 2025-06-30T09:22:20.561279Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670311174032014:2053] Unsubscribe: subscriber# [2:7521670309589523665:2114], path# /dc-1/USER_0 2025-06-30T09:22:20.561297Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670311174032017:2056] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7521670309589523666:2114] 2025-06-30T09:22:20.561301Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670311174032017:2056] Unsubscribe: subscriber# [2:7521670309589523666:2114], path# /dc-1/USER_0 2025-06-30T09:22:20.561464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:20.641277Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670311174032360:2156], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:20.641322Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7521670311174032360:2156], cacheItem# { Subscriber: { Subscriber: [1:7521670311174033071:2659] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:20.641349Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7521670315469000974:3123], recipient# [1:7521670315469000973:2299], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> VectorIndexBuildTest::PrefixedDuplicates [GOOD] >> VectorIndexBuildTest::Metering_ServerLessDB_Restarts-doRestarts-false ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::DropTableThenStore [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:32.237525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:32.237561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:32.237568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:32.237573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: 
OperationsProcessing config: using default configuration 2025-06-30T09:21:32.237586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:32.237592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:32.237603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:32.237622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:32.237753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:32.237919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:32.256115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:32.256150Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:32.256300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:32.261391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:32.262621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:32.262676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:32.269069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:32.269163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:32.269269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:32.269324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:32.273888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:32.273979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:32.274344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:32.274360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:32.274370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:32.274382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:32.274389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:32.274422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:32.276326Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:32.302829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:32.302931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.303008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:32.303017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:32.303065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:32.303088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:32.305180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:32.305261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:32.305350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.305378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 
1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:32.305384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:32.305392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:32.306306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.306323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:32.306330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:32.306823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.306836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.306844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:32.306853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:32.307725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:32.308271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:32.308320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:32.308579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:32.308623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
94046678944, cookie: 1005 2025-06-30T09:22:19.873358Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:22:19.873364Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-30T09:22:19.873371Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:22:19.873463Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.873475Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.873480Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:22:19.873485Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-30T09:22:19.873502Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:19.873514Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 0/1, is published: true 2025-06-30T09:22:19.874134Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1005:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-30T09:22:19.874166Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1005, partId: 0, tablet: 72075186233409546 2025-06-30T09:22:19.874439Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6237: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1005 2025-06-30T09:22:19.874447Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1005, tablet: 72075186233409546, partId: 0 2025-06-30T09:22:19.874462Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1005:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1005 2025-06-30T09:22:19.874478Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1005:0 129 -> 130 2025-06-30T09:22:19.874780Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:22:19.874924Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 1005 2025-06-30T09:22:19.875020Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:22:19.875044Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:22:19.875051Z node 87 :FLAT_TX_SCHEMESHARD INFO: drop_store.cpp:235: TDropOlapStore TProposedDeleteParts operationId# 1005:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:19.875069Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:22:19.875105Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:22:19.875111Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:19.875117Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:22:19.875120Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:19.875126Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: true 2025-06-30T09:22:19.875131Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:19.875136Z node 87 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1005:0 2025-06-30T09:22:19.875142Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1005:0 2025-06-30T09:22:19.875171Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:19.875569Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:22:19.875669Z node 87 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-30T09:22:19.875766Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:19.875966Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:22:19.876151Z node 87 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186233409546;self_id=[87:338:2323];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-30T09:22:19.877588Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:19.877603Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:22:19.877622Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:19.878618Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-30T09:22:19.878656Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-30T09:22:19.878816Z node 87 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 1005 2025-06-30T09:22:19.878882Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-30T09:22:19.878890Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-30T09:22:19.878966Z node 87 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-30T09:22:19.878987Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-30T09:22:19.878993Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [87:554:2522] TestWaitNotification: OK eventTxId 1005 2025-06-30T09:22:19.879063Z node 87 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore/ColumnTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:19.879101Z node 87 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore/ColumnTable" took 51us result status StatusPathDoesNotExist 2025-06-30T09:22:19.879147Z node 87 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/OlapStore/ColumnTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/OlapStore/ColumnTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:22:19.879209Z node 87 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: 
false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:19.879227Z node 87 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore" took 20us result status StatusPathDoesNotExist 2025-06-30T09:22:19.879244Z node 87 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/OlapStore\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/OlapStore" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::DeclareAndDefine [GOOD] Test command err: 2025-06-30T09:22:19.935909Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670311051302306:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:19.936222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00493b/r3tmp/tmpWnPuQJ/pdisk_1.dat 2025-06-30T09:22:20.005523Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:20.015115Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:20.035948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:20.035985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:4795 WaitRootIsUp 'dc-1'... 
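The TX_PROXY datareq markers in the GenericCases trace above (P4 propose to each affected datashard, P6 all shards PREPARED, P7 propose to the coordinator, P12 per-shard COMPLETE, P13 final ExecComplete response) outline a two-phase flow; the same sequence repeats in the DeclareAndDefine trace that follows. A compact, hypothetical model of that state machine — TxFlow and its methods are illustrative names, not the real actor code in datareq.cpp:

    // Sketch of the prepare/plan progression suggested by the P-markers above.
    #include <cstdint>
    #include <iostream>
    #include <set>
    #include <utility>

    enum class TxState { WaitPrepare, WaitPlan, Done };

    class TxFlow {
    public:
        explicit TxFlow(std::set<uint64_t> shards)
            : Affected(shards), Pending(std::move(shards)) {}

        // Prepare phase (P4/P6): each shard acks PREPARED; when the last one
        // does, the proxy would propose to the coordinator (P7).
        void OnPrepared(uint64_t shard) {
            if (State != TxState::WaitPrepare) return;
            Pending.erase(shard);
            if (Pending.empty()) {
                State = TxState::WaitPlan;
                Pending = Affected; // now expect COMPLETE from every shard
                std::cout << "SEND EvProposeTransaction to coordinator (P7)\n";
            }
        }

        // Plan phase (P12): shards report COMPLETE after the planned step
        // executes; the last one triggers the final response (P13).
        void OnComplete(uint64_t shard) {
            if (State != TxState::WaitPlan) return;
            Pending.erase(shard);
            if (Pending.empty()) {
                State = TxState::Done;
                std::cout << "RESPONSE Status# ExecComplete (P13)\n";
            }
        }

        bool Done() const { return State == TxState::Done; }

    private:
        std::set<uint64_t> Affected;
        std::set<uint64_t> Pending;
        TxState State = TxState::WaitPrepare;
    };

    int main() {
        // The trace above touches datashards 72075186224037892 and ...894.
        TxFlow tx({72075186224037892ull, 72075186224037894ull});
        tx.OnPrepared(72075186224037892ull);
        tx.OnPrepared(72075186224037894ull); // -> propose to coordinator
        tx.OnComplete(72075186224037894ull);
        tx.OnComplete(72075186224037892ull); // -> ExecComplete
        return tx.Done() ? 0 : 1;
    }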
TClient::Ls request: dc-1 2025-06-30T09:22:20.037321Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670311051302482:2141] Handle TEvNavigate describe path dc-1 2025-06-30T09:22:20.037996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:20.038990Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670315346270199:2428] HANDLE EvNavigateScheme dc-1 2025-06-30T09:22:20.039046Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670311051302506:2154], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:20.039062Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521670311051302506:2154], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:22:20.039114Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521670315346270202:2431][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:20.039640Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302162:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670315346270207:2431] 2025-06-30T09:22:20.039667Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302162:2050] Subscribe: subscriber# [1:7521670315346270207:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.039676Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302165:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670315346270208:2431] 2025-06-30T09:22:20.039685Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302165:2053] Subscribe: subscriber# [1:7521670315346270208:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.039702Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302168:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670315346270209:2431] 2025-06-30T09:22:20.039706Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302168:2056] Subscribe: subscriber# [1:7521670315346270209:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.039715Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670315346270207:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311051302162:2050] 2025-06-30T09:22:20.039721Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670315346270208:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311051302165:2053] 2025-06-30T09:22:20.039726Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670315346270209:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: 
sender# [1:7521670311051302168:2056] 2025-06-30T09:22:20.039734Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670315346270202:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670315346270204:2431] 2025-06-30T09:22:20.039743Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670315346270202:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670315346270205:2431] 2025-06-30T09:22:20.039772Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521670315346270202:2431][/dc-1] Set up state: owner# [1:7521670311051302506:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:20.039779Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311051302168:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670315346270209:2431] 2025-06-30T09:22:20.039810Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311051302162:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670315346270207:2431] 2025-06-30T09:22:20.039813Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311051302165:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670315346270208:2431] 2025-06-30T09:22:20.039819Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670315346270202:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670315346270206:2431] 2025-06-30T09:22:20.039839Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521670315346270202:2431][/dc-1] Path was already updated: owner# [1:7521670311051302506:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:20.039848Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670315346270207:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670315346270204:2431], cookie# 1 2025-06-30T09:22:20.039852Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670315346270208:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670315346270205:2431], cookie# 1 2025-06-30T09:22:20.039856Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670315346270209:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670315346270206:2431], cookie# 1 2025-06-30T09:22:20.039862Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670311051302162:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670315346270207:2431], cookie# 1 2025-06-30T09:22:20.039867Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: 
[1:7521670311051302165:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670315346270208:2431], cookie# 1 2025-06-30T09:22:20.039872Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670311051302168:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670315346270209:2431], cookie# 1 2025-06-30T09:22:20.039878Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670315346270207:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311051302162:2050], cookie# 1 2025-06-30T09:22:20.039882Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670315346270208:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311051302165:2053], cookie# 1 2025-06-30T09:22:20.039886Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670315346270209:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311051302168:2056], cookie# 1 2025-06-30T09:22:20.039892Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670315346270202:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670315346270204:2431], cookie# 1 2025-06-30T09:22:20.039899Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670315346270202:2431][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:22:20.039904Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670315346270202:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670315346270205:2431], cookie# 1 2025-06-30T09:22:20.039907Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670315346270202:2431][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:22:20.039911Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670315346270202:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670315346270206:2431], cookie# 1 2025-06-30T09:22:20.039916Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521670315346270202:2431][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:22:20.048606Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521670311051302506:2154], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 
Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: ... ut readset size 0 marker# P6 2025-06-30T09:22:20.581431Z node 1 :TX_PROXY DEBUG: datareq.cpp:2921: Actor# [1:7521670315346270937:2991] txid# 281474976715665 SEND EvProposeTransaction to# 72075186224037889 Coordinator marker# P7 2025-06-30T09:22:20.581968Z node 1 :TX_PROXY DEBUG: datareq.cpp:2111: Actor# [1:7521670315346270937:2991] txid# 281474976715665 HANDLE TEvProposeTransactionStatus TDataReq marker# P11 Status# 16 2025-06-30T09:22:20.602191Z node 1 :TX_PROXY DEBUG: datareq.cpp:2135: Actor# [1:7521670315346270937:2991] txid# 281474976715665 HANDLE TEvProposeTransactionStatus TDataReq marker# P10 Status# 17 2025-06-30T09:22:20.608139Z node 1 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [1:7521670315346270937:2991] txid# 281474976715665 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037894 marker# P12 2025-06-30T09:22:20.608174Z node 1 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [1:7521670315346270937:2991] txid# 281474976715665 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037892 marker# P12 2025-06-30T09:22:20.608333Z node 1 :TX_PROXY DEBUG: datareq.cpp:2691: Actor# [1:7521670315346270937:2991] txid# 281474976715665 MergeResult ExecComplete TDataReq marker# P17 2025-06-30T09:22:20.608372Z node 1 :TX_PROXY INFO: datareq.cpp:834: Actor# [1:7521670315346270937:2991] txid# 281474976715665 RESPONSE Status# ExecComplete prepare time: 0.003600s execute time: 0.026938s total time: 0.030538s marker# P13 2025-06-30T09:22:20.619382Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670311051302162:2050] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7521670312475662780:2108] 2025-06-30T09:22:20.619394Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670311051302162:2050] Unsubscribe: subscriber# [2:7521670312475662780:2108], path# /dc-1/USER_0 2025-06-30T09:22:20.619404Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670311051302165:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7521670312475662781:2108] 2025-06-30T09:22:20.619409Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670311051302165:2053] Unsubscribe: subscriber# [2:7521670312475662781:2108], path# /dc-1/USER_0 2025-06-30T09:22:20.619416Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670311051302168:2056] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7521670312475662782:2108] 2025-06-30T09:22:20.619420Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670311051302168:2056] Unsubscribe: subscriber# [2:7521670312475662782:2108], path# /dc-1/USER_0 2025-06-30T09:22:20.619566Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 2 2025-06-30T09:22:20.619837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:20.643022Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302162:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/delayed_requests DomainOwnerId: 72057594046644480 }: sender# [2:7521670312475663474:2511] 2025-06-30T09:22:20.643029Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: 
[1:7521670311051302162:2050] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/delayed_requests 2025-06-30T09:22:20.643030Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302165:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/delayed_requests DomainOwnerId: 72057594046644480 }: sender# [2:7521670312475663475:2511] 2025-06-30T09:22:20.643033Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7521670311051302165:2053] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/delayed_requests 2025-06-30T09:22:20.643051Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302162:2050] Subscribe: subscriber# [2:7521670312475663474:2511], path# /dc-1/USER_0/.metadata/workload_manager/delayed_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.643052Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302165:2053] Subscribe: subscriber# [2:7521670312475663475:2511], path# /dc-1/USER_0/.metadata/workload_manager/delayed_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.643066Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302162:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [2:7521670312475663480:2512] 2025-06-30T09:22:20.643066Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302165:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [2:7521670312475663481:2512] 2025-06-30T09:22:20.643068Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7521670311051302165:2053] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/running_requests 2025-06-30T09:22:20.643068Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7521670311051302162:2050] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/running_requests 2025-06-30T09:22:20.643075Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302165:2053] Subscribe: subscriber# [2:7521670312475663481:2512], path# /dc-1/USER_0/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.643083Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302165:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers DomainOwnerId: 72057594046644480 }: sender# [2:7521670312475663487:2513] 2025-06-30T09:22:20.643085Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7521670311051302165:2053] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers 2025-06-30T09:22:20.643085Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302162:2050] Subscribe: subscriber# [2:7521670312475663480:2512], path# /dc-1/USER_0/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.643091Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302165:2053] Subscribe: subscriber# [2:7521670312475663487:2513], path# /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers, domainOwnerId# 72057594046644480, 
capabilities# AckNotifications: true 2025-06-30T09:22:20.643093Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302162:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers DomainOwnerId: 72057594046644480 }: sender# [2:7521670312475663486:2513] 2025-06-30T09:22:20.643095Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7521670311051302162:2050] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers 2025-06-30T09:22:20.643101Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302162:2050] Subscribe: subscriber# [2:7521670312475663486:2513], path# /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.643102Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302168:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/delayed_requests DomainOwnerId: 72057594046644480 }: sender# [2:7521670312475663476:2511] 2025-06-30T09:22:20.643104Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7521670311051302168:2056] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/delayed_requests 2025-06-30T09:22:20.643110Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302168:2056] Subscribe: subscriber# [2:7521670312475663476:2511], path# /dc-1/USER_0/.metadata/workload_manager/delayed_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.643120Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302168:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [2:7521670312475663482:2512] 2025-06-30T09:22:20.643122Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7521670311051302168:2056] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/running_requests 2025-06-30T09:22:20.643127Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302168:2056] Subscribe: subscriber# [2:7521670312475663482:2512], path# /dc-1/USER_0/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.643134Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311051302168:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers DomainOwnerId: 72057594046644480 }: sender# [2:7521670312475663488:2513] 2025-06-30T09:22:20.643135Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7521670311051302168:2056] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers 2025-06-30T09:22:20.643141Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311051302168:2056] Subscribe: subscriber# [2:7521670312475663488:2513], path# /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.643581Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311051302165:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7521670312475663475:2511] 2025-06-30T09:22:20.643586Z node 1 :SCHEME_BOARD_REPLICA DEBUG: 
replica.cpp:1089: [1:7521670311051302165:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7521670312475663481:2512] 2025-06-30T09:22:20.643593Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311051302165:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7521670312475663487:2513] 2025-06-30T09:22:20.643598Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311051302162:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7521670312475663474:2511] 2025-06-30T09:22:20.643603Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311051302162:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7521670312475663480:2512] 2025-06-30T09:22:20.643607Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311051302162:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7521670312475663486:2513] 2025-06-30T09:22:20.643612Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311051302168:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7521670312475663476:2511] 2025-06-30T09:22:20.643617Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311051302168:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7521670312475663482:2512] 2025-06-30T09:22:20.643621Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311051302168:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7521670312475663488:2513]
>> SystemView::PgTablesOneSchemeShardDataQuery [GOOD]
>> SystemView::QueryStats
>> SystemView::StoragePoolsRanges [GOOD]
>> SystemView::TopPartitionsByCpuFields
>> TStorageTenantTest::CreateDummyTabletsInDifferentDomains
>> IndexBuildTest::Lock
>> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoWithError [GOOD]
>> LdapAuthProviderTest_StartTls::LdapFetchGroupsFromAdLdapServer
>> TStorageTenantTest::CreateTableInsideSubDomain [GOOD]
>> TStorageTenantTest::CopyTableAndConcurrentSplit [GOOD]
>> SystemView::VSlotsFields [GOOD]
>> SystemView::TopPartitionsByCpuTables
>> IndexBuildTest::CancellationNotEnoughRetries
>> TStorageTenantTest::CreateTableInsideSubDomain2 [GOOD]
>> VectorIndexBuildTest::CreateAndDrop
>> IndexBuildTest::BaseCase
>> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore [GOOD]
>> IndexBuildTest::Lock [GOOD]
>> IndexBuildTest::MergeIndexTableShardsOnlyWhenReady
>> LdapAuthProviderTest_StartTls::LdapFetchGroupsFromAdLdapServer [GOOD]
>> LdapAuthProviderTest_StartTls::LdapFetchGroupsDisableRequestToAD
>> IndexBuildTest::ShadowDataNotAllowedByDefault
>> SystemView::AuthGroups_TableRange [GOOD]
>> SystemView::AuthOwners
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateTableInsideSubDomain [GOOD]
Test command err:
2025-06-30T09:22:20.424763Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670312792691825:2159];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:20.424915Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004930/r3tmp/tmp7kGznl/pdisk_1.dat 2025-06-30T09:22:20.516295Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:22:20.523187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:20.523223Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:20.535465Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:20.535977Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TClient is connected to server localhost:12289 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-30T09:22:20.563847Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670312792691912:2141] Handle TEvNavigate describe path dc-1 2025-06-30T09:22:20.565692Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670312792692286:2391] HANDLE EvNavigateScheme dc-1 2025-06-30T09:22:20.565778Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670312792691961:2165], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:20.565802Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521670312792691961:2165], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:22:20.565852Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521670312792692287:2392][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:20.566325Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670312792691595:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670312792692291:2392] 2025-06-30T09:22:20.566333Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670312792691598:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670312792692292:2392] 2025-06-30T09:22:20.566351Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670312792691595:2051] Subscribe: subscriber# [1:7521670312792692291:2392], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.566352Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670312792691598:2054] Subscribe: subscriber# [1:7521670312792692292:2392], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.566370Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670312792691601:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670312792692293:2392] 2025-06-30T09:22:20.566374Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670312792691601:2057] Subscribe: subscriber# [1:7521670312792692293:2392], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.566377Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670312792692291:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670312792691595:2051] 2025-06-30T09:22:20.566382Z 
node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670312792692292:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670312792691598:2054] 2025-06-30T09:22:20.566386Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670312792691595:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670312792692291:2392] 2025-06-30T09:22:20.566392Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670312792692293:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670312792691601:2057] 2025-06-30T09:22:20.566393Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670312792691598:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670312792692292:2392] 2025-06-30T09:22:20.566399Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670312792691601:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670312792692293:2392] 2025-06-30T09:22:20.566399Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670312792692287:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670312792692288:2392] 2025-06-30T09:22:20.566406Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670312792692287:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670312792692289:2392] 2025-06-30T09:22:20.566418Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521670312792692287:2392][/dc-1] Set up state: owner# [1:7521670312792691961:2165], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:20.566465Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670312792692287:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670312792692290:2392] 2025-06-30T09:22:20.566486Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521670312792692287:2392][/dc-1] Path was already updated: owner# [1:7521670312792691961:2165], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:20.566495Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670312792692291:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670312792692288:2392], cookie# 1 2025-06-30T09:22:20.566499Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670312792692292:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670312792692289:2392], cookie# 1 2025-06-30T09:22:20.566503Z node 1 :SCHEME_BOARD_SUBSCRIBER 
DEBUG: subscriber.cpp:381: [replica][1:7521670312792692293:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670312792692290:2392], cookie# 1 2025-06-30T09:22:20.566899Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670312792691595:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670312792692291:2392], cookie# 1 2025-06-30T09:22:20.566913Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670312792691598:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670312792692292:2392], cookie# 1 2025-06-30T09:22:20.566928Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670312792691601:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670312792692293:2392], cookie# 1 2025-06-30T09:22:20.566937Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670312792692291:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670312792691595:2051], cookie# 1 2025-06-30T09:22:20.566941Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670312792692292:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670312792691598:2054], cookie# 1 2025-06-30T09:22:20.566945Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670312792692293:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670312792691601:2057], cookie# 1 2025-06-30T09:22:20.566951Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670312792692287:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670312792692288:2392], cookie# 1 2025-06-30T09:22:20.566957Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670312792692287:2392][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:22:20.566961Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670312792692287:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670312792692289:2392], cookie# 1 2025-06-30T09:22:20.566963Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670312792692287:2392][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:22:20.566967Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670312792692287:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670312792692290:2392], cookie# 1 2025-06-30T09:22:20.566972Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521670312792692287:2392][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:22:20.575995Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:7521670312792691912:2141] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:22:20.576206Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521670312792691961:2165], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: 
"/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediato ... alue" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "k... (TRUNCATED) 2025-06-30T09:22:21.640419Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670312792691595:2051] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7521670316001096165:2110] 2025-06-30T09:22:21.640445Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670312792691595:2051] Unsubscribe: subscriber# [3:7521670316001096165:2110], path# /dc-1/USER_0 2025-06-30T09:22:21.640455Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670312792691598:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7521670316001096166:2110] 2025-06-30T09:22:21.640459Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670312792691598:2054] Unsubscribe: subscriber# [3:7521670316001096166:2110], path# /dc-1/USER_0 2025-06-30T09:22:21.640469Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670312792691601:2057] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7521670316001096167:2110] 2025-06-30T09:22:21.640475Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670312792691601:2057] Unsubscribe: subscriber# [3:7521670316001096167:2110], path# /dc-1/USER_0 2025-06-30T09:22:21.640563Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-30T09:22:21.640791Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:21.814832Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670316001096143:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:21.814889Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670316001096143:2105], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown 
DomainInfo }] } 2025-06-30T09:22:21.814898Z node 3 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [3:7521670316001096143:2105], path# /dc-1/USER_0, domainOwnerId# 72057594046644480 2025-06-30T09:22:21.814959Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][3:7521670316001096555:2342][/dc-1/USER_0] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:21.815084Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:7521670316001096555:2342][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 Version: 0 }: sender# [3:7521670316001096556:2342] 2025-06-30T09:22:21.815101Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:7521670316001096555:2342][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 Version: 0 }: sender# [3:7521670316001096557:2342] 2025-06-30T09:22:21.815111Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][3:7521670316001096555:2342][/dc-1/USER_0] Set up state: owner# [3:7521670316001096143:2105], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:21.815117Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:7521670316001096555:2342][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 Version: 0 }: sender# [3:7521670316001096558:2342] 2025-06-30T09:22:21.815125Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][3:7521670316001096555:2342][/dc-1/USER_0] Ignore empty state: owner# [3:7521670316001096143:2105], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:21.815135Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7521670316001096143:2105], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: Strong: 0 } 2025-06-30T09:22:21.815150Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7521670316001096143:2105], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: Strong: 0 }, by path# { Subscriber: { Subscriber: [3:7521670316001096555:2342] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:22:21.815169Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7521670316001096143:2105], cacheItem# { Subscriber: { Subscriber: [3:7521670316001096555:2342] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:21.815182Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670316001096562:2343], recipient# [3:7521670316001096554:2341], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:21.815197Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670316001096143:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:21.815217Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670316001096563:2344], recipient# [3:7521670316001096548:2293], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:21.818789Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7521670316001096548:2293], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:21.819008Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670316001096143:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:21.819039Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670316001096565:2345], recipient# [3:7521670316001096564:2296], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:21.820718Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:21.870349Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670316001096143:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:21.870406Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670316001096566:2346], recipient# [3:7521670316001096548:2293], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:21.870483Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7521670316001096548:2293], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CopyTableAndConcurrentSplit [GOOD]
Test command err:
2025-06-30T09:22:20.690480Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670314100141003:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:20.690499Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004927/r3tmp/tmpHUuQOG/pdisk_1.dat 2025-06-30T09:22:20.750725Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:20.750971Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670314100140983:2080] 1751275340690337 != 1751275340690340 TClient is connected to server localhost:28290 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-30T09:22:20.775409Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670314100141188:2094] Handle TEvNavigate describe path dc-1 2025-06-30T09:22:20.777364Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670314100141678:2419] HANDLE EvNavigateScheme dc-1 2025-06-30T09:22:20.777426Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670314100141230:2116], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:20.777438Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521670314100141230:2116], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:22:20.777516Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521670314100141679:2420][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:20.778018Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670314100140955:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670314100141684:2420] 2025-06-30T09:22:20.778033Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670314100140952:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670314100141683:2420] 2025-06-30T09:22:20.778045Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670314100140955:2052] Subscribe: subscriber# [1:7521670314100141684:2420], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.778053Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670314100140952:2049] Subscribe: subscriber# [1:7521670314100141683:2420], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.778063Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670314100140958:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670314100141685:2420]
2025-06-30T09:22:20.778065Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670314100140958:2055] Subscribe: subscriber# [1:7521670314100141685:2420], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:20.778074Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670314100141684:2420][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670314100140955:2052] 2025-06-30T09:22:20.778078Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670314100141685:2420][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670314100140958:2055] 2025-06-30T09:22:20.778082Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670314100141679:2420][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670314100141681:2420] 2025-06-30T09:22:20.778087Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670314100141679:2420][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670314100141682:2420] 2025-06-30T09:22:20.778107Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670314100140955:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670314100141684:2420] 2025-06-30T09:22:20.778112Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670314100140958:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670314100141685:2420] 2025-06-30T09:22:20.778139Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521670314100141679:2420][/dc-1] Set up state: owner# [1:7521670314100141230:2116], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:20.778193Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670314100141683:2420][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670314100140952:2049] 2025-06-30T09:22:20.778203Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670314100141683:2420][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670314100141680:2420], cookie# 1 2025-06-30T09:22:20.778207Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670314100141684:2420][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670314100141681:2420], cookie# 1 2025-06-30T09:22:20.778210Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670314100141685:2420][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670314100141682:2420], cookie# 1 2025-06-30T09:22:20.778216Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670314100141679:2420][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670314100141680:2420] 2025-06-30T09:22:20.778233Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: 
subscriber.cpp:886: [main][1:7521670314100141679:2420][/dc-1] Path was already updated: owner# [1:7521670314100141230:2116], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:20.778245Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670314100140952:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670314100141683:2420] 2025-06-30T09:22:20.778249Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670314100140952:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670314100141683:2420], cookie# 1 2025-06-30T09:22:20.778254Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670314100140955:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670314100141684:2420], cookie# 1 2025-06-30T09:22:20.778258Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670314100140958:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670314100141685:2420], cookie# 1 2025-06-30T09:22:20.778265Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670314100141683:2420][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670314100140952:2049], cookie# 1 2025-06-30T09:22:20.778273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670314100141684:2420][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670314100140955:2052], cookie# 1 2025-06-30T09:22:20.778276Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670314100141685:2420][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670314100140958:2055], cookie# 1 2025-06-30T09:22:20.778282Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670314100141679:2420][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670314100141680:2420], cookie# 1 2025-06-30T09:22:20.778292Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670314100141679:2420][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:22:20.778299Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670314100141679:2420][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670314100141681:2420], cookie# 1 2025-06-30T09:22:20.778301Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670314100141679:2420][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:22:20.778303Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670314100141679:2420][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670314100141682:2420], cookie# 1 2025-06-30T09:22:20.778306Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521670314100141679:2420][/dc-1] Sync is done in the ring group: 
cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:22:20.788245Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521670314100141230:2116], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTable ... Board.TEvNotify { Path: /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [4:7521670318183893854:2734] 2025-06-30T09:22:21.903860Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][4:7521670318183893839:2734][/dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers] Ignore empty state: owner# [4:7521670318183892689:2103], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:21.903879Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7521670318183892689:2103], cacheItem# { Subscriber: { Subscriber: [4:7521670318183893838:2733] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:21.903884Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [4:7521670318183892689:2103], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-30T09:22:21.903890Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [4:7521670318183892689:2103], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: 
[4:7521670318183893837:2732] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:22:21.903897Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7521670318183892689:2103], cacheItem# { Subscriber: { Subscriber: [4:7521670318183893837:2732] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:21.903911Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [4:7521670318183892689:2103], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-30T09:22:21.903918Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [4:7521670318183892689:2103], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [4:7521670318183893839:2734] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:22:21.903928Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7521670318183892689:2103], cacheItem# { Subscriber: { Subscriber: [4:7521670318183893839:2734] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:21.903941Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7521670318183893858:2735], recipient# [4:7521670318183893833:2330], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:21.903955Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# 
[4:7521670318183893859:2736], recipient# [4:7521670318183893834:2331], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:21.902987Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7521670318565500231:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [4:7521670318183893850:2733] 2025-06-30T09:22:21.902988Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7521670318565500231:2054] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/running_requests 2025-06-30T09:22:21.902996Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7521670318565500231:2054] Subscribe: subscriber# [4:7521670318183893850:2733], path# /dc-1/USER_0/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:21.903001Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7521670318565500231:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers DomainOwnerId: 72057594046644480 }: sender# [4:7521670318183893856:2734] 2025-06-30T09:22:21.903003Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7521670318565500231:2054] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers 2025-06-30T09:22:21.903008Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7521670318565500231:2054] Subscribe: subscriber# [4:7521670318183893856:2734], path# /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:21.903014Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7521670318565500234:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/delayed_requests DomainOwnerId: 72057594046644480 }: sender# [4:7521670318183893845:2732] 2025-06-30T09:22:21.903016Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7521670318565500234:2057] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/delayed_requests 2025-06-30T09:22:21.903020Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7521670318565500234:2057] Subscribe: subscriber# [4:7521670318183893845:2732], path# /dc-1/USER_0/.metadata/workload_manager/delayed_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:21.903027Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7521670318565500234:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [4:7521670318183893851:2733] 2025-06-30T09:22:21.903029Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7521670318565500234:2057] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/running_requests 2025-06-30T09:22:21.903033Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7521670318565500234:2057] Subscribe: subscriber# [4:7521670318183893851:2733], path# 
/dc-1/USER_0/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:21.903039Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7521670318565500234:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers DomainOwnerId: 72057594046644480 }: sender# [4:7521670318183893857:2734] 2025-06-30T09:22:21.903040Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7521670318565500234:2057] Upsert description: path# /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers 2025-06-30T09:22:21.903048Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7521670318565500234:2057] Subscribe: subscriber# [4:7521670318183893857:2734], path# /dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:21.903937Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7521670318565500228:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [4:7521670318183893843:2732] 2025-06-30T09:22:21.903945Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7521670318565500228:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [4:7521670318183893849:2733] 2025-06-30T09:22:21.903948Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7521670318565500228:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [4:7521670318183893855:2734] 2025-06-30T09:22:21.903953Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7521670318565500231:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [4:7521670318183893844:2732] 2025-06-30T09:22:21.903957Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7521670318565500231:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [4:7521670318183893850:2733] 2025-06-30T09:22:21.903961Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7521670318565500231:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [4:7521670318183893856:2734] 2025-06-30T09:22:21.903965Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7521670318565500234:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [4:7521670318183893851:2733] 2025-06-30T09:22:21.903968Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7521670318565500234:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [4:7521670318183893845:2732] 2025-06-30T09:22:21.903972Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7521670318565500234:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [4:7521670318183893857:2734] >> VectorIndexBuildTest::Metering_CommonDB ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateTableInsideSubDomain2 [GOOD] Test command err: 2025-06-30T09:22:19.562221Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670311533127789:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:19.562256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/00493c/r3tmp/tmpee44EA/pdisk_1.dat 2025-06-30T09:22:19.665708Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:19.670983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:19.671018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:19.673918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:19.681230Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TClient is connected to server localhost:5996 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-30T09:22:19.725777Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670311533127978:2142] Handle TEvNavigate describe path dc-1 2025-06-30T09:22:19.728293Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:7521670311533127978:2142] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:22:19.728319Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670311533128353:2396] HANDLE EvNavigateScheme dc-1 2025-06-30T09:22:19.728361Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670311533128001:2155], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.728371Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521670311533128001:2155], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:22:19.728426Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521670311533128354:2397][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:19.729000Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311533127656:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670311533128358:2397] 2025-06-30T09:22:19.729020Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311533127656:2051] Subscribe: subscriber# [1:7521670311533128358:2397], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.729034Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311533127659:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670311533128359:2397] 2025-06-30T09:22:19.729038Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311533127659:2054] Subscribe: subscriber# [1:7521670311533128359:2397], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.729044Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670311533127662:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670311533128360:2397] 2025-06-30T09:22:19.729048Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670311533127662:2057] Subscribe: subscriber# [1:7521670311533128360:2397], path# /dc-1, domainOwnerId# 
72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.729062Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670311533128358:2397][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311533127656:2051] 2025-06-30T09:22:19.729067Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670311533128359:2397][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311533127659:2054] 2025-06-30T09:22:19.729072Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670311533128360:2397][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311533127662:2057] 2025-06-30T09:22:19.729079Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670311533128354:2397][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311533128355:2397] 2025-06-30T09:22:19.729088Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670311533128354:2397][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311533128356:2397] 2025-06-30T09:22:19.729109Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521670311533128354:2397][/dc-1] Set up state: owner# [1:7521670311533128001:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.729144Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670311533128354:2397][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670311533128357:2397] 2025-06-30T09:22:19.729166Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521670311533128354:2397][/dc-1] Path was already updated: owner# [1:7521670311533128001:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.729175Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670311533128358:2397][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311533128355:2397], cookie# 1 2025-06-30T09:22:19.729178Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670311533128359:2397][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311533128356:2397], cookie# 1 2025-06-30T09:22:19.729181Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670311533128360:2397][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311533128357:2397], cookie# 1 2025-06-30T09:22:19.729190Z node 1 :SCHEME_BOARD_REPLICA 
DEBUG: replica.cpp:1089: [1:7521670311533127656:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670311533128358:2397] 2025-06-30T09:22:19.729195Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670311533127656:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311533128358:2397], cookie# 1 2025-06-30T09:22:19.729200Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311533127659:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670311533128359:2397] 2025-06-30T09:22:19.729203Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670311533127659:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311533128359:2397], cookie# 1 2025-06-30T09:22:19.729208Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670311533127662:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670311533128360:2397] 2025-06-30T09:22:19.729211Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670311533127662:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670311533128360:2397], cookie# 1 2025-06-30T09:22:19.734818Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670311533128358:2397][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311533127656:2051], cookie# 1 2025-06-30T09:22:19.734843Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670311533128359:2397][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311533127659:2054], cookie# 1 2025-06-30T09:22:19.734847Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670311533128360:2397][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311533127662:2057], cookie# 1 2025-06-30T09:22:19.734858Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670311533128354:2397][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311533128355:2397], cookie# 1 2025-06-30T09:22:19.734868Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670311533128354:2397][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:22:19.734872Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670311533128354:2397][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311533128356:2397], cookie# 1 2025-06-30T09:22:19.734876Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670311533128354:2397][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:22:19.734880Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670311533128354:2397][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670311533128357:2397], cookie# 1 2025-06-30T09:22:19.734886Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521670311533128354:2397][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:22:19.735747Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 
281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:22:19.751298Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521670311533128001:2155], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescriptio ... PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7521670324418030879:2842] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 2 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1751275341950 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, by pathId# nullptr 2025-06-30T09:22:22.030276Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7521670311533128001:2155], cacheItem# { Subscriber: { Subscriber: [1:7521670324418030879:2842] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 2 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1751275341950 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: dc-1/USER_0/SimpleTable TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 2 IsSync: true Partial: 0 } 2025-06-30T09:22:22.030334Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7521670324418030889:2846], recipient# [1:7521670324418030888:2845], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/SimpleTable TableId: [72057594046644480:3:1] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:22:22.030345Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7521670324418030888:2845] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-30T09:22:22.030363Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7521670324418030888:2845] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1/USER_0/SimpleTable" Options { ShowPrivateTable: true } 2025-06-30T09:22:22.030773Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7521670324418030888:2845] Handle TEvDescribeSchemeResult Forward to# [1:7521670324418030887:2844] Cookie: 0 
TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 52 Record# Status: StatusSuccess Path: "/dc-1/USER_0/SimpleTable" PathDescription { Self { Name: "SimpleTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715660 CreateStep: 1751275341950 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "SimpleTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } FollowerCount: 2 PartitioningPolicy { MinPartitionsCount: 2 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 
HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } DomainKey { SchemeShard: 72057594046644480 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "SimpleTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715660 CreateStep: 1751275341950 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "SimpleTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "k... 
(TRUNCATED) 2025-06-30T09:22:22.032495Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670311533128001:2155], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:22.032533Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7521670311533128001:2155], cacheItem# { Subscriber: { Subscriber: [1:7521670315828096002:2649] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:22.032553Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7521670324418030903:2859], recipient# [1:7521670324418030902:2286], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:22.040999Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-30T09:22:22.041303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:22.041493Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670311533127656:2051] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7521670311625564294:2101] 2025-06-30T09:22:22.041502Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670311533127656:2051] Unsubscribe: subscriber# [3:7521670311625564294:2101], path# /dc-1/USER_0 2025-06-30T09:22:22.041509Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670311533127659:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7521670311625564295:2101] 2025-06-30T09:22:22.041513Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670311533127659:2054] Unsubscribe: subscriber# [3:7521670311625564295:2101], path# /dc-1/USER_0 2025-06-30T09:22:22.041518Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7521670311533127662:2057] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7521670311625564296:2101] 2025-06-30T09:22:22.041521Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7521670311533127662:2057] Unsubscribe: subscriber# [3:7521670311625564296:2101], path# /dc-1/USER_0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore [GOOD] Test command err: 2025-06-30T09:22:19.374288Z node 
1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670308402644575:2085];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:19.374309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00494e/r3tmp/tmpkBuBsS/pdisk_1.dat 2025-06-30T09:22:19.460717Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:19.475020Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:19.475052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:19.480021Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8913 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-30T09:22:19.494294Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670308402644713:2140] Handle TEvNavigate describe path dc-1 2025-06-30T09:22:19.496389Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670308402645159:2426] HANDLE EvNavigateScheme dc-1 2025-06-30T09:22:19.496462Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670308402644763:2154], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:19.496482Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521670308402644763:2154], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:22:19.496544Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521670308402645160:2427][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:19.497070Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670308402644423:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670308402645164:2427] 2025-06-30T09:22:19.497097Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670308402644423:2051] Subscribe: subscriber# [1:7521670308402645164:2427], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.497124Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670308402644426:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670308402645165:2427] 2025-06-30T09:22:19.497129Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670308402644426:2054] Subscribe: subscriber# [1:7521670308402645165:2427], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.497137Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670308402644429:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670308402645166:2427] 2025-06-30T09:22:19.497141Z node 1 :SCHEME_BOARD_REPLICA INFO: 
replica.cpp:646: [1:7521670308402644429:2057] Subscribe: subscriber# [1:7521670308402645166:2427], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:19.497155Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670308402645164:2427][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670308402644423:2051] 2025-06-30T09:22:19.497161Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670308402645165:2427][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670308402644426:2054] 2025-06-30T09:22:19.497166Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670308402645166:2427][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670308402644429:2057] 2025-06-30T09:22:19.497174Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670308402645160:2427][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670308402645161:2427] 2025-06-30T09:22:19.497184Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670308402645160:2427][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670308402645162:2427] 2025-06-30T09:22:19.497217Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521670308402645160:2427][/dc-1] Set up state: owner# [1:7521670308402644763:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.497257Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670308402645160:2427][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670308402645163:2427] 2025-06-30T09:22:19.497279Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521670308402645160:2427][/dc-1] Path was already updated: owner# [1:7521670308402644763:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:19.497289Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670308402645164:2427][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308402645161:2427], cookie# 1 2025-06-30T09:22:19.497293Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670308402645165:2427][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308402645162:2427], cookie# 1 2025-06-30T09:22:19.497296Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670308402645166:2427][/dc-1] Handle 
NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308402645163:2427], cookie# 1 2025-06-30T09:22:19.497304Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670308402644423:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670308402645164:2427] 2025-06-30T09:22:19.497309Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670308402644423:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308402645164:2427], cookie# 1 2025-06-30T09:22:19.497314Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670308402644426:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670308402645165:2427] 2025-06-30T09:22:19.497317Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670308402644426:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308402645165:2427], cookie# 1 2025-06-30T09:22:19.497322Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670308402644429:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670308402645166:2427] 2025-06-30T09:22:19.497325Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670308402644429:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670308402645166:2427], cookie# 1 2025-06-30T09:22:19.498795Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670308402645164:2427][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670308402644423:2051], cookie# 1 2025-06-30T09:22:19.498806Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670308402645165:2427][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670308402644426:2054], cookie# 1 2025-06-30T09:22:19.498810Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670308402645166:2427][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670308402644429:2057], cookie# 1 2025-06-30T09:22:19.498820Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670308402645160:2427][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670308402645161:2427], cookie# 1 2025-06-30T09:22:19.498831Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670308402645160:2427][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:22:19.498835Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670308402645160:2427][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670308402645162:2427], cookie# 1 2025-06-30T09:22:19.498839Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670308402645160:2427][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:22:19.498842Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670308402645160:2427][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670308402645163:2427], cookie# 1 2025-06-30T09:22:19.498848Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521670308402645160:2427][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, 
half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:22:19.511999Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521670308402644763:2154], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... ainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:21.963965Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7521670315956519329:2230], cacheItem# { Subscriber: { Subscriber: [2:7521670315956519477:2305] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:21.963970Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7521670315956519329:2230], cacheItem# { Subscriber: { Subscriber: [2:7521670315956519478:2306] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:21.963992Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7521670315956519563:2336], 
recipient# [2:7521670315956519562:2494], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:22.172554Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670308455398340:2215], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:22.172607Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7521670308455398340:2215], cacheItem# { Subscriber: { Subscriber: [3:7521670312750365935:2284] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:22.172639Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670308455398340:2215], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:22.172653Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7521670308455398340:2215], cacheItem# { Subscriber: { Subscriber: [3:7521670312750365934:2283] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:22.172672Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670321340303453:3268], recipient# [3:7521670321340303451:3125], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: 
dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:22.172684Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670321340303454:3269], recipient# [3:7521670321340303452:3126], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:22.453133Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:22.473833Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7521670315956519329:2230], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:22.473869Z node 2 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [2:7521670315956519329:2230], path# /dc-1/USER_1/.metadata/initialization/migrations, domainOwnerId# 72057594046644480 2025-06-30T09:22:22.473955Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][2:7521670320251486865:2337][/dc-1/USER_1/.metadata/initialization/migrations] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:22.474067Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:7521670320251486865:2337][/dc-1/USER_1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_1/.metadata/initialization/migrations Version: 0 }: sender# [2:7521670320251486866:2337] 2025-06-30T09:22:22.474084Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:7521670320251486865:2337][/dc-1/USER_1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_1/.metadata/initialization/migrations Version: 0 }: sender# [2:7521670320251486867:2337] 2025-06-30T09:22:22.474094Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][2:7521670320251486865:2337][/dc-1/USER_1/.metadata/initialization/migrations] Set up state: owner# [2:7521670315956519329:2230], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:22.474100Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:7521670320251486865:2337][/dc-1/USER_1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_1/.metadata/initialization/migrations Version: 0 }: sender# [2:7521670320251486868:2337] 2025-06-30T09:22:22.474106Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][2:7521670320251486865:2337][/dc-1/USER_1/.metadata/initialization/migrations] Ignore empty state: owner# [2:7521670315956519329:2230], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 
elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:22.474116Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [2:7521670315956519329:2230], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_1/.metadata/initialization/migrations PathId: Strong: 0 } 2025-06-30T09:22:22.474129Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [2:7521670315956519329:2230], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_1/.metadata/initialization/migrations PathId: Strong: 0 }, by path# { Subscriber: { Subscriber: [2:7521670320251486865:2337] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-30T09:22:22.474145Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7521670315956519329:2230], cacheItem# { Subscriber: { Subscriber: [2:7521670320251486865:2337] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:22.474158Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7521670320251486872:2338], recipient# [2:7521670320251486864:2495], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:22.474279Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_1/.metadata/initialization/migrations;error=incorrect path status: LookupError; >> VectorIndexBuildTest::RecreatedColumns >> IndexBuildTest::ShadowDataNotAllowedByDefault [GOOD] >> IndexBuildTest::ShadowDataEdgeCases >> LdapAuthProviderTest_StartTls::LdapFetchGroupsDisableRequestToAD [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithCustomGroupAttributeGood >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoWithError [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetDefaultFilter [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithOneLoginPlaceholder [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithSearchAttribute [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithFewLoginPlaceholders [GOOD] >> IndexBuildTest::ShadowDataEdgeCases [GOOD] >> IndexBuildTest::WithFollowers >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> 
TLdapUtilsSearchFilterCreatorTest::GetFilterWithFewLoginPlaceholders [GOOD] Test command err: 2025-06-30T09:22:02.501921Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670238422806569:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:02.501950Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046b1/r3tmp/tmpwfOcB2/pdisk_1.dat 2025-06-30T09:22:02.561078Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:02.561317Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670238422806549:2080] 1751275322501737 != 1751275322501740 TServer::EnableGrpc on GrpcPort 20695, node 1 2025-06-30T09:22:02.580131Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.580146Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.580149Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.580200Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.639108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.639150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.640668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:02.714827Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.715861Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.715868Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.716060Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:63162, port: 63162 2025-06-30T09:22:02.716433Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:02.721071Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:02.766921Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:02.767111Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:02.767121Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: 
(|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:02.810916Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:02.859698Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:02.860390Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****hBkA (E45CB3BA) () has now valid token of ldapuser@ldap 2025-06-30T09:22:03.505001Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:06.511707Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****hBkA (E45CB3BA) 2025-06-30T09:22:06.511800Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:63162, port: 63162 2025-06-30T09:22:06.511830Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:06.526135Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:06.526358Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:63162 return no entries 2025-06-30T09:22:06.526515Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****hBkA (E45CB3BA) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. 
LDAP search for filter uid=ldapuser on server ldap://localhost:63162 return no entries)' 2025-06-30T09:22:07.502921Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521670238422806569:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:07.502964Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:22:11.519646Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****hBkA (E45CB3BA) 2025-06-30T09:22:13.076524Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670284376830586:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:13.076715Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046b1/r3tmp/tmpLTMVae/pdisk_1.dat 2025-06-30T09:22:13.092840Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:13.093442Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670284376830557:2080] 1751275333076297 != 1751275333076300 TServer::EnableGrpc on GrpcPort 3016, node 2 2025-06-30T09:22:13.116691Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:13.116708Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:13.116711Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:13.116775Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:13.153557Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:13.160330Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:13.160355Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:13.160599Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:7449, port: 7449 2025-06-30T09:22:13.160622Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:13.183361Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:13.183387Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:13.183430Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:13.183639Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:7449. 
Server is busy 2025-06-30T09:22:13.183772Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****SrCg (A19CA756) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:7449. Server is busy)' 2025-06-30T09:22:13.183835Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:13.183841Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:13.184077Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:7449, port: 7449 2025-06-30T09:22:13.184106Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:13.184327Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:13.203735Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:13.203933Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:7449. Server is busy 2025-06-30T09:22:13.204053Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****SrCg (A19CA756) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:7449. Server is busy)' 2025-06-30T09:22:14.078566Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:15.077973Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****SrCg (A19CA756) 2025-06-30T09:22:15.078034Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:15.078039Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:15.078229Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:7449, port: 7449 2025-06-30T09:22:15.078266Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:15.095114Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:15.095277Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:7449. Server is busy 2025-06-30T09:22:15.095395Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****SrCg (A19CA756) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:7449. 
Server is busy)' 2025-06-30T09:22:18.076789Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7521670284376830586:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:18.076840Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:22:19.080530Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****SrCg (A19CA756) 2025-06-30T09:22:19.080635Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:19.080643Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:19.080931Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:7449, port: 7449 2025-06-30T09:22:19.080970Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:19.095289Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:19.140216Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:19.140497Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:19.140516Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:19.185511Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:19.230874Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:19.231381Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****SrCg (A19CA756) () has now valid token of ldapuser@ldap 2025-06-30T09:22:23.082569Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****SrCg (A19CA756) 2025-06-30T09:22:23.082599Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:7449, port: 7449 2025-06-30T09:22:23.082627Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:23.084901Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:23.126922Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: 
dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1
2025-06-30T09:22:23.127169Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal
2025-06-30T09:22:23.127184Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf
2025-06-30T09:22:23.174953Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf
2025-06-30T09:22:23.218959Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf
LDAP_MOCK: SearchRequest, protocol error, unexpected request, not matched any of provided to mock
2025-06-30T09:22:23.219393Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****SrCg (A19CA756) () has now valid token of ldapuser@ldap
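The traversal above is the provider's fallback for resolving nested LDAP groups: starting from the user's direct memberOf values, it repeatedly searches for the parents of newly discovered groups until no new DNs turn up. Below is a minimal standalone C++ sketch of that fixpoint loop; the in-memory kParents map and the dc=example,dc=net DNs are illustrative stand-ins for real LDAP searches, not YDB code.

// Sketch of the nested-group "tree traversal" visible in the log above.
// The in-memory map stands in for real LDAP searches; buildOrFilter shows
// the "(|(entryDn=...)(entryDn=...))" filter shape from the log.
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

// Hypothetical directory: group DN -> DNs of the groups it belongs to.
static const std::map<std::string, std::vector<std::string>> kParents = {
    {"cn=project1,cn=developers,cn=people,ou=groups,dc=example,dc=net",
     {"cn=developers,cn=people,ou=groups,dc=example,dc=net"}},
    {"cn=developers,cn=people,ou=groups,dc=example,dc=net",
     {"cn=people,ou=groups,dc=example,dc=net"}},
    {"cn=people,ou=groups,dc=example,dc=net", {}},
};

// Build the disjunctive filter used for one traversal round.
std::string buildOrFilter(const std::set<std::string>& dns) {
    std::string filter = "(|";
    for (const auto& dn : dns) filter += "(entryDn=" + dn + ")";
    return filter + ")";
}

int main() {
    // Direct groups of the user (the first memberOf search in the log).
    std::set<std::string> all = {
        "cn=project1,cn=developers,cn=people,ou=groups,dc=example,dc=net"};
    std::set<std::string> frontier = all;
    while (!frontier.empty()) {
        std::cout << "search filter: " << buildOrFilter(frontier) << "\n";
        std::set<std::string> next;
        for (const auto& dn : frontier) {
            auto it = kParents.find(dn);
            if (it == kParents.end()) continue;
            for (const auto& parent : it->second)
                if (all.insert(parent).second) next.insert(parent);  // new group
        }
        frontier = std::move(next);  // stop once a round finds nothing new
    }
    std::cout << "resolved " << all.size() << " groups\n";
}

Run against the sample map, it prints one (|(entryDn=...)...) filter per round, mirroring the successively narrowing searches in the log entries above.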
>> SystemView::QueryStats [GOOD]
>> SystemView::QueryStatsFields
>> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD]
>> LdapAuthProviderTest_StartTls::LdapRefreshRemoveUserBad
|82.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest
>> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD]
Test command err:
2025-06-30T09:22:00.746325Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670227166294089:2150];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:22:00.794810Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046bb/r3tmp/tmpoap20o/pdisk_1.dat
2025-06-30T09:22:00.817279Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:22:00.817565Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670227166293965:2080] 1751275320738135 != 1751275320738138
TServer::EnableGrpc on GrpcPort 20706, node 1
2025-06-30T09:22:00.833344Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:22:00.833360Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:22:00.833363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:22:00.833416Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-30T09:22:00.902081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:22:00.902147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:22:00.903704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-30T09:22:01.073546Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1
2025-06-30T09:22:01.073702Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1
2025-06-30T09:22:01.073708Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root
2025-06-30T09:22:01.073970Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:32247, port: 32247
2025-06-30T09:22:01.074347Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net
2025-06-30T09:22:01.078151Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN
2025-06-30T09:22:01.121484Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****OYRQ (33918C42) () has now valid token of ldapuser@ldap
2025-06-30T09:22:01.425860Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046bb/r3tmp/tmp3HStRd/pdisk_1.dat
2025-06-30T09:22:01.430298Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 63972, node 2
2025-06-30T09:22:01.441591Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:22:01.441605Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:22:01.441607Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:22:01.441676Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-30T09:22:01.520373Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:22:01.520408Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:22:01.521191Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-30T09:22:01.546797Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1
2025-06-30T09:22:01.547106Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1
2025-06-30T09:22:01.547117Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root
2025-06-30T09:22:01.547289Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:8341, port: 8341
2025-06-30T09:22:01.547318Z node 2 :LDAP_AUTH_PROVIDER DEBUG:
ldap_auth_provider.cpp:195: bind: bindDn: cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:01.560964Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:8341. Invalid credentials 2025-06-30T09:22:01.561114Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****469g (AF2955E9) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:8341. Invalid credentials)' 2025-06-30T09:22:01.899883Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670233542197859:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:01.899918Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046bb/r3tmp/tmpYncHLU/pdisk_1.dat 2025-06-30T09:22:01.931008Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:01.931284Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670233542197840:2080] 1751275321899742 != 1751275321899745 TServer::EnableGrpc on GrpcPort 9410, node 3 2025-06-30T09:22:01.942406Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:01.942429Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:01.942431Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:01.942491Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.006081Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.006128Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.007115Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:02.022792Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.023769Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.023777Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.023908Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:8460, port: 8460 2025-06-30T09:22:02.023929Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:02.035522Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:8460. 
Invalid credentials 2025-06-30T09:22:02.035671Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****trKg (C93EB7DB) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:8460. Invalid credentials)' test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046bb/r3tmp/tmpJkGaHE/pdisk_1.dat 2025-06-30T09:22:02.448648Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521670237102557567:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:02.450164Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:02.476268Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:02.476471Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521670237102557459:2080] 1751275322446813 != 1751275322446816 TServer::EnableGrpc on GrpcPort 22259, node 4 2025-06-30T09:22:02.488044Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.488062Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.488065Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.488115Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.542845Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.543612Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.543626Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.543814Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:17685, port: 17685 2025-06-30T09:22:02.543846Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:02.549533Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:02.549684Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:17685 return no entries 2025-06-30T09:22:02.549776Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****P0vw (8510AEC3) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:17685 return no entries)' 2025-06-30T09:22:02.561821Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.561852Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T ... 
:22:03.115593Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:03.115806Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:10653, port: 10653 2025-06-30T09:22:03.115831Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:03.115910Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:03.115931Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:03.118212Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:03.125659Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:03.171437Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:03.171718Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:03.171732Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:03.215181Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:03.259983Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:03.260346Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****TEOQ (7ECD6F8C) () has now valid token of ldapuser@ldap 2025-06-30T09:22:04.014860Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:08.017634Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7521670242701982685:2086];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:08.017680Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:22:08.019710Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****TEOQ (7ECD6F8C) 2025-06-30T09:22:08.019774Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:10653, port: 10653 2025-06-30T09:22:08.019805Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:08.031320Z node 
5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:08.077268Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:08.077578Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:08.077599Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:08.123850Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:08.170947Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:08.171419Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****TEOQ (7ECD6F8C) () has now valid token of ldapuser@ldap 2025-06-30T09:22:13.031284Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****TEOQ (7ECD6F8C) 2025-06-30T09:22:13.031333Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:10653, port: 10653 2025-06-30T09:22:13.031375Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:13.056611Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:13.102963Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:13.103295Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:13.103313Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:13.148224Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:13.191495Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:13.191847Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****TEOQ (7ECD6F8C) () has now valid token of ldapuser@ldap 2025-06-30T09:22:13.432255Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670285259794949:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:13.434551Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046bb/r3tmp/tmpP018d4/pdisk_1.dat 2025-06-30T09:22:13.464335Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:13.465645Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7521670285259794924:2080] 1751275333431831 != 1751275333431834 TServer::EnableGrpc on GrpcPort 61463, node 6 2025-06-30T09:22:13.478715Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:13.478728Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:13.478730Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:13.478796Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:13.532886Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:13.532920Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:13.534319Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:13.543165Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:13.545027Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:13.545035Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:13.545265Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:28462, port: 28462 2025-06-30T09:22:13.545291Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:13.653792Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:13.695217Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****tNOQ (4847C336) () has now valid token of ldapuser@ldap 2025-06-30T09:22:14.431518Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:18.432858Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7521670285259794949:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:18.432900Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:22:19.438383Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: 
Refreshing ticket eyJh****tNOQ (4847C336) 2025-06-30T09:22:19.438422Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:28462, port: 28462 2025-06-30T09:22:19.438462Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:19.467262Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:19.511131Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****tNOQ (4847C336) () has now valid token of ldapuser@ldap 2025-06-30T09:22:22.439721Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****tNOQ (4847C336) 2025-06-30T09:22:22.439777Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:28462, port: 28462 2025-06-30T09:22:22.439806Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:22.454508Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:22.499532Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****tNOQ (4847C336) () has now valid token of ldapuser@ldap |82.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> IndexBuildTest::WithFollowers [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] |82.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> ShowCreateView::WithSingleQuotedTablePathPrefix [GOOD] >> ShowCreateView::WithTwoTablePathPrefixes |82.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-30T09:22:00.427291Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670226604143635:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:00.427450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046de/r3tmp/tmpQjDRg5/pdisk_1.dat 2025-06-30T09:22:00.612751Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:00.639515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:00.639541Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 19618, node 1 2025-06-30T09:22:00.643262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:00.670954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
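The searches that follow show the fast path tried before any traversal: a single query with the extensible-match filter (member:1.2.840.113556.1.4.1941:=<user DN>), where 1.2.840.113556.1.4.1941 is Active Directory's LDAP_MATCHING_RULE_IN_CHAIN rule that expands nested membership server-side, and the requested attribute "1.1" is the standard "no attributes" marker, so only entry DNs come back. A hedged sketch of building such a request; the helper name is hypothetical, not the actual YDB function.

// Illustrative construction of the one-shot nested-membership query from the
// log. The OID is LDAP_MATCHING_RULE_IN_CHAIN; "1.1" requests DNs only.
#include <iostream>
#include <string>

std::string chainMemberFilter(const std::string& userDn) {
    // Assumed helper: matches every group whose member chain reaches userDn.
    return "(member:1.2.840.113556.1.4.1941:=" + userDn + ")";
}

int main() {
    std::cout << chainMemberFilter("uid=ldapuser,dc=example,dc=net") << "\n";
    // Conceptually sent as: search(baseDn, SUBTREE, filter, attrs = {"1.1"})
}

When the server cannot answer such a query usefully, the provider falls back to the per-level "tree traversal" searches seen elsewhere in this log.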
2025-06-30T09:22:00.670969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:00.670972Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:00.671017Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:01.059038Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:01.062227Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:01.062240Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:01.062823Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:23288, port: 23288 2025-06-30T09:22:01.062869Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:01.136589Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:01.183008Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:01.183223Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:01.183234Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.230900Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.279127Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:01.279744Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****ifLQ (A930652D) () has now valid token of ldapuser@ldap 2025-06-30T09:22:01.418941Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:05.430886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521670226604143635:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:05.430926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:22:05.435938Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****ifLQ (A930652D) 2025-06-30T09:22:05.436071Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: 
scheme: ldaps, uris: ldaps://localhost:23288, port: 23288 2025-06-30T09:22:05.436098Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:05.502939Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:05.503204Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldaps://localhost:23288 return no entries 2025-06-30T09:22:05.503382Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****ifLQ (A930652D) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldaps://localhost:23288 return no entries)' 2025-06-30T09:22:10.441848Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****ifLQ (A930652D) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046de/r3tmp/tmpMejLCb/pdisk_1.dat 2025-06-30T09:22:11.462073Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:11.485012Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:11.486823Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670273547375083:2080] 1751275331436368 != 1751275331436371 TServer::EnableGrpc on GrpcPort 17896, node 2 2025-06-30T09:22:11.501008Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:11.501033Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:11.501035Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:11.501081Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:11.555679Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:11.555726Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:11.556105Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:11.658804Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:11.659087Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:11.659096Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:11.659291Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:28642, port: 28642 2025-06-30T09:22:11.659332Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:11.723006Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: 
dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf
2025-06-30T09:22:11.726972Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:28642. Server is busy
2025-06-30T09:22:11.727203Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****igSg (F43A3FE5) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldaps://localhost:28642. Server is busy)'
2025-06-30T09:22:11.727265Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1
2025-06-30T09:22:11.727272Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root
2025-06-30T09:22:11.727515Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:28642, port: 28642
2025-06-30T09:22:11.727533Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net
2025-06-30T09:22:11.798948Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf
2025-06-30T09:22:11.802636Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:28642. Server is busy
2025-06-30T09:22:11.802898Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****igSg (F43A3FE5) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldaps://localhost:28642. Server is busy)'
2025-06-30T09:22:12.439286Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2025-06-30T09:22:13.442608Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****igSg (F43A3FE5)
2025-06-30T09:22:13.442689Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1
2025-06-30T09:22:13.442695Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root
2025-06-30T09:22:13.442982Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:28642, port: 28642
2025-06-30T09:22:13.443009Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net
2025-06-30T09:22:13.538922Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf
2025-06-30T09:22:13.539141Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:28642. Server is busy
2025-06-30T09:22:13.539301Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****igSg (F43A3FE5) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldaps://localhost:28642. Server is busy)'
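This block shows the two error classes the ticket parser distinguishes: "Server is busy" yields a retryable error, so the ticket is kept and the login retried on the next "Refreshing ticket" cycle, while bind failures, missing users, and bad filters elsewhere in the log become permanent errors. A small illustrative classifier under that reading; the enum and the substring table are assumptions, not the actual YDB types.

// Sketch of retryable-vs-permanent error classification implied by the log.
// Transient server conditions keep the ticket alive for a later refresh;
// anything else is treated as a final failure. Illustrative only.
#include <iostream>
#include <string>

enum class ErrorKind { Retryable, Permanent };

ErrorKind classifyLdapError(const std::string& message) {
    // Transient conditions; the second entry is an assumed example,
    // only "Server is busy" actually appears in this log.
    for (const char* transient : {"Server is busy", "Connection timed out"})
        if (message.find(transient) != std::string::npos)
            return ErrorKind::Retryable;
    return ErrorKind::Permanent;
}

int main() {
    for (const std::string m : {
             "Could not perform search for filter uid=ldapuser. Server is busy",
             "Could not perform initial LDAP bind. Invalid credentials"}) {
        std::cout << m << " -> "
                  << (classifyLdapError(m) == ErrorKind::Retryable
                          ? "retryable" : "permanent")
                  << "\n";
    }
}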
2025-06-30T09:22:17.446105Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****igSg (F43A3FE5)
2025-06-30T09:22:17.446213Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /R ... ion
2025-06-30T09:22:22.120415Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:22:22.120452Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:22:22.121274Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-30T09:22:22.147023Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1
2025-06-30T09:22:22.147361Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1
2025-06-30T09:22:22.147375Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root
2025-06-30T09:22:22.147573Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:27455, port: 27455
2025-06-30T09:22:22.147591Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS
2025-06-30T09:22:22.168967Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net
2025-06-30T09:22:22.210994Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf
2025-06-30T09:22:22.255089Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1
2025-06-30T09:22:22.299248Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****dvwA (FE32D885) () has now valid token of ldapuser@ldap
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046de/r3tmp/tmpsxV0ZV/pdisk_1.dat
2025-06-30T09:22:22.734590Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-06-30T09:22:22.734953Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:22:22.735263Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521670324294259244:2080] 1751275342715991 != 1751275342715994
TServer::EnableGrpc on GrpcPort 21965, node 4
2025-06-30T09:22:22.747527Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:22:22.747543Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:22:22.747545Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:22:22.747610Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-30T09:22:22.782818Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1
2025-06-30T09:22:22.783612Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752:
CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:22.783629Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:22.783839Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:1419, port: 1419 2025-06-30T09:22:22.783869Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:22.804535Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:22.822210Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:22.822247Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:22.825220Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:22.851253Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:22.902280Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****wd5A (03ED8756) () has now valid token of ldapuser@ldap 2025-06-30T09:22:23.266384Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521670324875497734:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:23.266404Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046de/r3tmp/tmpMioRfn/pdisk_1.dat 2025-06-30T09:22:23.290551Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8053, node 5 2025-06-30T09:22:23.301606Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:23.301620Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:23.301625Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:23.301676Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:23.371560Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:23.371614Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:23.372732Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:23.385960Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:23.386044Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:23.386054Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:23.386247Z node 5 
:LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:5774, port: 5774 2025-06-30T09:22:23.386272Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:23.407107Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:23.455097Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-30T09:22:23.502974Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:23.503221Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:23.503243Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-30T09:22:23.546978Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-30T09:22:23.590992Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-30T09:22:23.591527Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****_QdQ (67591BD1) () has now valid token of ldapuser@ldap 2025-06-30T09:22:23.902139Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670324545030592:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:23.902168Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046de/r3tmp/tmpggcirF/pdisk_1.dat 2025-06-30T09:22:23.917656Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10492, node 6 2025-06-30T09:22:23.931122Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:23.931138Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:23.931144Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:23.931221Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:23.961588Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:23.963954Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:23.963976Z 
node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:23.964237Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:5030, port: 5030 2025-06-30T09:22:23.964267Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:23.972521Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:24.007005Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:24.007059Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:24.008050Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:24.019072Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-30T09:22:24.019107Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:5030. Bad search filter 2025-06-30T09:22:24.019390Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****1VGg (38C66BB3) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:5030. Bad search filter)' >> YdbIndexTable::MultiShardTableTwoIndexes [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::WithFollowers [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:22.963075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:22.963104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:22.963109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:22.963116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:22.963151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:22.963157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:22.963168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:22.963185Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:22.963313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:22.963382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:22.978477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:22.978503Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:22.982032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:22.982096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:22.982149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:22.985883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:22.986001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:22.986133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:22.986247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:22.987135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:22.987190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:22.987462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:22.987470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:22.987495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:22.987502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:22.987506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:22.987524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.988727Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:23.007742Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:23.007857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.007935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:23.007943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:23.007995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:23.008011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:23.009329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:23.009379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:23.009443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.009456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:23.009463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:23.009469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:23.010080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.010093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:23.010102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:23.010571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.010582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-30T09:22:23.010588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:23.010609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:23.011366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:23.011849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:23.011891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:23.012113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:23.012140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:23.012157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:23.012227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:23.012235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:23.012272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:23.012286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:23.013045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:23.013068Z node 1 :FLAT_TX_SCHEMESHARD ... 
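The trace above shows a schemeshard operation stepping through numbered states: "Change state for txid 1:0 2 -> 3", then "3 -> 128", and after the coordinator's plan step "128 -> 240". Below is a minimal sketch modeling that progression; the state numbers are taken from the log, while the enum names and driver loop are illustrative assumptions, not YDB's actual schemeshard API.

// Minimal sketch of the state progression visible in the log above
// ("Change state for txid 1:0 2 -> 3", "3 -> 128", "128 -> 240").
// State numbers come from the log; names and the loop are assumed.
#include <cstdio>

enum class EOpState {
    CreateParts    = 2,   // TCreateParts: create shards ("no shards to create" here)
    ConfigureParts = 3,   // TConfigureParts: push config to shards
    Propose        = 128, // TPropose: wait for the coordinator plan step
    Done           = 240, // completion after TEvOperationPlan
};

// One ProgressState round: advance when the current phase has no
// outstanding work, mirroring "no shards to create, do next state".
EOpState Advance(EOpState s) {
    switch (s) {
        case EOpState::CreateParts:    return EOpState::ConfigureParts;
        case EOpState::ConfigureParts: return EOpState::Propose;
        case EOpState::Propose:        return EOpState::Done; // plan step arrived
        case EOpState::Done:           return EOpState::Done;
    }
    return s;
}

int main() {
    EOpState s = EOpState::CreateParts;
    while (s != EOpState::Done) {
        EOpState next = Advance(s);
        std::printf("Change state %d -> %d\n", (int)s, (int)next);
        s = next;
    }
}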
board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-30T09:22:24.287274Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:24.287784Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:22:24.287798Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:22:24.287804Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:22:24.287809Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-30T09:22:24.287814Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-30T09:22:24.287921Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:22:24.287935Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:22:24.287939Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:22:24.288190Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:2, at schemeshard: 72057594046678944 2025-06-30T09:22:24.288199Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:2 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:24.288274Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:22:24.288302Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:2 progress is 3/3 2025-06-30T09:22:24.288307Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-30T09:22:24.288313Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:2 progress is 3/3 2025-06-30T09:22:24.288317Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-30T09:22:24.288321Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation 
IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: false 2025-06-30T09:22:24.288326Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-30T09:22:24.288333Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-30T09:22:24.288338Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:0 2025-06-30T09:22:24.288363Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:22:24.288370Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:1 2025-06-30T09:22:24.288373Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:1 2025-06-30T09:22:24.288379Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:24.288384Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:2 2025-06-30T09:22:24.288387Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:2 2025-06-30T09:22:24.288396Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:24.288402Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 1, subscribers: 1 2025-06-30T09:22:24.288407Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:22:24.288756Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:22:24.288771Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:22:24.288777Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:22:24.288783Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:22:24.288787Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:22:24.288801Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 1 2025-06-30T09:22:24.288807Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [3:340:2317] 2025-06-30T09:22:24.288967Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:22:24.289377Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:22:24.289396Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:22:24.289404Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:22:24.289903Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:22:24.289926Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-30T09:22:24.289932Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [3:707:2662] TestWaitNotification: OK eventTxId 104 2025-06-30T09:22:24.290083Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/WithFollowers" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:24.290141Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/WithFollowers" took 67us result status StatusSuccess 2025-06-30T09:22:24.290288Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/WithFollowers" PathDescription { Self { Name: "WithFollowers" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "WithFollowers" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "valueFloat" Type: "Float" TypeId: 33 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] Test command err: 2025-06-30T09:22:00.398167Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670228256581042:2075];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046e1/r3tmp/tmpeh2pmq/pdisk_1.dat 2025-06-30T09:22:00.507375Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:00.536284Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670228256581004:2080] 1751275320393089 != 1751275320393092 2025-06-30T09:22:00.544761Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15128, node 1 2025-06-30T09:22:00.586977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:00.586993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:00.586996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:00.587046Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:00.607247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:00.607282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:00.611400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:00.786833Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state 
for /Root keys 1 2025-06-30T09:22:00.790931Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:00.790951Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:00.791560Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:16354, port: 16354 2025-06-30T09:22:00.791583Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:00.853736Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-30T09:22:00.903506Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****AI5g (EC15FFE7) () has now valid token of ldapuser@ldap 2025-06-30T09:22:01.277613Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670232990898987:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:01.279850Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046e1/r3tmp/tmpZXU51Y/pdisk_1.dat 2025-06-30T09:22:01.311343Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670232990898964:2080] 1751275321277171 != 1751275321277174 2025-06-30T09:22:01.311868Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11292, node 2 2025-06-30T09:22:01.339075Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:01.339093Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:01.339095Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:01.339151Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:01.391307Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:01.391346Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:01.399181Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:01.426132Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:01.426223Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:01.426229Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:01.426448Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:11038, port: 11038 2025-06-30T09:22:01.426470Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: 
cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:01.501801Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:11038. Invalid credentials 2025-06-30T09:22:01.502084Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****4qYw (34ED1287) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:11038. Invalid credentials)' test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046e1/r3tmp/tmpCgqhzg/pdisk_1.dat 2025-06-30T09:22:01.990039Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:02.003360Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:02.005305Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670231629507398:2080] 1751275321982142 != 1751275321982145 TServer::EnableGrpc on GrpcPort 4981, node 3 2025-06-30T09:22:02.017172Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.017185Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.017187Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.017245Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.073570Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.074365Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.074383Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.074508Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:1512, port: 1512 2025-06-30T09:22:02.074534Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:02.087295Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.087341Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.089103Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:02.123338Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:1512. Invalid credentials 2025-06-30T09:22:02.123597Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****efpA (09AD13AA) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:1512. 
Invalid credentials)' 2025-06-30T09:22:02.611333Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521670237944362195:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:02.611361Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046e1/r3tmp/tmpwqFLWj/pdisk_1.dat 2025-06-30T09:22:02.633953Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13153, node 4 2025-06-30T09:22:02.646323Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.646337Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.646340Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.646405Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.715058Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.715100Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.716318Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:02.729114Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.731321Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.731343Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.731545Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:27154, port: 27154 2025-06-30T09:22:02.731574Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:02.799059Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:02.799305Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldaps://localhost:27154 return no entries 2025-06-30T09:22:02.799530Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****x-oQ (6D766C91) () has now permanent error message ... 
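Every LdapAuthProvider case above follows the same client sequence — init with the URI, optionally start TLS, simple-bind as the service DN, then a subtree search for the user and its group attribute — and each failure mode maps to one step: "Invalid credentials" at bind, "return no entries" at search, "Bad search filter" for malformed filters like &(uid=ldapuser)() (a conjunction must be parenthesized, e.g. (&(uid=ldapuser)(objectClass=*))). The (member:1.2.840.113556.1.4.1941:=...) filters are the Active Directory matching-rule-in-chain extensible match that expands nested membership transitively; the entryDn-based searches are the fallback tree traversal the log labels "Try to get nested groups". A sketch of the sequence using OpenLDAP's client API follows; the URI, DNs, and filter echo the test fixtures in the log, and the password and error handling are assumptions, not YDB's actual implementation.

// Sketch of the bind-then-search flow traced by ldap_auth_provider.cpp
// (init -> start TLS -> bind -> subtree search). Endpoints and DNs are
// test fixtures from the log; the password is a placeholder.
#include <ldap.h>
#include <cstdio>
#include <cstring>

int main() {
    LDAP* ld = nullptr;
    if (ldap_initialize(&ld, "ldap://localhost:27455") != LDAP_SUCCESS)
        return 1;

    int version = LDAP_VERSION3;
    ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &version);

    // "start TLS" step (ldap_auth_provider.cpp:179)
    if (ldap_start_tls_s(ld, nullptr, nullptr) != LDAP_SUCCESS) { /* handle */ }

    // "bind: bindDn: cn=robouser,..." — a wrong password yields
    // LDAP_INVALID_CREDENTIALS, the error logged for invalidRobouser.
    berval cred;
    cred.bv_val = const_cast<char*>("robouserPassword"); // assumed secret
    cred.bv_len = std::strlen(cred.bv_val);
    int rc = ldap_sasl_bind_s(ld, "cn=robouser,dc=search,dc=yandex,dc=net",
                              LDAP_SASL_SIMPLE, &cred,
                              nullptr, nullptr, nullptr);
    if (rc != LDAP_SUCCESS) {
        std::fprintf(stderr, "bind failed: %s\n", ldap_err2string(rc));
        ldap_unbind_ext_s(ld, nullptr, nullptr);
        return 1;
    }

    // "search: baseDn: ..., scope: subtree, filter: uid=ldapuser".
    // A malformed filter such as "&(uid=ldapuser)()" makes this call
    // fail with LDAP_FILTER_ERROR ("Bad search filter" in the log).
    const char* attrs[] = {"memberOf", nullptr};
    LDAPMessage* res = nullptr;
    rc = ldap_search_ext_s(ld, "dc=search,dc=yandex,dc=net",
                           LDAP_SCOPE_SUBTREE, "(uid=ldapuser)",
                           const_cast<char**>(attrs), 0,
                           nullptr, nullptr, nullptr, 0, &res);
    if (rc == LDAP_SUCCESS && ldap_count_entries(ld, res) == 0) {
        // matches "LDAP user ldapuser does not exist ... return no entries"
    }
    ldap_msgfree(res);
    ldap_unbind_ext_s(ld, nullptr, nullptr);
}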
09:22:03.327166Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:03.438846Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:03.439157Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:03.439164Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:03.439326Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:26228, port: 26228 2025-06-30T09:22:03.439348Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:03.491233Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:03.535005Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:03.535299Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:03.535316Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:03.582973Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:03.634238Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:03.634816Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****Df_A (D1A46ECB) () has now valid token of ldapuser@ldap 2025-06-30T09:22:04.222080Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:08.225731Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7521670242150953259:2241];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:08.225788Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:22:09.233609Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****Df_A (D1A46ECB) 2025-06-30T09:22:09.233666Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:26228, port: 26228 2025-06-30T09:22:09.233697Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:09.298939Z node 5 
:LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:09.347674Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:09.347929Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:09.347942Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:09.390940Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:09.438776Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:09.440335Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****Df_A (D1A46ECB) () has now valid token of ldapuser@ldap 2025-06-30T09:22:13.242975Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****Df_A (D1A46ECB) 2025-06-30T09:22:13.243149Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:26228, port: 26228 2025-06-30T09:22:13.243192Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:13.303094Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:13.354913Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:13.355564Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:13.355580Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:13.402015Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:13.443101Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:13.445846Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****Df_A (D1A46ECB) () has now valid token of ldapuser@ldap 2025-06-30T09:22:13.907809Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670285538553445:2243];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046e1/r3tmp/tmpSKyRaO/pdisk_1.dat 2025-06-30T09:22:13.915231Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:13.933707Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:13.934284Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7521670285538553217:2080] 1751275333905079 != 1751275333905082 TServer::EnableGrpc on GrpcPort 31827, node 6 2025-06-30T09:22:13.950475Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:13.950489Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:13.950492Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:13.950548Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:14.015665Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:14.015702Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:14.016789Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:14.110309Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:14.110731Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:14.110757Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:14.110993Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:17031, port: 17031 2025-06-30T09:22:14.111032Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:14.171012Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:14.219250Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****dQvQ (A2EA7958) () has now valid token of ldapuser@ldap 2025-06-30T09:22:14.910848Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:18.909033Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7521670285538553445:2243];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:18.909611Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:22:18.914542Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: 
Refreshing ticket eyJh****dQvQ (A2EA7958) 2025-06-30T09:22:18.914604Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:17031, port: 17031 2025-06-30T09:22:18.914645Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:18.971085Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:19.023189Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****dQvQ (A2EA7958) () has now valid token of ldapuser@ldap 2025-06-30T09:22:22.919959Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****dQvQ (A2EA7958) 2025-06-30T09:22:22.920082Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:17031, port: 17031 2025-06-30T09:22:22.920120Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:22.990980Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:23.035274Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****dQvQ (A2EA7958) () has now valid token of ldapuser@ldap >> SystemView::AuthOwners [GOOD] >> SystemView::AuthOwners_Access |82.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> SystemView::QueryStatsFields [GOOD] >> SystemView::PartitionStatsTtlFields |82.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> SystemView::ShowCreateTableDefaultLiteral [GOOD] >> SystemView::ShowCreateTableColumn |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableTwoIndexes [GOOD] Test command err: Trying to start YDB, gRPC: 24727, MsgBus: 19671 2025-06-30T09:21:41.115433Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670147977302816:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:41.117613Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003411/r3tmp/tmpDqaQIK/pdisk_1.dat 2025-06-30T09:21:41.242896Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:41.246822Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670147977302633:2080] 1751275301111451 != 1751275301111454 TServer::EnableGrpc on GrpcPort 24727, node 1 2025-06-30T09:21:41.256412Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:41.256427Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-30T09:21:41.256430Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:41.256482Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:41.273004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:41.273036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:41.273802Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19671 TClient is connected to server localhost:19671 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:41.487832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:41.494889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:41.503880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:41.564559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
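Throughout this log, tickets are printed as a four-character prefix, a **** mask, and a four-character suffix (e.g. eyJh****dQvQ; "eyJh" is how the base64 of a JWT header begins). A possible helper in the same spirit is sketched below; it is purely illustrative of the convention, not YDB's actual masking code.

// Illustrative sketch of the ticket-masking convention seen in the
// log ("eyJh****dQvQ"): keep the first and last four characters and
// hide the middle. Not YDB's actual implementation.
#include <string>
#include <iostream>

std::string MaskTicket(const std::string& ticket) {
    if (ticket.size() <= 8) {
        return std::string(ticket.size(), '*'); // too short to reveal anything
    }
    return ticket.substr(0, 4) + "****" + ticket.substr(ticket.size() - 4);
}

int main() {
    // "eyJh" is the base64 opening of '{"a', i.e. a JWT header.
    std::cout << MaskTicket("eyJhbGciOiJSUzI1NiJ9.payload.sigdQvQ") << "\n";
    // prints: eyJh****dQvQ
}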
2025-06-30T09:21:41.609989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:41.650178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:42.118832Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:42.187417Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670152272271537:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.187450Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.243967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.264570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.292816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.336094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.368353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.401952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.433070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.472325Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670152272272187:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.472384Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.472549Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670152272272195:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.473592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:42.479153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:42.479236Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670152272272197:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:42.572381Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670152272272248:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:42.901465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.162938Z node 1 :KQP_EXECUTER ERROR: kqp_ ... DE4ZWItZGYwOGQ0YmEtNGQyOTA2MmItOTNmMWFmMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.210406Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719489. Ctx: { TraceId: 01jz02a4afbe887bjf3c7bwqjw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDA1OGFkZDItZTFiODQ4N2QtODExMTBkNmEtOWE5YmYzOTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.212820Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719491. Ctx: { TraceId: 01jz02a4ad3vc8xfqn3ect1xgz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmJmNzFhZjgtNDJmMTRlMGMtOWNiNDYzZmYtYjZkNTIzZjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.213054Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719490. Ctx: { TraceId: 01jz02a4ak65k4y2fc8av5rycy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTJhZWMzOTQtNjc2MmM0YTMtZTgxODA2MzgtMmVjZjc5Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.217289Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719492. Ctx: { TraceId: 01jz02a4ardchtt4b6c5j0hr3s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjQ3NjRmMjktNTRmMWM2NTYtYWRkMGI2YjktYWYyOTI1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.220439Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719493. Ctx: { TraceId: 01jz02a4avf8ffrfh0n3wz86p1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzhkYmVjZDMtM2ZmNTBhNDgtYjIzNmZiNmYtNDc5MWM4NWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.221691Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719494. Ctx: { TraceId: 01jz02a4avf8ffrfh0n3wz86p1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzhkYmVjZDMtM2ZmNTBhNDgtYjIzNmZiNmYtNDc5MWM4NWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.222311Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719495. 
Ctx: { TraceId: 01jz02a4aw9b3kbs11yvrvcqba, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmE0ZjIzNDktZGU5MWU3ZTQtMjdkMTM1OWYtZDk5OTE4ZWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.223662Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719496. Ctx: { TraceId: 01jz02a4aw9b3kbs11yvrvcqba, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmE0ZjIzNDktZGU5MWU3ZTQtMjdkMTM1OWYtZDk5OTE4ZWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.226657Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719497. Ctx: { TraceId: 01jz02a4b1e1azs94c58jb3kg1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDA1OGFkZDItZTFiODQ4N2QtODExMTBkNmEtOWE5YmYzOTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.227290Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719498. Ctx: { TraceId: 01jz02a4b10d3ej9rhq6tdapfe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTJhZWMzOTQtNjc2MmM0YTMtZTgxODA2MzgtMmVjZjc5Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.228235Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719499. Ctx: { TraceId: 01jz02a4b32mpadfbtdxtkayzm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2I4Y2M0Y2ItNDJkYTJiM2QtZWE3ZmZiOTYtOWJkMjVmZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.228525Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719500. Ctx: { TraceId: 01jz02a4b10d3ej9rhq6tdapfe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTJhZWMzOTQtNjc2MmM0YTMtZTgxODA2MzgtMmVjZjc5Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.229155Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719501. Ctx: { TraceId: 01jz02a4b1e1azs94c58jb3kg1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDA1OGFkZDItZTFiODQ4N2QtODExMTBkNmEtOWE5YmYzOTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.231960Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719502. Ctx: { TraceId: 01jz02a4b669323jr8zk8tpn0s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjQ3NjRmMjktNTRmMWM2NTYtYWRkMGI2YjktYWYyOTI1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.235454Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719503. Ctx: { TraceId: 01jz02a4b84w2jvgsy2jhbdr7y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmJmNzFhZjgtNDJmMTRlMGMtOWNiNDYzZmYtYjZkNTIzZjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.235459Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719505. Ctx: { TraceId: 01jz02a4b85aajg22ykcbbr062, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2I4Y2M0Y2ItNDJkYTJiM2QtZWE3ZmZiOTYtOWJkMjVmZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.235633Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719504. 
Ctx: { TraceId: 01jz02a4b91smmhwqdv1qq0m90, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmE0ZjIzNDktZGU5MWU3ZTQtMjdkMTM1OWYtZDk5OTE4ZWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.236335Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719506. Ctx: { TraceId: 01jz02a4bac6xb0akdrg6vvev3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTJhZWMzOTQtNjc2MmM0YTMtZTgxODA2MzgtMmVjZjc5Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.237468Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719507. Ctx: { TraceId: 01jz02a4b85aajg22ykcbbr062, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2I4Y2M0Y2ItNDJkYTJiM2QtZWE3ZmZiOTYtOWJkMjVmZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.237947Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719508. Ctx: { TraceId: 01jz02a4b91smmhwqdv1qq0m90, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmE0ZjIzNDktZGU5MWU3ZTQtMjdkMTM1OWYtZDk5OTE4ZWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.238117Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719509. Ctx: { TraceId: 01jz02a4b84w2jvgsy2jhbdr7y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmJmNzFhZjgtNDJmMTRlMGMtOWNiNDYzZmYtYjZkNTIzZjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.240351Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719510. Ctx: { TraceId: 01jz02a4bac6xb0akdrg6vvev3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTJhZWMzOTQtNjc2MmM0YTMtZTgxODA2MzgtMmVjZjc5Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.243187Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719511. Ctx: { TraceId: 01jz02a4bf9xtp2yv7g3fzkabt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDA1OGFkZDItZTFiODQ4N2QtODExMTBkNmEtOWE5YmYzOTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.244340Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719512. Ctx: { TraceId: 01jz02a4bf9xtp2yv7g3fzkabt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDA1OGFkZDItZTFiODQ4N2QtODExMTBkNmEtOWE5YmYzOTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.249159Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719513. Ctx: { TraceId: 01jz02a4bq114faaemthrpwwmv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTAxODE4ZWItZGYwOGQ0YmEtNGQyOTA2MmItOTNmMWFmMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.250209Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719514. Ctx: { TraceId: 01jz02a4bq30q6j3nytpj6cabf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2I4Y2M0Y2ItNDJkYTJiM2QtZWE3ZmZiOTYtOWJkMjVmZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.250264Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719515. 
Ctx: { TraceId: 01jz02a4bq9pnearr0ej0dgk6b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjQ3NjRmMjktNTRmMWM2NTYtYWRkMGI2YjktYWYyOTI1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.251498Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719516. Ctx: { TraceId: 01jz02a4bq30q6j3nytpj6cabf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2I4Y2M0Y2ItNDJkYTJiM2QtZWE3ZmZiOTYtOWJkMjVmZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.251814Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719517. Ctx: { TraceId: 01jz02a4bq9pnearr0ej0dgk6b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjQ3NjRmMjktNTRmMWM2NTYtYWRkMGI2YjktYWYyOTI1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.252296Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719518. Ctx: { TraceId: 01jz02a4bq30q6j3nytpj6cabf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2I4Y2M0Y2ItNDJkYTJiM2QtZWE3ZmZiOTYtOWJkMjVmZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS finished with status: SUCCESS finished with status: SUCCESS 2025-06-30T09:22:24.263579Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719519. Ctx: { TraceId: 01jz02a4c54nw9qrp0c1n7qd9y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTJhZWMzOTQtNjc2MmM0YTMtZTgxODA2MzgtMmVjZjc5Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.264659Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719520. Ctx: { TraceId: 01jz02a4c54nw9qrp0c1n7qd9y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTJhZWMzOTQtNjc2MmM0YTMtZTgxODA2MzgtMmVjZjc5Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:24.265222Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719521. Ctx: { TraceId: 01jz02a4c54nw9qrp0c1n7qd9y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTJhZWMzOTQtNjc2MmM0YTMtZTgxODA2MzgtMmVjZjc5Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceAndDropWithReboots >> TExternalDataSourceTestReboots::ParallelCreateDrop >> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceWithReboots |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> SystemView::ShowCreateTablePartitionByHash [GOOD] >> SystemView::ShowCreateTablePartitionSettings |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> SystemView::AuthUsers_LockUnlock [GOOD] >> SystemView::AuthUsers_Access >> VectorIndexBuildTest::CreateAndDrop [GOOD] >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-false >> TExternalDataSourceTestReboots::SimpleDropExternalDataSourceWithReboots2 |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |82.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.1%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} |82.1%| [TA] {RESULT} $(B)/ydb/core/kqp/provider/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> VectorIndexBuildTest::Metering_CommonDB [GOOD] >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn [GOOD] >> VectorIndexBuildTest::Metering_ServerLessDB-smallScanBuffer-false >> SystemView::AuthOwners_Access [GOOD] >> SystemView::AuthOwners_ResultOrder |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> SystemView::PartitionStatsFields [GOOD] >> SystemView::ConcurrentScans >> VectorIndexBuildTest::RecreatedColumns [GOOD] >> VectorIndexBuildTest::SimpleDuplicates |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::SimpleDropExternalDataSourceWithReboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 31972, MsgBus: 1507 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0033d5/r3tmp/tmpgjHYBl/pdisk_1.dat 2025-06-30T09:21:41.212001Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:21:41.259206Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31972, node 1 2025-06-30T09:21:41.261631Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670146898572809:2080] 1751275301124311 != 1751275301124314 2025-06-30T09:21:41.298997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:41.299031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:41.299297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:41.299300Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:41.299302Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:41.299346Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:41.300348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1507 TClient is connected to server localhost:1507 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:41.464444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:41.503574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:41.591886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:41.654074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:41.688297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:41.989383Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670146898574419:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:41.989412Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.088999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.147603Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:42.158755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.189623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.210139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.244395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.265147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.287765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:42.330825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670151193542382:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.330878Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.331229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670151193542387:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:42.332357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:42.336162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:42.336267Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670151193542389:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:42.414011Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670151193542440:3406] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:42.800386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.017731Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jz028w31cnq3j078aqw38fx2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2I0NjM1MjYtYmI2ODc0YzgtYTBmMDM1MjItNTVmMjFjMmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:21:43.018113Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId ... b://session/3?node_id=2&id=YTNkOGI3Y2ItOTQ0OWQ1ZDMtOTlkNGY2YmQtN2NhY2I4NTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.158538Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727087. Ctx: { TraceId: 01jz02a67adjpt2famxh8nj5j3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTRiZGY2YjUtNzVlNzUzNTQtZWQ1NDQyYS1lMGVjZGU5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.158631Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727085. Ctx: { TraceId: 01jz02a67dea83emjmtwprefte, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTdiMDZhMzMtNWRkZjczZDQtZGVkNWQ5N2QtYWViOTE2ZjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.160468Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727088. Ctx: { TraceId: 01jz02a67ca8azeg33e9cwgpp8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTBiYjNmMzEtYmZhZWNmNWEtYzVmNDYzZmMtZjVmYzFhYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.160696Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727089. Ctx: { TraceId: 01jz02a67adjpt2famxh8nj5j3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTRiZGY2YjUtNzVlNzUzNTQtZWQ1NDQyYS1lMGVjZGU5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.161841Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727090. Ctx: { TraceId: 01jz02a67h225rvw7n9792tfra, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGUzNzYwYy05NTQ1NDg1YS03OWRmZTJlMC03ZDBjOGYxNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.163207Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727091. 
Ctx: { TraceId: 01jz02a67hde7pnb6yp33rva99, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWU0OTYwYTgtNGYyOGY4ZjEtNjJlZjI3YjgtYTYxZDFkOGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.163989Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727092. Ctx: { TraceId: 01jz02a67jdxv6ysq7j4czv3f9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTdiMDZhMzMtNWRkZjczZDQtZGVkNWQ5N2QtYWViOTE2ZjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.164266Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727093. Ctx: { TraceId: 01jz02a67hde7pnb6yp33rva99, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWU0OTYwYTgtNGYyOGY4ZjEtNjJlZjI3YjgtYTYxZDFkOGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.164905Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727094. Ctx: { TraceId: 01jz02a67k7prxnfs4jv3z4gk4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTRiZGY2YjUtNzVlNzUzNTQtZWQ1NDQyYS1lMGVjZGU5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.165174Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727095. Ctx: { TraceId: 01jz02a67hde7pnb6yp33rva99, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWU0OTYwYTgtNGYyOGY4ZjEtNjJlZjI3YjgtYTYxZDFkOGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.165532Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727096. Ctx: { TraceId: 01jz02a67jdxv6ysq7j4czv3f9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTdiMDZhMzMtNWRkZjczZDQtZGVkNWQ5N2QtYWViOTE2ZjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.166072Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727097. Ctx: { TraceId: 01jz02a67k7prxnfs4jv3z4gk4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTRiZGY2YjUtNzVlNzUzNTQtZWQ1NDQyYS1lMGVjZGU5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.166598Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727099. Ctx: { TraceId: 01jz02a67hde7pnb6yp33rva99, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWU0OTYwYTgtNGYyOGY4ZjEtNjJlZjI3YjgtYTYxZDFkOGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.166607Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727098. Ctx: { TraceId: 01jz02a67jdxv6ysq7j4czv3f9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTdiMDZhMzMtNWRkZjczZDQtZGVkNWQ5N2QtYWViOTE2ZjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.167566Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727101. Ctx: { TraceId: 01jz02a67mfmb12v2v20r777kb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTBiYjNmMzEtYmZhZWNmNWEtYzVmNDYzZmMtZjVmYzFhYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.168006Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727100. 
Ctx: { TraceId: 01jz02a67kbwhdxvbvk4sfwppc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTNkOGI3Y2ItOTQ0OWQ1ZDMtOTlkNGY2YmQtN2NhY2I4NTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.168559Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727102. Ctx: { TraceId: 01jz02a67k7prxnfs4jv3z4gk4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTRiZGY2YjUtNzVlNzUzNTQtZWQ1NDQyYS1lMGVjZGU5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.169253Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727103. Ctx: { TraceId: 01jz02a67kbwhdxvbvk4sfwppc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTNkOGI3Y2ItOTQ0OWQ1ZDMtOTlkNGY2YmQtN2NhY2I4NTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.169967Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727104. Ctx: { TraceId: 01jz02a67mfmb12v2v20r777kb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTBiYjNmMzEtYmZhZWNmNWEtYzVmNDYzZmMtZjVmYzFhYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.171427Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727105. Ctx: { TraceId: 01jz02a67mfmb12v2v20r777kb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTBiYjNmMzEtYmZhZWNmNWEtYzVmNDYzZmMtZjVmYzFhYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.173224Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727106. Ctx: { TraceId: 01jz02a67vcfchgqap7qhpe122, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGUzNzYwYy05NTQ1NDg1YS03OWRmZTJlMC03ZDBjOGYxNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.173529Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727107. Ctx: { TraceId: 01jz02a67w1hyanqektckhzvc5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWU0OTYwYTgtNGYyOGY4ZjEtNjJlZjI3YjgtYTYxZDFkOGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.174594Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727108. Ctx: { TraceId: 01jz02a67w16b87h5y998pa07k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTdiMDZhMzMtNWRkZjczZDQtZGVkNWQ5N2QtYWViOTE2ZjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.174886Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727109. Ctx: { TraceId: 01jz02a67w1hyanqektckhzvc5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWU0OTYwYTgtNGYyOGY4ZjEtNjJlZjI3YjgtYTYxZDFkOGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.175023Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727110. Ctx: { TraceId: 01jz02a67vcfchgqap7qhpe122, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGUzNzYwYy05NTQ1NDg1YS03OWRmZTJlMC03ZDBjOGYxNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.176111Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727111. 
Ctx: { TraceId: 01jz02a67w16b87h5y998pa07k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTdiMDZhMzMtNWRkZjczZDQtZGVkNWQ5N2QtYWViOTE2ZjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.176384Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727112. Ctx: { TraceId: 01jz02a67x5xeck5a4yc1pb9gt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTNkOGI3Y2ItOTQ0OWQ1ZDMtOTlkNGY2YmQtN2NhY2I4NTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.176693Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727113. Ctx: { TraceId: 01jz02a67vcfchgqap7qhpe122, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGUzNzYwYy05NTQ1NDg1YS03OWRmZTJlMC03ZDBjOGYxNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-30T09:22:26.177995Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727114. Ctx: { TraceId: 01jz02a67x5xeck5a4yc1pb9gt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTNkOGI3Y2ItOTQ0OWQ1ZDMtOTlkNGY2YmQtN2NhY2I4NTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-30T09:22:26.178487Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727115. Ctx: { TraceId: 01jz02a67x5xeck5a4yc1pb9gt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTNkOGI3Y2ItOTQ0OWQ1ZDMtOTlkNGY2YmQtN2NhY2I4NTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.178711Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727116. Ctx: { TraceId: 01jz02a6808j5yx57btq1trm8a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTRiZGY2YjUtNzVlNzUzNTQtZWQ1NDQyYS1lMGVjZGU5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-30T09:22:26.179680Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727117. Ctx: { TraceId: 01jz02a6808j5yx57btq1trm8a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTRiZGY2YjUtNzVlNzUzNTQtZWQ1NDQyYS1lMGVjZGU5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:26.180556Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727118. Ctx: { TraceId: 01jz02a6808j5yx57btq1trm8a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTRiZGY2YjUtNzVlNzUzNTQtZWQ1NDQyYS1lMGVjZGU5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS >> TExternalDataSourceTestReboots::CreateExternalDataSourceWithReboots >> ShowCreateView::WithTwoTablePathPrefixes [GOOD] >> SystemView::AuthGroups |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |82.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |82.1%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-false [GOOD] >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-true >> TCdcStreamWithRebootsTests::CreateStreamWithInitialScan[TabletReboots] [GOOD] |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TStorageTenantTest::CreateDummyTabletsInDifferentDomains [GOOD] >> SystemView::ConcurrentScans [GOOD] >> SystemView::PDisksFields >> SystemView::AuthOwners_ResultOrder [GOOD] >> SystemView::AuthOwners_TableRange >> TNodeBrokerTest::TestRandomActions [GOOD] |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> SystemView::AuthUsers_Access [GOOD] >> SystemView::AuthUsers_ResultOrder ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateDummyTabletsInDifferentDomains [GOOD] Test command err: 2025-06-30T09:22:22.131078Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670320470887136:2097];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:22.133179Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:22.135068Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670321815996587:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:22.135091Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004926/r3tmp/tmpeL1WGT/pdisk_1.dat 2025-06-30T09:22:22.243356Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:22.247214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:22.247240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:22.249646Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:22.256490Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TClient is 
connected to server localhost:27267 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-30T09:22:22.274404Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670320470887119:2141] Handle TEvNavigate describe path dc-1 2025-06-30T09:22:22.276370Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670320470887660:2395] HANDLE EvNavigateScheme dc-1 2025-06-30T09:22:22.276429Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7521670320470887306:2156], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:22.276443Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7521670320470887306:2156], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-30T09:22:22.276508Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:7521670320470887661:2396][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:22:22.277004Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670320470886968:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670320470887666:2396] 2025-06-30T09:22:22.277026Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670320470886968:2055] Subscribe: subscriber# [1:7521670320470887666:2396], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:22.277032Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670320470886965:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670320470887665:2396] 2025-06-30T09:22:22.277040Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7521670320470886971:2058] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7521670320470887667:2396] 2025-06-30T09:22:22.277044Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670320470886971:2058] Subscribe: subscriber# [1:7521670320470887667:2396], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:22.277052Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7521670320470886965:2052] Subscribe: subscriber# [1:7521670320470887665:2396], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-30T09:22:22.277057Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670320470887666:2396][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670320470886968:2055] 2025-06-30T09:22:22.277062Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670320470887667:2396][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670320470886971:2058] 2025-06-30T09:22:22.277069Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670320470887661:2396][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670320470887663:2396] 2025-06-30T09:22:22.277077Z node 1 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7521670320470887665:2396][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670320470886965:2052] 2025-06-30T09:22:22.277077Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670320470886968:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670320470887666:2396] 2025-06-30T09:22:22.277081Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670320470887661:2396][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670320470887664:2396] 2025-06-30T09:22:22.277082Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670320470886971:2058] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670320470887667:2396] 2025-06-30T09:22:22.277087Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7521670320470886965:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7521670320470887665:2396] 2025-06-30T09:22:22.277094Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:7521670320470887661:2396][/dc-1] Set up state: owner# [1:7521670320470887306:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:22.277139Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:7521670320470887661:2396][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7521670320470887662:2396] 2025-06-30T09:22:22.277161Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:7521670320470887661:2396][/dc-1] Path was already updated: owner# [1:7521670320470887306:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:22:22.277167Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670320470887665:2396][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670320470887662:2396], cookie# 1 2025-06-30T09:22:22.277171Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670320470887666:2396][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670320470887663:2396], cookie# 1 2025-06-30T09:22:22.277175Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7521670320470887667:2396][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670320470887664:2396], cookie# 1 2025-06-30T09:22:22.277180Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670320470886965:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670320470887665:2396], cookie# 1 2025-06-30T09:22:22.277186Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670320470886968:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: 
/dc-1 }: sender# [1:7521670320470887666:2396], cookie# 1 2025-06-30T09:22:22.277190Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7521670320470886971:2058] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7521670320470887667:2396], cookie# 1 2025-06-30T09:22:22.277196Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670320470887665:2396][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670320470886965:2052], cookie# 1 2025-06-30T09:22:22.277199Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670320470887666:2396][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670320470886968:2055], cookie# 1 2025-06-30T09:22:22.277202Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7521670320470887667:2396][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670320470886971:2058], cookie# 1 2025-06-30T09:22:22.277207Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670320470887661:2396][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670320470887662:2396], cookie# 1 2025-06-30T09:22:22.277213Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670320470887661:2396][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:22:22.277216Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670320470887661:2396][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670320470887663:2396], cookie# 1 2025-06-30T09:22:22.277219Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:7521670320470887661:2396][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:22:22.277222Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:7521670320470887661:2396][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7521670320470887664:2396], cookie# 1 2025-06-30T09:22:22.277228Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:7521670320470887661:2396][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-30T09:22:22.288897Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7521670320470887306:2156], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ... 
LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.431455Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670347829380630:2192], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.431487Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670347829380697:2226], recipient# [3:7521670347829380683:2471], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.431488Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:28.431502Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:28.631073Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7521670321815996779:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.631118Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7521670321815996779:2108], cacheItem# { Subscriber: { Subscriber: [2:7521670321815996804:2114] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-30T09:22:28.631144Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7521670347585800647:2132], recipient# [2:7521670347585800646:2279], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.639629Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: 
self# [3:7521670347829380630:2192], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.639702Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670347829380712:2227], recipient# [3:7521670347829380710:2483], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.641590Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:28.641594Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670347829380630:2192], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.641643Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670347829380715:2228], recipient# [3:7521670347829380711:2484], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.641812Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7521670347829380711:2484], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:28.734478Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670347829380630:2192], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.734552Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670347829380716:2229], recipient# [3:7521670347829380711:2484], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.734624Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7521670347829380711:2484], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:28.837399Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670347829380630:2192], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.837468Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670347829380718:2230], recipient# [3:7521670347829380711:2484], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.837539Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7521670347829380711:2484], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:28.910912Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7521670347829380630:2192], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.911028Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7521670347829380720:2231], recipient# [3:7521670347829380719:2485], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:28.911089Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest >> TCdcStreamWithRebootsTests::CreateStreamWithInitialScan[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:31.767781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:31.767820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 
1.000000s, InflightLimit# 10 2025-06-30T09:21:31.767826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:31.767844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:31.767850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:31.767855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:31.767866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:31.767897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:31.768048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:31.768168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:31.782351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:31.782385Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:31.782538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:31.785971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:31.787346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:31.787410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:31.789282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:31.789347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:31.789514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:31.789585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:31.790526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:31.790588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:31.790880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:31.790897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:31.790910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:31.790920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:31.790926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:31.790959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:31.792634Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:31.824640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:31.824748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:31.824840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:31.824850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:31.824925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:31.824944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:31.827216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:31.827276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 
2025-06-30T09:21:31.827351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:31.827364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:31.827369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:31.827375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:31.828079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:31.828097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:31.828105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:31.829499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:31.829513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:31.829519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:31.829527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:31.830159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:31.830620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:31.830662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:31.830924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:31.830956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... 
2057594046678944 2025-06-30T09:22:28.794283Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [141:209:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 4 2025-06-30T09:22:28.794289Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [141:209:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 5 2025-06-30T09:22:28.794387Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:3, at schemeshard: 72057594046678944 2025-06-30T09:22:28.794395Z node 141 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:3 ProgressState 2025-06-30T09:22:28.794407Z node 141 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:3 progress is 3/4 2025-06-30T09:22:28.794412Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/4 2025-06-30T09:22:28.794419Z node 141 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 1003, done: 3, blocked: 1 2025-06-30T09:22:28.794426Z node 141 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_cdc_stream.cpp:409: [72057594046678944] TDone opId# 1003:2HandleReply TEvCompleteBarrier 2025-06-30T09:22:28.794439Z node 141 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:2 progress is 4/4 2025-06-30T09:22:28.794443Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 4/4 2025-06-30T09:22:28.794447Z node 141 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:3 progress is 4/4 2025-06-30T09:22:28.794451Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 4/4 2025-06-30T09:22:28.794456Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 3/4, is published: false 2025-06-30T09:22:28.794460Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 4/4, is published: false 2025-06-30T09:22:28.794466Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 4/4 2025-06-30T09:22:28.794472Z node 141 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:22:28.794477Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:22:28.794494Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-30T09:22:28.794499Z node 141 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:1 2025-06-30T09:22:28.794503Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:1 2025-06-30T09:22:28.794509Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 
72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:22:28.794514Z node 141 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:2 2025-06-30T09:22:28.794517Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:2 2025-06-30T09:22:28.794533Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:22:28.794537Z node 141 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:3 2025-06-30T09:22:28.794541Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:3 2025-06-30T09:22:28.794551Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 5 2025-06-30T09:22:28.794556Z node 141 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 2, subscribers: 0 2025-06-30T09:22:28.794560Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 4], 4 2025-06-30T09:22:28.794564Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 5], 2 2025-06-30T09:22:28.794776Z node 141 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:28.794791Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:28.794796Z node 141 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:28.794802Z node 141 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 4 2025-06-30T09:22:28.794807Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:28.794999Z node 141 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:28.795011Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:28.795019Z node 141 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:28.795024Z 
node 141 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2 2025-06-30T09:22:28.795028Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:22:28.795039Z node 141 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:22:28.796374Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:28.797309Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:22:28.799241Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:22:28.799254Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:22:28.799329Z node 141 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:22:28.799349Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:28.799355Z node 141 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [141:664:2580] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:28.799436Z node 141 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:22:28.799491Z node 141 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Stream" took 65us result status StatusSuccess 2025-06-30T09:22:28.799611Z node 141 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Stream" PathDescription { Self { Name: "Stream" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeCdcStream CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 CdcStreamVersion: 1 } ChildrenExist: true } Children { Name: "streamImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409548 } DomainDescription { SchemeShardId_Depricated: 
72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } CdcStreamDescription { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 4 } State: ECdcStreamStateScan SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> SystemView::AuthGroups [GOOD] >> SystemView::AuthGroupMembers >> IndexBuildTest::BaseCase [GOOD] >> IndexBuildTest::CancellationNoTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::TestRandomActions [GOOD] Test command err: 2025-06-30T09:20:50.268168Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.271205Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.271236Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.271256Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.271282Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.271304Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.271320Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.274389Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.274428Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.274460Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.274485Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.274517Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.274540Z node 8 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.274557Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.274613Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.274653Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.274672Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.275509Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.275533Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.275548Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.275564Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.275579Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.275621Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.276463Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.276540Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.276582Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.276611Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.276642Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.276655Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.276667Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.276678Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.277138Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277205Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277225Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277243Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277257Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277283Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277310Z node 8 :NAMESERVICE DEBUG: 
dynamic_nameserver.cpp:570: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-30T09:20:50.277338Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277359Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277804Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277847Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277897Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277913Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277923Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.277934Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.278017Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.280666Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.280695Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.281109Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.281185Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.281195Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.281208Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.281496Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.281558Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.281894Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.281978Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.282112Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 
2025-06-30T09:20:50.282165Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.282272Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.282302Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.282493Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.282714Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.282831Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:599: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-30T09:20:50.295728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:50.295751Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-30T09:20:50.299893Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:20:50.300245Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:20:50.300304Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-30T09:20:50.300444Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:20:50.301060Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-30T09:20:50.301083Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-30T09:20:50.301116Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-30T09:20:50.301126Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-30T09:20:50.301130Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-30T09:20:50.301141Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-30T09:20:50.301154Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-30T09:20:50.301158Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-30T09:20:50.301162Z node 1 :NODE_BROKER DEB ... 
main=72057594046678944:1 slotindex=4 authorizedbycertificate=false bridgePileId= 2025-06-30T09:22:27.999425Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:836: [DB] Removing node #1026.v852 from database 2025-06-30T09:22:27.999433Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z 2025-06-30T09:22:27.999444Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #189.852 2025-06-30T09:22:28.094130Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:25:2072], Recipient [1:28170:21163]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 189 } 2025-06-30T09:22:28.094166Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:22:28.094175Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #189 2025-06-30T09:22:28.094235Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [8:228:2072], Recipient [1:28295:21263] 2025-06-30T09:22:28.094240Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:22:28.094246Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #189 2025-06-30T09:22:28.094346Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [2:54:2072], Recipient [1:28286:21258] 2025-06-30T09:22:28.094351Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:22:28.094355Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #189 2025-06-30T09:22:28.094364Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [3:83:2072], Recipient [1:28287:21259] 2025-06-30T09:22:28.094369Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:22:28.094373Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #189 2025-06-30T09:22:28.094379Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [4:112:2072], Recipient [1:28288:21260] 2025-06-30T09:22:28.094383Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:22:28.094387Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #189 2025-06-30T09:22:28.094395Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [5:141:2072], Recipient [1:28293:21261] 2025-06-30T09:22:28.094398Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:22:28.094403Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #189 2025-06-30T09:22:28.094411Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [6:170:2072], Recipient [1:28294:21262] 2025-06-30T09:22:28.094415Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-30T09:22:28.094419Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #189 
2025-06-30T09:22:28.094426Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [7:199:2072], Recipient [1:28296:21264]
2025-06-30T09:22:28.094430Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes
2025-06-30T09:22:28.094435Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:378: Delaying list nodes request for epoch #189
2025-06-30T09:22:28.105800Z node 1 :NODE_BROKER DEBUG: node_broker__update_epoch.cpp:31: TTxUpdateEpoch Complete
2025-06-30T09:22:28.105835Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:522: [Committed] Node #1029.v845 host6:5 has expired
2025-06-30T09:22:28.105848Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:522: [Committed] Node #1030.v847 host15:14 has expired
2025-06-30T09:22:28.105855Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:522: [Committed] Node #1031.v849 host3:2 has expired
2025-06-30T09:22:28.105861Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:522: [Committed] Node #1025.v848 host9:8 has expired
2025-06-30T09:22:28.105867Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:522: [Committed] Node #1028.v846 host5:4 has expired
2025-06-30T09:22:28.105877Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:536: [Committed] Remove node #1026.v850 host7:6
2025-06-30T09:22:28.105900Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:548: [Committed] Move to new epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z, approximate epoch start #189.852
2025-06-30T09:22:28.105930Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-08T21:00:00.025000Z
2025-06-30T09:22:28.105942Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z, approximate epoch start #189.852 nodes=1 expired=5
2025-06-30T09:22:28.105991Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z nodes=1 expired=5 removed=4
2025-06-30T09:22:28.105999Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1032.v838 to update nodes log
2025-06-30T09:22:28.106010Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1033.v838 to update nodes log
2025-06-30T09:22:28.106014Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v850 to update nodes log
2025-06-30T09:22:28.106019Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1027.v851 to update nodes log
2025-06-30T09:22:28.106025Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1029.v852 to update nodes log
2025-06-30T09:22:28.106030Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1030.v852 to update nodes log
2025-06-30T09:22:28.106034Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1031.v852 to update nodes log
2025-06-30T09:22:28.106038Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v852 to update nodes log
2025-06-30T09:22:28.106043Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1028.v852 to update nodes log
2025-06-30T09:22:28.106047Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1026.v852 to update nodes log
2025-06-30T09:22:28.106060Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z
2025-06-30T09:22:28.106073Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z
2025-06-30T09:22:28.106081Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z
2025-06-30T09:22:28.106088Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z
2025-06-30T09:22:28.106096Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z
2025-06-30T09:22:28.106103Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z
2025-06-30T09:22:28.106111Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z
2025-06-30T09:22:28.106119Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z
2025-06-30T09:22:28.128125Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:28352:21296], Recipient [1:28170:21163]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-06-30T09:22:28.128208Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2215], Recipient [1:28170:21163]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { }
2025-06-30T09:22:28.128220Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes
2025-06-30T09:22:28.128241Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z
2025-06-30T09:22:28.129003Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:28354:21298], Recipient [1:28170:21163]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-06-30T09:22:28.129034Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:637:2215], Recipient [1:28170:21163]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { }
2025-06-30T09:22:28.129039Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes
2025-06-30T09:22:28.129049Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #189.852 1970-01-08T20:00:00.025000Z - 1970-01-08T21:00:00.025000Z - 1970-01-08T22:00:00.025000Z
2025-06-30T09:22:28.129709Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:28356:21300], Recipient [1:28170:21163]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-06-30T09:22:28.129751Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039939, Sender [1:637:2215], Recipient [1:28170:21163]: NKikimr::NNodeBroker::TEvNodeBroker::TEvExtendLeaseRequest { NodeId: 1024 }
2025-06-30T09:22:28.129757Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:257: StateWork, processing event TEvNodeBroker::TEvExtendLeaseRequest
2025-06-30T09:22:28.129785Z node 1 :NODE_BROKER DEBUG: node_broker__extend_lease.cpp:44: TTxExtendLease Execute node #1024
2025-06-30T09:22:28.129795Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node
2025-06-30T09:22:28.129820Z node 1 :NODE_BROKER DEBUG: node_broker__extend_lease.cpp:78: TTxExtendLease Complete
2025-06-30T09:22:28.129843Z node 1 :NODE_BROKER TRACE: node_broker__extend_lease.cpp:82: TTxExtendLease reply with: NKikimr::NNodeBroker::TEvNodeBroker::TEvExtendLeaseResponse { Status { Code: WRONG_REQUEST Reason: "Unknown node" } NodeId: 1024 }
2025-06-30T09:22:28.130475Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:28358:21302], Recipient [1:28170:21163]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-06-30T09:22:28.130499Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:637:2215], Recipient [1:28170:21163]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 }
2025-06-30T09:22:28.130505Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode
2025-06-30T09:22:28.130518Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } }
|82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest
>> IndexBuildTest::CancellationNoTable [GOOD]
>> IndexBuildTest::CancelBuild
>> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-true [GOOD]
>> SystemView::TopPartitionsByCpuTables [GOOD]
>> SystemView::TopPartitionsByCpuRanges
>> TOlapReboots::CreateMultipleTables [GOOD]
>> SystemView::TopPartitionsByCpuFields [GOOD]
>> SystemView::TopPartitionsByCpuFollowers
|82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest
>> IndexBuildTestReboots::DropIndexWithDataColumns
>> IndexBuildTestReboots::IndexPartitioning
|82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest
>> TOlapReboots::CreateTable [GOOD]
>> SystemView::AuthUsers_ResultOrder [GOOD]
>> SystemView::AuthUsers_TableRange
>> SystemView::ShowCreateTablePartitionSettings [GOOD]
>> SystemView::ShowCreateTableReadReplicas
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-true [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142]
Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142]
2025-06-30T09:22:22.767512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-30T09:22:22.767536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
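The NODE_BROKER trace above shows one full epoch rollover: nodes #1025 and #1028-#1031 are marked expired, node #1026 is removed, the broker moves to epoch #189.852 and rebuilds its update-nodes log, and a later TEvExtendLeaseRequest for node #1024 is rejected with WRONG_REQUEST: Unknown node. A minimal sketch for summarising such traces follows; the record layout (timestamp, "node N", component, level, source location, message) and the regex are inferred from the lines above, not a stable YDB log contract, and the function name is illustrative:

import re
from collections import Counter

# Assumed record shape, read off the trace above:
# "<ISO timestamp> node <id> :<COMPONENT> <LEVEL>: <file:line>: <message>"
RECORD = re.compile(
    r"^(?P<ts>\S+) node (?P<node>\d+) :(?P<component>\w+) (?P<level>\w+): "
    r"(?P<src>[\w./]+:\d+): (?P<msg>.*)$"
)

def summarize_node_broker(lines):
    """Count node lifecycle events and collect ERROR messages."""
    counts, errors = Counter(), []
    for line in lines:
        m = RECORD.match(line)
        if not m or m.group("component") != "NODE_BROKER":
            continue
        msg = m.group("msg")
        if msg.endswith("has expired"):
            counts["expired"] += 1
        elif msg.startswith("[Committed] Remove node"):
            counts["removed"] += 1
        elif msg.startswith("[Committed] Move to new epoch"):
            counts["epoch_moves"] += 1
        if m.group("level") == "ERROR":
            errors.append(msg)
    return counts, errors

Fed the epoch-rollover records above, this reports expired=5, removed=1, epoch_moves=1 and a single error, the WRONG_REQUEST lease rejection for node #1024.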
2025-06-30T09:22:22.767540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-30T09:22:22.767545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration
2025-06-30T09:22:22.767556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-30T09:22:22.767559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-30T09:22:22.767567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-30T09:22:22.767580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-30T09:22:22.767704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-30T09:22:22.767780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-30T09:22:22.782659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs
2025-06-30T09:22:22.782688Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:22:22.786706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-06-30T09:22:22.786814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-06-30T09:22:22.786865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-30T09:22:22.789357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-06-30T09:22:22.789457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-06-30T09:22:22.789614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-06-30T09:22:22.789705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-06-30T09:22:22.790816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-06-30T09:22:22.790902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop
2025-06-30T09:22:22.791319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-30T09:22:22.791333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-30T09:22:22.791365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
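The FLAT_TX_SCHEMESHARD records around this point walk an AlterSubDomain operation through the schemeshard state machine: TTxOperationPropose accepts the transaction, TCreateParts finds no shards to create, and the operation advances through internal state codes (2 -> 3 below, then 3 -> 128 and 128 -> 240 further down) until the coordinator plans it. A companion sketch in the same spirit as the one above, keying on the "Change state for txid" lines emitted from schemeshard_impl.cpp:2565; the state numbers are opaque internal codes in this output, so the sketch only orders them:

import re
from collections import defaultdict

# Transition marker as it appears in these traces, e.g.
# "Change state for txid 1:0 2 -> 3".
TRANSITION = re.compile(r"Change state for txid (\S+) (\d+) -> (\d+)")

def state_paths(lines):
    """Map each operation id to its ordered list of state transitions."""
    paths = defaultdict(list)
    for line in lines:
        for op_id, src, dst in TRANSITION.findall(line):
            paths[op_id].append((int(src), int(dst)))
    return paths

For the AlterSubDomain trace below this yields {'1:0': [(2, 3), (3, 128), (128, 240)]}.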
2025-06-30T09:22:22.791380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-30T09:22:22.791388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-30T09:22:22.791413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-06-30T09:22:22.793416Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062]
2025-06-30T09:22:22.824274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-30T09:22:22.824343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-06-30T09:22:22.824401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-06-30T09:22:22.824408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path:
2025-06-30T09:22:22.824447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-06-30T09:22:22.824457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2025-06-30T09:22:22.825423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-06-30T09:22:22.825495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-06-30T09:22:22.825568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-30T09:22:22.825583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-06-30T09:22:22.825590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-06-30T09:22:22.825597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3
2025-06-30T09:22:22.827225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490:
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.827256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:22.827271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:22.827926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.827944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.827962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.827983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:22.828933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:22.830433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:22.830487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:22.830788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:22.830842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:22.830865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.830959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:22.830970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.831012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:22.831029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant 
no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:22.832047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:22.832061Z node 1 :FLAT_TX_SCHEMESHARD ... mplete, at schemeshard: 72057594046678944 2025-06-30T09:22:30.758367Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:30.758378Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:30.758384Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:30.758719Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [3:2648:4390] sender: [3:2711:2058] recipient: [3:15:2062] 2025-06-30T09:22:30.800703Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/by_embedding" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:22:30.800844Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/by_embedding" took 163us result status StatusSuccess 2025-06-30T09:22:30.801268Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/by_embedding" PathDescription { Self { Name: "by_embedding" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: "indexImplPrefixTable" PathId: 6 SchemeshardId: 72057594046678944 
PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 10 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "by_embedding" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "prefix" KeyColumnNames: "embedding" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataColumnNames: "covered" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 
100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 
SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> SystemView::AuthOwners_TableRange [GOOD] >> SystemView::AuthPermissions >> IndexBuildTestReboots::DropIndex >> TOlapReboots::CreateMultipleStandaloneTables [GOOD] >> SystemView::ShowCreateTableColumn [GOOD] >> SystemView::ShowCreateTableKeyBloomFilter |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::BaseCase |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::CreateMultipleTables [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:46.287228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 
60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:46.287256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.287262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:46.287268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:46.287283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:46.287288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:46.287298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.287315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:46.287427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:46.287523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:46.301392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:46.301421Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:46.301547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.311494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:46.312888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:46.313010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:46.315449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:46.315528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:46.315665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.315733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:46.316747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 
72057594046678944 2025-06-30T09:21:46.316811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:46.317111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:46.317126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.317137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:46.317146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:46.317153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:46.317187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:46.318988Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.346717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:46.346847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.346929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:46.346939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:46.346996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:46.347015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:46.347992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: 
StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.348044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:46.348126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.348149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:46.348156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:46.348162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:46.348827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.348846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:46.348852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:46.349402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.349418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.349426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:46.349434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:46.350358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:46.350981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:46.351034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:46.351280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.351315Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... e_effects.cpp:654: Send tablet strongly msg operationId: 1003:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:5 msg type: 268697639 2025-06-30T09:22:30.866041Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1003, partId: 0, tablet: 72057594037968897 2025-06-30T09:22:30.866109Z node 89 :HIVE INFO: tablet_helpers.cpp:1441: [72057594037968897] TEvUpdateTabletsObject, msg: ObjectId: 7726343884038809171 TabletIds: 72075186233409546 TxId: 1003 TxPartId: 0 2025-06-30T09:22:30.866135Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6063: Update tablets object reply, message: Status: OK TxId: 1003 TxPartId: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:30.866145Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Status: OK TxId: 1003 TxPartId: 0 2025-06-30T09:22:30.866237Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:30.866953Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:30.867121Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:30.877982Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6237: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1003 2025-06-30T09:22:30.878005Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-30T09:22:30.878036Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1003 FAKE_COORDINATOR: Erasing txId 1003 2025-06-30T09:22:30.878549Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:30.878587Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:30.878595Z node 89 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:22:30.878613Z node 89 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:30.878619Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:30.878624Z node 89 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:30.878627Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:30.878633Z 
node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-30T09:22:30.878647Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [89:372:2348] message: TxId: 1003 2025-06-30T09:22:30.878655Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:30.878661Z node 89 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:22:30.878666Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:22:30.878702Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:22:30.880206Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:30.880223Z node 89 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [89:443:2412] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:30.880340Z node 89 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore/ColumnTable1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:30.880417Z node 89 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore/ColumnTable1" took 85us result status StatusSuccess 2025-06-30T09:22:30.880583Z node 89 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/OlapStore/ColumnTable1" PathDescription { Self { Name: "ColumnTable1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 1004 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { 
TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "ColumnTable1" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } NextColumnFamilyId: 1 } SchemaPresetId: 1 SchemaPresetName: "default" ColumnStorePathId { OwnerId: 72057594046678944 LocalId: 3 } ColumnShardCount: 1 Sharding { ColumnShards: 72075186233409546 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "timestamp" } } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:30.880751Z node 89 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore/ColumnTable2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:30.880780Z node 89 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore/ColumnTable2" took 31us result status StatusSuccess 2025-06-30T09:22:30.880861Z node 89 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/OlapStore/ColumnTable2" PathDescription { Self { Name: "ColumnTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 
DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "ColumnTable2" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } NextColumnFamilyId: 1 } SchemaPresetId: 1 SchemaPresetName: "default" ColumnStorePathId { OwnerId: 72057594046678944 LocalId: 3 } ColumnShardCount: 1 Sharding { ColumnShards: 72075186233409546 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "timestamp" } } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> SelfHeal::TestOneMaintenanceRequest4Plus2Block [GOOD] >> SelfHeal::TestThreeMaintenanceRequestsMirror3dc ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::CreateTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:45.786307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:45.786339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:45.786346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:45.786352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:45.786367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 
10000 2025-06-30T09:21:45.786372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:45.786382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:45.786399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:45.786544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:45.786630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:45.810509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:45.810535Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:45.810649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:45.814623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:45.816677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:45.816745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:45.818843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:45.818907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:45.819047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:45.819132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:45.820355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:45.820417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:45.820729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:45.820741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:45.820751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:45.820760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a 
bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:45.820767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:45.820804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:45.822598Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:45.853886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:45.853988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:45.854059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:45.854069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:45.854126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:45.854146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:45.859180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:45.859248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:45.859331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:45.859360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:45.859367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, 
do next state 2025-06-30T09:21:45.859373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:45.861974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:45.862005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:45.862014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:45.863138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:45.863166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:45.863174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:45.863183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:45.864166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:45.866457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:45.866534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:45.866852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:45.866912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
schemeshard: 72057594046678944, txId: 1003, path id: 4 2025-06-30T09:22:31.304675Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.304686Z node 105 :FLAT_TX_SCHEMESHARD INFO: create_table.cpp:459: TCreateColumnTable TProposedWaitParts operationId# 1003:0 ProgressState at tablet: 72057594046678944 2025-06-30T09:22:31.304702Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: create_table.cpp:485: TCreateColumnTable TProposedWaitParts operationId# 1003:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-30T09:22:31.304857Z node 105 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:31.304871Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:31.304874Z node 105 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:31.304879Z node 105 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5 2025-06-30T09:22:31.304885Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:22:31.305271Z node 105 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:31.305296Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:31.305301Z node 105 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:31.305307Z node 105 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 4 2025-06-30T09:22:31.305311Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:31.305331Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-30T09:22:31.305672Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1003:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-30T09:22:31.305700Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet 
strongly msg operationId: 1003:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:4 msg type: 268697639 2025-06-30T09:22:31.305736Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1003, partId: 0, tablet: 72057594037968897 2025-06-30T09:22:31.305973Z node 105 :HIVE INFO: tablet_helpers.cpp:1441: [72057594037968897] TEvUpdateTabletsObject, msg: ObjectId: 7726343884038809171 TabletIds: 72075186233409546 TxId: 1003 TxPartId: 0 2025-06-30T09:22:31.305993Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6063: Update tablets object reply, message: Status: OK TxId: 1003 TxPartId: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.306008Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Status: OK TxId: 1003 TxPartId: 0 2025-06-30T09:22:31.306268Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:31.306496Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:31.306854Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.317880Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6237: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1003 2025-06-30T09:22:31.317911Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-30T09:22:31.317946Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1003 FAKE_COORDINATOR: Erasing txId 1003 2025-06-30T09:22:31.318602Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.318655Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.318664Z node 105 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:22:31.318720Z node 105 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:31.318727Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:31.318734Z node 105 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:31.318758Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:31.318764Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-30T09:22:31.318781Z node 105 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [105:371:2347] message: TxId: 1003 2025-06-30T09:22:31.318792Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:31.318799Z node 105 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:22:31.318805Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:22:31.318845Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:22:31.319471Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:31.319489Z node 105 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [105:430:2399] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:31.319617Z node 105 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore/ColumnTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:31.319701Z node 105 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore/ColumnTable" took 94us result status StatusSuccess 2025-06-30T09:22:31.319842Z node 105 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/OlapStore/ColumnTable" PathDescription { Self { Name: "ColumnTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 
MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "ColumnTable" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } NextColumnFamilyId: 1 } SchemaPresetId: 1 SchemaPresetName: "default" ColumnStorePathId { OwnerId: 72057594046678944 LocalId: 3 } ColumnShardCount: 1 Sharding { ColumnShards: 72075186233409546 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "timestamp" } } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> SystemView::AuthGroupMembers [GOOD] >> SystemView::AuthGroupMembers_Access |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/slow/unittest |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestOnDiskStoredSourceIds >> TPQTestSlow::TestWriteVeryBigMessage >> SystemView::AuthUsers_TableRange [GOOD] >> SystemView::AuthPermissions_ResultOrder >> SlowTopicAutopartitioning::CDC_Write >> IndexBuildTestReboots::CancelBuild ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::CreateMultipleStandaloneTables [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:31.921817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:31.921850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:31.921858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:31.921865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:31.921880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:31.921885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:31.921896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:31.921912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:31.922037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:31.922124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:31.946938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:31.946971Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:31.947103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:31.960290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:31.961900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:31.961965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:31.964830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:31.964906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:31.965051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:31.965134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:31.966209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:31.966273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:31.966588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:31.966604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:31.966615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:31.966624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:31.966631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:31.966666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:31.969373Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:32.004461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:32.004571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.004645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:32.004656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:32.004710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:32.004741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:32.005758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:32.005852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:32.005943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.005974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 
1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:32.005982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:32.005989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:32.006676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.006694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:32.006701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:32.007326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.007345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.007353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:32.007361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:32.008410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:32.009086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:32.009145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:32.009439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:32.009483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
eshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:31.633893Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-30T09:22:31.633896Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1002 2025-06-30T09:22:31.633900Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 4 2025-06-30T09:22:31.633903Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:22:31.633914Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1002, ready parts: 0/1, is published: true 2025-06-30T09:22:31.634511Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-30T09:22:31.634571Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:31.645558Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6237: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 1002 2025-06-30T09:22:31.645585Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409547, partId: 0 2025-06-30T09:22:31.645615Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 1002 FAKE_COORDINATOR: Erasing txId 1002 2025-06-30T09:22:31.646411Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.646514Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.646527Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1002:0 ProgressState 2025-06-30T09:22:31.646555Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1002:0 progress is 1/1 2025-06-30T09:22:31.646560Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:31.646566Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1002:0 progress is 1/1 2025-06-30T09:22:31.646569Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:31.646575Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: 
TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: true 2025-06-30T09:22:31.646602Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [120:425:2389] message: TxId: 1002 2025-06-30T09:22:31.646612Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-30T09:22:31.646619Z node 120 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1002:0 2025-06-30T09:22:31.646624Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1002:0 2025-06-30T09:22:31.646682Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:31.647449Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-30T09:22:31.647465Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [120:426:2390] TestWaitNotification: OK eventTxId 1002 2025-06-30T09:22:31.647622Z node 120 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ColumnTable1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:31.647715Z node 120 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ColumnTable1" took 105us result status StatusSuccess 2025-06-30T09:22:31.647890Z node 120 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ColumnTable1" PathDescription { Self { Name: "ColumnTable1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 
DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "ColumnTable1" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } ColumnShardCount: 1 Sharding { ColumnShards: 72075186233409546 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "timestamp" } } StorageConfig { DataChannelCount: 64 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:31.648073Z node 120 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ColumnTable2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:31.648113Z node 120 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ColumnTable2" took 41us result status StatusSuccess 2025-06-30T09:22:31.648191Z node 120 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ColumnTable2" PathDescription { Self { Name: "ColumnTable2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 
0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "ColumnTable2" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } ColumnShardCount: 1 Sharding { ColumnShards: 72075186233409547 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "timestamp" } } StorageConfig { DataChannelCount: 64 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> IndexBuildTest::CancelBuild [GOOD] >> SystemView::AuthPermissions [GOOD] >> SystemView::AuthPermissions_Access >> IndexBuildTestReboots::BaseCaseWithDataColumns |82.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::CancelBuild [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:22.778572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:22.778599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:22.778606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:22.778612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:22.778628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:22.778632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:22.778642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:22.778659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:22.778769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:22.778844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:22.789816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:22.789839Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:22.793164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:22.793232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:22.793281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:22.797003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:22.797126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:22.797256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:22.797360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:22.798286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:22.798345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:22.798679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:22.798691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:22.798721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:22.798730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:22.798750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:22.798772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.800567Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:22.823972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:22.824072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.824153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:22.824163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:22.824214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:22.824229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:22.825214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:22.825284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:22.825365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.825390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:22.825398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:22.825405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:22.828435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.828464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:22.828473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:22.829073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.829088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:22:22.829095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.829113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:22.829898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:22.832039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:22.832095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:22.832347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:22.832394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:22.832412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.832499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:22.832510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.832550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:22.832564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:22.833370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:22.833381Z node 1 :FLAT_TX_SCHEMESHARD ... 
1474976710760:0 progress is 1/1 2025-06-30T09:22:33.576517Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-30T09:22:33.576522Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-30T09:22:33.576535Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:129:2153] message: TxId: 281474976710760 2025-06-30T09:22:33.576543Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-30T09:22:33.576548Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-30T09:22:33.576553Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710760:0 2025-06-30T09:22:33.576569Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 13 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-30T09:22:33.577012Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-30T09:22:33.577030Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710760 2025-06-30T09:22:33.577042Z node 3 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2019: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-30T09:22:33.577061Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2022: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [3:1180:3030], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0}, txId# 281474976710760 2025-06-30T09:22:33.577470Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking 2025-06-30T09:22:33.577497Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: 
index, State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [3:1180:3030], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:22:33.577510Z node 3 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Cancellation_Unlocking to Cancelled 2025-06-30T09:22:33.577925Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled 2025-06-30T09:22:33.577953Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancelled, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [3:1180:3030], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:22:33.577959Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:336: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-30T09:22:33.577984Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:22:33.577992Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [3:1276:3115] TestWaitNotification: OK eventTxId 102 2025-06-30T09:22:33.578457Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-30T09:22:33.578536Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:103: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: 
"index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } 2025-06-30T09:22:33.578821Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:33.578899Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 81us result status StatusSuccess 2025-06-30T09:22:33.579039Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:33.579288Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/index1" Options { 
ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:22:33.579314Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/index1" took 30us result status StatusPathDoesNotExist 2025-06-30T09:22:33.579340Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/index1\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeTableIndex, state: EPathStateNotExist), drop stepId: 5000005, drop txId: 281474976710759, source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Table/index1" PathId: 3 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> VectorIndexBuildTest::Metering_ServerLessDB-smallScanBuffer-false [GOOD] >> VectorIndexBuildTest::Metering_ServerLessDB-smallScanBuffer-true >> LdapAuthProviderTest_StartTls::LdapRefreshRemoveUserBad [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoWithError >> SystemView::AuthGroupMembers_Access [GOOD] >> SystemView::AuthGroupMembers_ResultOrder |82.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |82.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |82.2%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller >> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceAndDropWithReboots [GOOD] |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceWithReboots [GOOD] >> VectorIndexBuildTest::Metering_ServerLessDB_Restarts-doRestarts-false [GOOD] >> VectorIndexBuildTest::Metering_ServerLessDB_Restarts-doRestarts-true >> SystemView::PDisksFields [GOOD] >> SystemView::GroupsFields |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceAndDropWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is 
[0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:26.164242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:26.164267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:26.164273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:26.164280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:26.164297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:26.164302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:26.164313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:26.164328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:26.164451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:26.164550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:26.177758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:26.177782Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:26.177870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:26.181428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:26.182491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:26.182526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:26.184396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:26.184458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState 
with owners number: 0 2025-06-30T09:22:26.184558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:26.184596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:26.185699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:26.185765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:26.186091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:26.186103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:26.186114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:26.186124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:26.186132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:26.186164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:26.188003Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:26.208046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:26.208130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.208198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:26.208205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:26.208243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 
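[Editor's note] The trace above and below repeatedly logs suboperation phase transitions such as "Change state for txid 1:0 2 -> 3", "3 -> 128", and "128 -> 240". The numeric codes are internal phase identifiers. The following is a minimal, self-contained sketch of such a phase progression, assuming phase names (CreateParts, ConfigureParts, Propose, Done) inferred from the surrounding log text; it is an illustrative model, not YDB's actual schemeshard implementation.

    // Illustrative model of the "2 -> 3 -> 128 -> 240" phase progression
    // seen in the trace. All names here are hypothetical.
    #include <cstdio>

    enum class TxState { CreateParts = 2, ConfigureParts = 3, Propose = 128, Done = 240 };

    static const char* Name(TxState s) {
        switch (s) {
            case TxState::CreateParts:    return "CreateParts";
            case TxState::ConfigureParts: return "ConfigureParts";
            case TxState::Propose:        return "Propose";
            case TxState::Done:           return "Done";
        }
        return "Unknown";
    }

    // Advance one suboperation through its phases, logging like the trace does.
    static TxState Advance(TxState s) {
        switch (s) {
            case TxState::CreateParts:    return TxState::ConfigureParts; // 2 -> 3
            case TxState::ConfigureParts: return TxState::Propose;        // 3 -> 128
            case TxState::Propose:        return TxState::Done;           // 128 -> 240
            case TxState::Done:           return TxState::Done;
        }
        return s;
    }

    int main() {
        TxState s = TxState::CreateParts;
        while (s != TxState::Done) {
            TxState next = Advance(s);
            std::printf("Change state for txid 1:0 %d -> %d (%s -> %s)\n",
                        (int)s, (int)next, Name(s), Name(next));
            s = next;
        }
    }

Run as a plain C++ program, this prints the same three transitions the schemeshard logs for a single-part operation.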
2025-06-30T09:22:26.208260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:26.209059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:26.209115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:26.209176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.209187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:26.209193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:26.209197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:26.209518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.209526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:26.209530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:26.209832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.209841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.209845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:26.209851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:26.210374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:26.210689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:26.210727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 
72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:26.210941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:26.210962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... ep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1005 at step: 5000006 2025-06-30T09:22:34.728851Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.728881Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:34.728904Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1005 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 141733922925 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:34.728914Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 1005:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-30T09:22:34.728942Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:22:34.728958Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1005:0 128 -> 240 2025-06-30T09:22:34.728991Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:34.729004Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:22:34.729438Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 FAKE_COORDINATOR: Erasing txId 1005 2025-06-30T09:22:34.729943Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:34.729956Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:34.729997Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:22:34.730030Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:34.730036Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [33:210:2210], at schemeshard: 72057594046678944, txId: 1005, path id: 1 2025-06-30T09:22:34.730043Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [33:210:2210], at schemeshard: 72057594046678944, txId: 1005, path id: 4 2025-06-30T09:22:34.730114Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:22:34.730125Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1005:0 ProgressState 2025-06-30T09:22:34.730142Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:22:34.730149Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:34.730156Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:22:34.730161Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:34.730166Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: false 2025-06-30T09:22:34.730174Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:34.730180Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1005:0 2025-06-30T09:22:34.730185Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1005:0 2025-06-30T09:22:34.730203Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:22:34.730211Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1005, publications: 2, subscribers: 0 2025-06-30T09:22:34.730246Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-30T09:22:34.730252Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:22:34.730359Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.730375Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.730381Z node 33 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:22:34.730388Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:22:34.730394Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:22:34.730463Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:34.730471Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:22:34.730484Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:34.730535Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.730552Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.730557Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:22:34.730562Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-30T09:22:34.730568Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:34.730579Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1005, subscribers: 0 2025-06-30T09:22:34.731508Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.731639Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:22:34.731650Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-30T09:22:34.731702Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-30T09:22:34.731709Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests 
-- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-30T09:22:34.731779Z node 33 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-30T09:22:34.731796Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-30T09:22:34.731800Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [33:416:2405] TestWaitNotification: OK eventTxId 1005 2025-06-30T09:22:34.731877Z node 33 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:34.731908Z node 33 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 41us result status StatusPathDoesNotExist 2025-06-30T09:22:34.731937Z node 33 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> SystemView::AuthPermissions_ResultOrder [GOOD] >> SystemView::AuthPermissions_Selects ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:26.509807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:26.509834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:26.509841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:26.509848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:26.509866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:26.509871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:26.509881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:26.509897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:26.510037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:26.510123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:26.523309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:26.523338Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:26.523455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:26.526002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:26.526702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:26.526759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:26.528819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:26.528920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:26.529082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:26.529133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 2025-06-30T09:22:26.530533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:26.530588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:26.530901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:26.530914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:26.530928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:26.530938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:26.530944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:26.530973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:26.532642Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:26.554906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:26.554994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.555071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:26.555080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:26.555139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:26.555155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
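[Editor's note] A second recurring pattern in these traces is scheme-board publication tracking: "Publication still in progress, tx: 1005, publications: 2, subscribers: 0", followed by one "AckPublish ... version" per path until "Publication complete, notify & remove". The sketch below models that bookkeeping under assumed types (TPublication, AckPublish are invented for illustration); it is not the real scheme board code.

    // Illustrative model: a transaction is "published" once every affected
    // path has been acknowledged at (or above) its expected version.
    #include <cstdint>
    #include <cstdio>
    #include <map>

    struct TPublication {
        // pathId -> expected version that must be acknowledged
        std::map<uint64_t, uint64_t> Pending;
    };

    // Returns true when the last pending path version is acknowledged.
    static bool AckPublish(TPublication& pub, uint64_t pathId, uint64_t version) {
        auto it = pub.Pending.find(pathId);
        if (it == pub.Pending.end() || version < it->second)
            return false;                      // stale or unknown ack; keep waiting
        pub.Pending.erase(it);                 // this path is now published
        std::printf("Publication in-flight, count: %zu\n", pub.Pending.size());
        return pub.Pending.empty();            // notify & remove when count hits 0
    }

    int main() {
        TPublication pub;
        pub.Pending[1] = 13;                          // root path, version 13
        pub.Pending[4] = 18446744073709551615ull;     // dropped path, max version
        AckPublish(pub, 4, 18446744073709551615ull);  // count: 1
        if (AckPublish(pub, 1, 13))                   // count: 0 -> complete
            std::printf("Publication complete, notify & remove\n");
    }

The max-uint64 version (18446744073709551615) matches what the trace records for a dropped path, which is why both external data source drop tests end with exactly two acknowledgments.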
2025-06-30T09:22:26.556097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:26.556169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:26.556232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.556243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:26.556250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:26.556256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:26.556677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.556687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:26.556692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:26.557014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.557022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.557027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:26.557032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:26.557607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:26.557970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:26.558006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:26.558178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:26.558200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... Board Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.810537Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:22:34.810542Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 2 2025-06-30T09:22:34.810547Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:34.810559Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 0/1, is published: true 2025-06-30T09:22:34.811127Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1005:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1005 msg type: 269090816 2025-06-30T09:22:34.811169Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1005, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1005 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1005 at step: 5000006 2025-06-30T09:22:34.811285Z node 32 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:34.811308Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1005 Coordinator: 72057594046316545 AckTo { RawX1: 126 RawX2: 137438955622 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:34.811319Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 1005:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-30T09:22:34.811346Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:22:34.811365Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1005:0 128 -> 240 2025-06-30T09:22:34.811397Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:34.811407Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:22:34.811603Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.811932Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 FAKE_COORDINATOR: Erasing txId 1005 2025-06-30T09:22:34.812225Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:34.812233Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:34.812275Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:22:34.812302Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:34.812308Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [32:209:2209], at schemeshard: 72057594046678944, txId: 1005, path id: 1 2025-06-30T09:22:34.812313Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [32:209:2209], at schemeshard: 72057594046678944, txId: 1005, path id: 4 2025-06-30T09:22:34.812356Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:22:34.812364Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1005:0 ProgressState 2025-06-30T09:22:34.812378Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:22:34.812384Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:34.812390Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:22:34.812394Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:34.812399Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: false 2025-06-30T09:22:34.812405Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:22:34.812411Z node 32 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1005:0 2025-06-30T09:22:34.812417Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1005:0 2025-06-30T09:22:34.812431Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:22:34.812438Z node 32 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: 
Publication still in progress, tx: 1005, publications: 2, subscribers: 0 2025-06-30T09:22:34.812442Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-30T09:22:34.812447Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:22:34.812562Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.812572Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.812577Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:22:34.812583Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:22:34.812587Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:22:34.812619Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:34.812624Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:22:34.812635Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:34.812688Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.812699Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.812703Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:22:34.812708Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-30T09:22:34.812712Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:34.812720Z node 32 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1005, subscribers: 0 2025-06-30T09:22:34.813633Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:22:34.813675Z node 32 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:22:34.813691Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-30T09:22:34.813778Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-30T09:22:34.813788Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-30T09:22:34.813890Z node 32 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-30T09:22:34.813915Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-30T09:22:34.813922Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [32:409:2398] TestWaitNotification: OK eventTxId 1005 >> VectorIndexBuildTest::SimpleDuplicates [GOOD] >> VectorIndexBuildTest::Shard_Build_Error |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.2%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TExternalDataSourceTestReboots::SimpleDropExternalDataSourceWithReboots2 [GOOD] >> TOlapReboots::CreateDropStandaloneTable [GOOD] >> TOlapReboots::AlterTtlSettings >> SystemView::AuthPermissions_Access [GOOD] >> SystemView::ShowCreateTableReadReplicas [GOOD] >> SystemView::ShowCreateTableTtlSettings >> TOlapReboots::DropMultipleStandaloneTables [GOOD] >> SystemView::AuthGroupMembers_ResultOrder [GOOD] >> SystemView::AuthGroupMembers_TableRange |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::SimpleDropExternalDataSourceWithReboots2 [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:27.191634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:27.191662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:27.191668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:27.191673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:27.191688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:27.191693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:27.191704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:27.191721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:27.191839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:27.191926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:27.209392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:27.209419Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:27.209538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:27.212792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:27.213735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:27.213772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:27.215574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:27.215635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:27.215773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:27.215822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:27.216655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:27.216698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:27.216928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:27.216935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:27.216942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:27.216948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:27.216952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:27.216975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:27.218176Z 
node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:27.239171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:27.239258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:27.239321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:27.239331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:27.239378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:27.239401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:27.241315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:27.241402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:27.241482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:27.241501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:27.241509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:27.241519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:27.243532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:27.243561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:27.243572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:27.244614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:27.244656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:27.244667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:27.244679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:27.245564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:27.246336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:27.246396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:27.246662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:27.246704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
inStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1003 at step: 5000004 2025-06-30T09:22:35.696925Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:35.696949Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1003 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 141733922925 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:35.696958Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 1003:0 HandleReply TEvOperationPlan: step# 5000004 2025-06-30T09:22:35.696982Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:35.697000Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 128 -> 240 2025-06-30T09:22:35.697039Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:35.697049Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:22:35.697165Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:35.697563Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 FAKE_COORDINATOR: Erasing txId 1003 2025-06-30T09:22:35.697902Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:35.697911Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:35.697941Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:22:35.697967Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:35.697972Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [33:210:2210], at schemeshard: 72057594046678944, txId: 1003, path id: 1 2025-06-30T09:22:35.697977Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [33:210:2210], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-30T09:22:35.698029Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:35.698035Z node 33 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:22:35.698045Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:35.698048Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:35.698052Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:35.698054Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:35.698057Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: false 2025-06-30T09:22:35.698061Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:35.698064Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:22:35.698067Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:22:35.698076Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:35.698081Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 2, subscribers: 0 2025-06-30T09:22:35.698086Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-30T09:22:35.698090Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-30T09:22:35.698146Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:35.698156Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:35.698161Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:35.698166Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-30T09:22:35.698172Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:22:35.698213Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 
72057594046678944 2025-06-30T09:22:35.698217Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:22:35.698223Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:35.698258Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:35.698267Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:35.698271Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:35.698275Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-30T09:22:35.698279Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:35.698288Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:22:35.698698Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:35.699239Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:22:35.699260Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:22:35.699318Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:22:35.699326Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:22:35.699399Z node 33 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:22:35.699419Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:35.699425Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [33:361:2350] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:35.699517Z node 33 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:35.699549Z node 33 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 44us result status StatusPathDoesNotExist 2025-06-30T09:22:35.699588Z node 33 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TExternalDataSourceTestReboots::DropExternalDataSourceWithReboots
>> VectorIndexBuildTest::Shard_Build_Error [GOOD]
|82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest
|82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest
>> SystemView::ShowCreateTableKeyBloomFilter [GOOD]
>> SystemView::ShowCreateTable
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::AuthPermissions_Access [GOOD]
Test command err: 2025-06-30T09:22:15.524408Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670290885691541:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:15.524512Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003104/r3tmp/tmpsLZOgD/pdisk_1.dat 2025-06-30T09:22:15.631058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.631092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.634455Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:15.636592Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25095, node 1 2025-06-30T09:22:15.658678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:15.658696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:15.658698Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:15.658772Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31121 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:15.693023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:15.703509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:15.726023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "Tenant1" } } TxId: 281474976715658 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:22:15.726130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_extsubdomain.cpp:58: TCreateExtSubDomain Propose, path/Root/Tenant1, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-30T09:22:15.726203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: Tenant1, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-30T09:22:15.726276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-30T09:22:15.726289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976715658:0 type: TxCreateExtSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-30T09:22:15.726338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-30T09:22:15.726373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:22:15.726381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: 
Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:22:15.726400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-30T09:22:15.726411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 waiting... 2025-06-30T09:22:15.727479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976715658, response: Status: StatusAccepted TxId: 281474976715658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-06-30T09:22:15.727535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: root@builtin, status: StatusAccepted, operation: CREATE DATABASE, path: /Root/Tenant1 2025-06-30T09:22:15.727595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-30T09:22:15.727599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-30T09:22:15.727642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-30T09:22:15.727663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-30T09:22:15.727667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7521670290885692003:2385], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 1 2025-06-30T09:22:15.727678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7521670290885692003:2385], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 2 2025-06-30T09:22:15.727689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-30T09:22:15.727694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-30T09:22:15.727699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715658:0, at tablet# 72057594046644480 2025-06-30T09:22:15.727706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 281474976715658 ready parts: 1/1 2025-06-30T09:22:15.729167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to 
coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715658 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:15.729616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-30T09:22:15.729631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-30T09:22:15.729634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715658 2025-06-30T09:22:15.729638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715658, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], version: 4 2025-06-30T09:22:15.729644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-30T09:22:15.729692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-30T09:22:15.729700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-30T09:22:15.729702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715658 2025-06-30T09:22:15.729704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715658, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 2 2025-06-30T09:22:15.729706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-30T09:22:15.729713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715658, ready parts: 0/1, is published: true 2025-06-30T09:22:15.730211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:22:15.730217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operatio ... 
rCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:35.102573Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager TableId: [72057594046644480:6:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [pools] }] } 2025-06-30T09:22:35.102583Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670380176769179:2379], row count: 0, finished: 0 2025-06-30T09:22:35.102769Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:35.102883Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools TableId: [72057594046644480:7:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [default] }] } 2025-06-30T09:22:35.102890Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670380176769179:2379], row count: 0, finished: 0 2025-06-30T09:22:35.102908Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools/default TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:35.102961Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools/default TableId: [72057594046644480:8:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindResourcePool DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 
Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:22:35.102977Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670380176769179:2379], row count: 6, finished: 0 2025-06-30T09:22:35.102998Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:35.103090Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-30T09:22:35.103096Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670380176769179:2379], row count: 1, finished: 0 2025-06-30T09:22:35.103149Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir2 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:35.103283Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir2 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-30T09:22:35.103290Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670380176769179:2379], row count: 0, finished: 0 2025-06-30T09:22:35.103360Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:35.103400Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [72057594046644480:4:1] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, 
LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:22:35.103404Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670380176769179:2379], row count: 0, finished: 0 2025-06-30T09:22:35.103426Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7521670380176769179:2379], owner: [41:7521670380176769175:2377], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:35.103872Z node 41 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [41:7521670371586832578:2097], database# , query hash# 12107705915200741666, cpu time# 14935 2025-06-30T09:22:35.103923Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275355099, txId: 281474976715692] shutting down 2025-06-30T09:22:35.106011Z node 45 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:35.106241Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:35.105835Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 43 2025-06-30T09:22:35.106071Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(43, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:35.106615Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 44 2025-06-30T09:22:35.106660Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(44, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:35.106676Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 42 2025-06-30T09:22:35.106764Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:35.106780Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 45 2025-06-30T09:22:35.106850Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(45, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:35.107828Z node 41 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[42:7521670371944041793:2111], Type=268959746 2025-06-30T09:22:35.107844Z node 41 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[42:7521670371944041793:2111], Type=268959746 2025-06-30T09:22:35.107848Z node 41 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[42:7521670371944041793:2111], Type=268959746 2025-06-30T09:22:35.107852Z node 41 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[42:7521670371944041793:2111], Type=268959746 2025-06-30T09:22:35.107855Z node 41 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[42:7521670371944041793:2111], Type=268959746 2025-06-30T09:22:35.107859Z node 41 :HIVE 
WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[42:7521670371944041793:2111], Type=268959746 2025-06-30T09:22:35.124002Z node 44 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:35.129132Z node 45 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:35.200864Z node 43 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:35.201215Z node 43 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:35.203901Z node 42 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:35.209399Z node 42 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=incorrect path status: LookupError;
|82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest
>> TExternalDataSourceTestReboots::SimpleDropExternalDataSourceWithReboots [GOOD]
|82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::Shard_Build_Error [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:23.268354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:23.268382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:23.268389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:23.268396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:23.268411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:23.268415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:23.268426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:23.268443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:23.268565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:23.268641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:23.287309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:23.287333Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:23.297741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:23.297797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:23.297833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:23.316054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:23.316172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:23.316306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:23.316400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:23.317304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:23.317364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:23.317744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:23.317758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:23.317787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:23.317796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:23.317803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:23.317824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.319387Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:23.340785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:23.340866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.340928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:23.340935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:23.340974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:23.340984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:23.341766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:23.341800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:23.341845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.341852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:23.341856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:23.341860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:23.342256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.342267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:23.342273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:23.342578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.342586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:22:23.342591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:23.342603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:23.343154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:23.343548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:23.343587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:23.343753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:23.343782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:23.343794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:23.343855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:23.343862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:23.343889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:23.343898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:23.344295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:23.344302Z node 1 :FLAT_TX_SCHEMESHARD ... 
INFO: datashard_impl.h:3310: 72075186233409547 Reporting state Offline to schemeshard 72057594046678944 2025-06-30T09:22:36.401790Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:445:2413], Recipient [3:461:2425]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-30T09:22:36.402214Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [3:985:2902], Recipient [3:461:2425]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [3:986:2903] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-30T09:22:36.402224Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-30T09:22:36.402267Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5640: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 461 RawX2: 12884904313 } TabletId: 72075186233409547 State: 4 2025-06-30T09:22:36.402291Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409547, state: Offline, at schemeshard: 72057594046678944 2025-06-30T09:22:36.402404Z node 3 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186233409548 Reporting state Offline to schemeshard 72057594046678944 2025-06-30T09:22:36.402430Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:446:2414], Recipient [3:467:2429]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-30T09:22:36.402566Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [3:988:2905], Recipient [3:467:2429]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [3:990:2907] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-30T09:22:36.402573Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-30T09:22:36.402588Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5640: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 467 RawX2: 12884904317 } TabletId: 72075186233409548 State: 4 2025-06-30T09:22:36.402597Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409548, state: Offline, at schemeshard: 72057594046678944 2025-06-30T09:22:36.403246Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:22:36.403371Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [3:900:2829], Recipient [3:461:2425]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-30T09:22:36.403382Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-30T09:22:36.403389Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409547 state Offline 2025-06-30T09:22:36.403687Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:985:2902], Recipient [3:461:2425]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [3:985:2902] ServerId: [3:986:2903] } 
2025-06-30T09:22:36.403703Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-30T09:22:36.403885Z node 3 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409547 2025-06-30T09:22:36.403962Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-30T09:22:36.404057Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 Forgetting tablet 72075186233409547 2025-06-30T09:22:36.404203Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:445:2413], Recipient [3:461:2425]: NKikimr::TEvTablet::TEvTabletDead 2025-06-30T09:22:36.404279Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409547 2025-06-30T09:22:36.404317Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409547 2025-06-30T09:22:36.404786Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:22:36.404966Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [3:900:2829], Recipient [3:467:2429]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-30T09:22:36.404977Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-30T09:22:36.404985Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409548 state Offline 2025-06-30T09:22:36.405003Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:36.405011Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:22:36.405032Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:36.405123Z node 3 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409548 2025-06-30T09:22:36.405177Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:988:2905], Recipient [3:467:2429]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [3:988:2905] ServerId: [3:990:2907] } 2025-06-30T09:22:36.405184Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-30T09:22:36.405212Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:22:36.405272Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 Forgetting tablet 72075186233409548 2025-06-30T09:22:36.405417Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:446:2414], Recipient [3:467:2429]: NKikimr::TEvTablet::TEvTabletDead 2025-06-30T09:22:36.405478Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409548 2025-06-30T09:22:36.405507Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409548 2025-06-30T09:22:36.407477Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-30T09:22:36.407500Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409547 2025-06-30T09:22:36.408027Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-30T09:22:36.408065Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:36.408073Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:22:36.408099Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:22:36.408108Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:22:36.408119Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:22:36.408178Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-30T09:22:36.408188Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409548 2025-06-30T09:22:36.408662Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:22:36.450563Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-30T09:22:36.450712Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:103: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 Issues { message: "One of the shards report BUILD_ERROR\n: Error: Shard or requested range is empty\n: Error: Datashard test fail\n at Filling stage, process has to be canceled, shardId: 72075186233409546, shardIdx: 72057594046678944:1" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:1 Status: BUILD_ERROR UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:\n: Error: Shard or requested range is empty\n: Error: Datashard test fail\n SeqNoRound: 0 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } State: STATE_REJECTED Settings { source_path: "/MyRoot/vectors" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 0 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 Issues { message: "One of the shards report BUILD_ERROR\n: Error: Shard or requested range is empty\n: Error: Datashard test fail\n at Filling stage, process has to be canceled, shardId: 72075186233409546, shardIdx: 72057594046678944:1" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:1 Status: BUILD_ERROR UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:\n: Error: Shard or requested range is empty\n: Error: Datashard test fail\n SeqNoRound: 0 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } State: STATE_REJECTED Settings { source_path: "/MyRoot/vectors" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 0 } >> SystemView::AuthPermissions_Selects [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::SimpleDropExternalDataSourceWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:28.217586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:28.217615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:28.217622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:28.217628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:28.217646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:28.217651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:28.217662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:28.217678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:28.217833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:28.217959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG:
schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:28.235109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:28.235142Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:28.235276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:28.239159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:28.240145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:28.240206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:28.242661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:28.242782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:28.242964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:28.243034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:28.244239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:28.244303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:28.244671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:28.244689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:28.244699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:28.244711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:28.244717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:28.244755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:28.246545Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] 
recipient: [1:15:2062] 2025-06-30T09:22:28.272247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:28.272341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:28.272429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:28.272440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:28.272499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:28.272527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:28.273429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:28.273484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:28.273536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:28.273546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:28.273551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:28.273557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:28.274064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:28.274081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:28.274088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:28.274548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:28.274565Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:28.274572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:28.274580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:28.275426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:28.275940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:28.275992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:28.276237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:28.276272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
inStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1003 at step: 5000004 2025-06-30T09:22:36.664929Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:36.664953Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1003 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 141733922925 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:36.664962Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 1003:0 HandleReply TEvOperationPlan: step# 5000004 2025-06-30T09:22:36.664987Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:36.665004Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 128 -> 240 2025-06-30T09:22:36.665033Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:36.665042Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:22:36.665177Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:36.665566Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 FAKE_COORDINATOR: Erasing txId 1003 2025-06-30T09:22:36.665869Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:36.665876Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:36.665927Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:22:36.665953Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:36.665958Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [33:210:2210], at schemeshard: 72057594046678944, txId: 1003, path id: 1 2025-06-30T09:22:36.665964Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [33:210:2210], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-30T09:22:36.666028Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:36.666036Z node 33 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:22:36.666050Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:36.666055Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:36.666060Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:36.666063Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:36.666068Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: false 2025-06-30T09:22:36.666074Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:36.666080Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:22:36.666085Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:22:36.666098Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:36.666105Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 2, subscribers: 0 2025-06-30T09:22:36.666109Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-30T09:22:36.666113Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-30T09:22:36.666180Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:36.666189Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:36.666194Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:36.666199Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-30T09:22:36.666203Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:22:36.666251Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 
72057594046678944 2025-06-30T09:22:36.666256Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:22:36.666265Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:36.666299Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:36.666308Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:36.666312Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:36.666315Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-30T09:22:36.666319Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:36.666326Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:22:36.667003Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:36.667329Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:22:36.667345Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:22:36.667390Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:22:36.667396Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:22:36.667453Z node 33 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:22:36.667468Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:36.667471Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [33:361:2350] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:36.667524Z node 33 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:36.667551Z node 33 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 34us result status StatusPathDoesNotExist 2025-06-30T09:22:36.667590Z node 33 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TCdcStreamWithRebootsTests::CreateStreamExplicitReady[TabletReboots] [GOOD] |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::DropMultipleStandaloneTables [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:32.296694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:32.296724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:32.296731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:32.296739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 
2025-06-30T09:21:32.296753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:32.296759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:32.296771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:32.296790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:32.296921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:32.297009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:32.316315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:32.316342Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:32.316465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:32.320221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:32.321521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:32.321572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:32.323461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:32.323519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:32.323656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:32.323732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:32.324645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:32.324702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:32.324999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:32.325011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:32.325022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
2025-06-30T09:21:32.325031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:32.325038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:32.325073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:32.326814Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:32.353949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:32.354049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.354129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:32.354138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:32.354194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:32.354211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:32.355047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:32.355090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:32.355172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.355196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-30T09:21:32.355203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:32.355210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:32.355678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.355691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:32.355699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:32.356072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.356083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.356091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:32.356100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:32.357038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:32.357474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:32.357517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:32.357760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:32.357809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
G: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:22:36.138526Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 0/1, is published: true 2025-06-30T09:22:36.138635Z node 111 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-30T09:22:36.138789Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:36.138991Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409546 2025-06-30T09:22:36.139240Z node 111 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186233409546;self_id=[111:340:2324];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-30T09:22:36.140556Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1004:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 275382275 2025-06-30T09:22:36.140661Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:36.140667Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:22:36.140686Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:36.141693Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:22:36.141748Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:22:36.141831Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-30T09:22:36.141842Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-30T09:22:36.141875Z node 111 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1005 2025-06-30T09:22:36.162965Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6237: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 1004 2025-06-30T09:22:36.162987Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1004, tablet: 72075186233409547, partId: 0 2025-06-30T09:22:36.163007Z node 111 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1004:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 1004 2025-06-30T09:22:36.163017Z node 111 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1004:0 129 -> 130 FAKE_COORDINATOR: Erasing txId 1004 2025-06-30T09:22:36.163517Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-30T09:22:36.163550Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-30T09:22:36.163556Z node 111 :FLAT_TX_SCHEMESHARD INFO: drop_table.cpp:315: TDropColumnTable TProposedDeleteParts operationId# 1004:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:36.163586Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:36.163612Z node 111 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:22:36.163616Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:22:36.163622Z node 111 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:22:36.163626Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:22:36.163632Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: true 2025-06-30T09:22:36.163647Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [111:369:2344] message: TxId: 1004 2025-06-30T09:22:36.163655Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:22:36.163661Z node 111 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:0 2025-06-30T09:22:36.163666Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:0 2025-06-30T09:22:36.163689Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:22:36.164208Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:22:36.164247Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:22:36.164256Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [111:535:2490] 2025-06-30T09:22:36.164342Z node 111 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-30T09:22:36.164527Z node 111 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186233409547;self_id=[111:439:2404];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; Forgetting tablet 72075186233409547 2025-06-30T09:22:36.165788Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:22:36.165993Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:22:36.166104Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:36.166110Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:22:36.166121Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:36.166508Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-30T09:22:36.166520Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-30T09:22:36.166689Z node 111 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1004 2025-06-30T09:22:36.166791Z node 111 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ColumnTable1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:36.166827Z node 111 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ColumnTable1" took 46us result status StatusPathDoesNotExist 2025-06-30T09:22:36.166854Z node 111 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ColumnTable1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ColumnTable1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:22:36.166906Z node 111 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ColumnTable2" Options { ReturnPartitioningInfo: false 
ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:36.166915Z node 111 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ColumnTable2" took 10us result status StatusPathDoesNotExist 2025-06-30T09:22:36.166929Z node 111 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ColumnTable2\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ColumnTable2" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::AuthPermissions_Selects [GOOD] Test command err: 2025-06-30T09:22:15.407241Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670292565624212:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:15.408237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0030dd/r3tmp/tmpB4NV6G/pdisk_1.dat 2025-06-30T09:22:15.482224Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22355, node 1 2025-06-30T09:22:15.501913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:15.501927Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:15.501929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:15.501976Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:15.507585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.507624Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.508392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21826 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:15.550329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:15.553869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:15.555711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:15.899948Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670292565624763:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.899953Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670292565624755:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.899971Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.900930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:15.903430Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670292565624769:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:22:15.961129Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670292565624820:2380] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:16.018215Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz029w6v07c7s8m9xpyvhnwr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmI3OGM3MjctMWIxZWRjNzctZTE5N2M4NmQtMWI1NjFiYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:16.044377Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz029wbbaykh6151dg63pa5n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTU1OWVkYzAtY2ZhYjM5ZmItZThhZTFiNC05MDM1YjEwZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:16.069633Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jz029wbe2x65yznar63km099, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjA3MDRiODAtZGQ5NDZkMjgtMTUyNmRlZjctZmZkMjZlZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:16.070515Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7521670296860592219:2323], owner: [1:7521670296860592215:2321], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:16.071191Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7521670296860592219:2323], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:16.071333Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7521670296860592219:2323], row count: 2, finished: 1 2025-06-30T09:22:16.071371Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7521670296860592219:2323], owner: [1:7521670296860592215:2321], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:16.072701Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275336069, txId: 281474976715663] shutting down 2025-06-30T09:22:16.363727Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670296644727885:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:16.363902Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0030dd/r3tmp/tmp0zXYFY/pdisk_1.dat 2025-06-30T09:22:16.385044Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
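Note: the warnings above capture a benign startup race around the `default` resource pool — the first fetch reports NOT_FOUND, the pool creator schedules a retry to doublecheck after its transaction completes, and a concurrent create is then accepted because the path already exists. A minimal sketch of the same fetch-create-doublecheck pattern (plain Python; `fetch_pool`/`create_pool` are hypothetical callables, not YDB SDK names):

```python
import time

class NotFound(Exception):
    """Pool metadata is not visible yet (maps to the NOT_FOUND issue above)."""

def ensure_default_pool(fetch_pool, create_pool, retries=5, base_delay=0.1):
    """Fetch the pool; on NOT_FOUND try to create it, then doublecheck with backoff."""
    for attempt in range(retries):
        try:
            return fetch_pool("default")
        except NotFound:
            try:
                create_pool("default")
            except FileExistsError:
                # "path exist, request accepts it": a concurrent creator won the race.
                pass
            # Scheduled retry, as in the TPoolCreatorActor log line above.
            time.sleep(base_delay * 2 ** attempt)
    return fetch_pool("default")  # final attempt; propagates NotFound if still absent
```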
TServer::EnableGrpc on GrpcPort 19152, node 2 2025-06-30T09:22:16.422987Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:16.423006Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:16.423009Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:16.423064Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61986 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:22:16.463219Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:16.463251Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:16.467838Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:16.473036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:16.487278Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:16.489575Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose its ... 
7594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } Children [.metadata,Dir1,Table0,Tenant1,Tenant2] }] } 2025-06-30T09:22:36.265548Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7521670384234533994:2373], row count: 0, finished: 0 2025-06-30T09:22:36.265557Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:36.265599Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [SubDir1,SubDir2] }] } 2025-06-30T09:22:36.265609Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7521670384234533994:2373], row count: 0, finished: 0 2025-06-30T09:22:36.265615Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:36.265643Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-30T09:22:36.265666Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7521670384234533994:2373], row count: 2, finished: 0 2025-06-30T09:22:36.265687Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [33:7521670384234533994:2373], owner: [33:7521670384234533990:2371], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:36.266147Z node 33 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [33:7521670379939564767:2094], database# , query hash# 3187945588805523718, cpu time# 16622 2025-06-30T09:22:36.266273Z node 33 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: 
[step: 1751275356264, txId: 281474976715687] shutting down 2025-06-30T09:22:36.284130Z node 33 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715690. Ctx: { TraceId: 01jz02ag3b2fgejqr8w648k003, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=33&id=MzI3OGVmYWItZTJlODY0MTItMWQ5NTNiNDEtYjU2MTA4MzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:36.284699Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [33:7521670384234534029:2382], owner: [33:7521670384234534025:2380], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:36.284940Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [33:7521670384234534029:2382], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:36.284952Z node 33 :SYSTEM_VIEWS DEBUG: auth_scan_base.h:100: ProceedToScan, tenant name: /Root tenant owner: root@builtin subject sid: empty require admin access: 0 is admin: 1 2025-06-30T09:22:36.284971Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:36.285055Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } Children [.metadata,Dir1,Table0,Tenant1,Tenant2] }] } 2025-06-30T09:22:36.285075Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7521670384234534029:2382], row count: 0, finished: 0 2025-06-30T09:22:36.285089Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:36.285142Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty 
maybe) Users: [] Groups: [] } Children [SubDir1,SubDir2] }] } 2025-06-30T09:22:36.285159Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7521670384234534029:2382], row count: 0, finished: 0 2025-06-30T09:22:36.285169Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:36.285208Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-30T09:22:36.285227Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7521670384234534029:2382], row count: 1, finished: 0 2025-06-30T09:22:36.285266Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [33:7521670384234534029:2382], owner: [33:7521670384234534025:2380], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:36.286526Z node 33 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [33:7521670379939564767:2094], database# , query hash# 15123460272068726277, cpu time# 16792 2025-06-30T09:22:36.286664Z node 33 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275356283, txId: 281474976715689] shutting down 2025-06-30T09:22:36.289317Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 37 2025-06-30T09:22:36.289523Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:36.289568Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 36 2025-06-30T09:22:36.289636Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:36.289668Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 34 2025-06-30T09:22:36.289697Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:36.289671Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:36.289704Z node 35 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:36.289834Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 35 
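Note: the scan above serves the `auth_permissions` system view — it walks Root, Root/Dir1 and Root/Dir1/SubDir1 via the scheme cache and streams one batch of rows per path. Assuming the documented `.sys/auth_permissions` view and the Python SDK's query service (column names here follow the public docs, not this trace), an equivalent client-side select might look like:

```python
import ydb

# Endpoint and database are placeholders for a local single-node setup.
driver = ydb.Driver(endpoint="grpc://localhost:2136", database="/Root")
driver.wait(timeout=5)

pool = ydb.QuerySessionPool(driver)
# One row per ACE: which SID holds which permission on which scheme path.
result_sets = pool.execute_with_retries(
    "SELECT Path, Sid, Permission "
    "FROM `.sys/auth_permissions` "
    "WHERE Path = '/Root/Dir1/SubDir1';"
)
for row in result_sets[0].rows:
    print(row.Path, row.Sid, row.Permission)
```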
2025-06-30T09:22:36.289961Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(35, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:36.291052Z node 33 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[35:7521670377859723664:2105], Type=268959746 2025-06-30T09:22:36.584426Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:36.585492Z node 37 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:36.589975Z node 36 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:36.675991Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:36.677655Z node 34 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> BsControllerConfig::MergeIntersectingBoxes |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> BsControllerConfig::ReassignGroupDisk >> TCdcStreamWithRebootsTests::CreateStreamWithAwsRegion[TabletReboots] [GOOD] >> BsControllerConfig::ExtendByCreatingSeparateBox >> BsControllerConfig::ManyPDisksRestarts >> SystemView::AuthGroupMembers_TableRange [GOOD] >> SystemView::AuthEffectivePermissions >> BsControllerConfig::OverlayMapCrossReferences >> BsControllerConfig::PDiskCreate >> BsControllerConfig::Basic >> BsControllerConfig::SelectAllGroups >> BsControllerConfig::AddDriveSerial >> BsControllerConfig::OverlayMap >> TImportWithRebootsTests::ShouldSucceedOnSingleView [GOOD] >> TImportWithRebootsTests::ShouldSucceedOnViewsAndTables ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest >> TCdcStreamWithRebootsTests::CreateStreamExplicitReady[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:46.731709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, 
WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:46.731736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.731743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:46.731758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:46.731765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:46.731770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:46.731780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.731805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:46.731930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:46.732006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:46.749683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:46.749711Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:46.749869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.754576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:46.755832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:46.755885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:46.758083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:46.758148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:46.758296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.758371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:46.759490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, 
at schemeshard: 72057594046678944 2025-06-30T09:21:46.759553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:46.759835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:46.759849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.759859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:46.759869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:46.759876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:46.759908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:46.761511Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.786661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:46.786758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.786830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:46.786840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:46.786899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:46.786916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:46.787862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: 
Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.787923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:46.787996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.788006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:46.788014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:46.788021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:46.788561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.788574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:46.788581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:46.789073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.789084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.789091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:46.789100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:46.789884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:46.790373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:46.790421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:46.790664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 
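Note: the trace above shows one suboperation stepping through the schemeshard state machine — TCreateParts (2) has no shards to create and advances to TConfigureParts (3), which advances to TPropose (128) and hands the transaction to the coordinator; the earlier drop trace likewise steps 129 -> 130 into TProposedDeleteParts. A toy Python rendering of just the transitions printed in this log (the numeric values are quoted from the trace; the symbolic names are descriptive labels, not the real enum identifiers):

```python
from enum import IntEnum

class TxState(IntEnum):
    # Numeric values are exactly what "Change state for txid ..." prints above.
    CREATE_PARTS = 2       # TCreateParts: create shards, or skip when none needed
    CONFIGURE_PARTS = 3    # TConfigureParts: push configuration to the shards
    PROPOSE = 128          # TPropose: send the transaction to the coordinator
    WAIT_SHARDS = 129      # shard replies pending (drop path earlier in this log)
    DELETE_PARTS = 130     # TProposedDeleteParts: release dropped shards to Hive

# Only the transitions actually printed in this trace; the real enum is larger.
observed_transitions = [
    (TxState.CREATE_PARTS, TxState.CONFIGURE_PARTS),  # "2 -> 3"
    (TxState.CONFIGURE_PARTS, TxState.PROPOSE),       # "3 -> 128"
    (TxState.WAIT_SHARDS, TxState.DELETE_PARTS),      # "129 -> 130"
]
```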
2025-06-30T09:21:46.790696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... ish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:37.568767Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:22:37.569586Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:2, at schemeshard: 72057594046678944 2025-06-30T09:22:37.569702Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:37.569710Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:22:37.569767Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:22:37.569805Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:37.569814Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [139:212:2212], at schemeshard: 72057594046678944, txId: 1003, path id: 4 2025-06-30T09:22:37.569820Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [139:212:2212], at schemeshard: 72057594046678944, txId: 1003, path id: 5 2025-06-30T09:22:37.569889Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:2, at schemeshard: 72057594046678944 2025-06-30T09:22:37.569898Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:2 ProgressState 2025-06-30T09:22:37.569911Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:2 progress is 3/3 2025-06-30T09:22:37.569915Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-30T09:22:37.569921Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:2 progress is 3/3 2025-06-30T09:22:37.569925Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-30T09:22:37.569929Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 3/3, is published: false 2025-06-30T09:22:37.569934Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-30T09:22:37.569941Z node 139 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:22:37.569946Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:22:37.569958Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:22:37.569963Z node 139 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:1 2025-06-30T09:22:37.569967Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:1 2025-06-30T09:22:37.569982Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:22:37.569987Z node 139 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:2 2025-06-30T09:22:37.569991Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:2 2025-06-30T09:22:37.569999Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 5 2025-06-30T09:22:37.570004Z node 139 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 2, subscribers: 0 2025-06-30T09:22:37.570009Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 4], 4 2025-06-30T09:22:37.570012Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 5], 2 2025-06-30T09:22:37.570239Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:37.570261Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:37.570265Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:37.570269Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 4 2025-06-30T09:22:37.570273Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:37.570680Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:37.570703Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 
1003 2025-06-30T09:22:37.570708Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:37.570713Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2 2025-06-30T09:22:37.570718Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:22:37.570732Z node 139 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:22:37.571554Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:37.572906Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:22:37.574488Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:22:37.574502Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:22:37.574580Z node 139 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:22:37.574600Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:37.574606Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [139:663:2579] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:37.574685Z node 139 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:22:37.574774Z node 139 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Stream" took 103us result status StatusSuccess 2025-06-30T09:22:37.574889Z node 139 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Stream" PathDescription { Self { Name: "Stream" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeCdcStream CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 CdcStreamVersion: 1 } ChildrenExist: true } Children { Name: "streamImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 
ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409548 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } CdcStreamDescription { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 4 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> BsControllerConfig::OverlayMapCrossReferences [GOOD] >> DbCounters::TabletsSimple [GOOD] >> LabeledDbCounters::OneTablet >> BsControllerConfig::OverlayMap [GOOD] >> TExternalDataSourceTestReboots::ParallelCreateDrop [GOOD] |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |82.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks >> SystemView::AuthEffectivePermissions [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_3_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_4_Table |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::OverlayMap [GOOD] |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest >> TCdcStreamWithRebootsTests::CreateStreamWithAwsRegion[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: 
[1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:46.265809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:46.265839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.265844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:46.265858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:46.265864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:46.265869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:46.265879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.265905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:46.266031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:46.266114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:46.284025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:46.284048Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:46.284177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.287389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:46.288553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:46.288596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:46.291174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:46.291235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:46.291386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.291445Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:46.292385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.292443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:46.292736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:46.292748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.292758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:46.292766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:46.292773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:46.292804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:46.294383Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.321412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:46.321490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.321554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:46.321562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:46.321620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:46.321637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, 
first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:46.322341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.322388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:46.322441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.322452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:46.322461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:46.322467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:46.322909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.322924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:46.322931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:46.323312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.323324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.323331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:46.323339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:46.324146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:46.324617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:46.324662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:46.324903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.324930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... 2057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:38.301175Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:22:38.301945Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:2, at schemeshard: 72057594046678944 2025-06-30T09:22:38.302057Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:38.302065Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:22:38.302101Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:22:38.302147Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:38.302154Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [139:212:2212], at schemeshard: 72057594046678944, txId: 1003, path id: 4 2025-06-30T09:22:38.302161Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [139:212:2212], at schemeshard: 72057594046678944, txId: 1003, path id: 5 2025-06-30T09:22:38.302241Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:2, at schemeshard: 72057594046678944 2025-06-30T09:22:38.302249Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:2 ProgressState 2025-06-30T09:22:38.302263Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:2 progress is 3/3 2025-06-30T09:22:38.302267Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-30T09:22:38.302272Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:2 progress is 3/3 2025-06-30T09:22:38.302274Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-30T09:22:38.302278Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 3/3, is published: false 2025-06-30T09:22:38.302282Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 
3/3 2025-06-30T09:22:38.302288Z node 139 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:22:38.302292Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:22:38.302303Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:22:38.302307Z node 139 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:1 2025-06-30T09:22:38.302310Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:1 2025-06-30T09:22:38.302324Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:22:38.302327Z node 139 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:2 2025-06-30T09:22:38.302330Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:2 2025-06-30T09:22:38.302337Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 5 2025-06-30T09:22:38.302341Z node 139 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 2, subscribers: 0 2025-06-30T09:22:38.302345Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 4], 4 2025-06-30T09:22:38.302347Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 5], 2 2025-06-30T09:22:38.302547Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:38.302559Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:38.302563Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:38.302566Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 4 2025-06-30T09:22:38.302570Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:38.303003Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:38.303019Z node 139 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:38.303023Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:38.303026Z node 139 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2 2025-06-30T09:22:38.303029Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:22:38.303042Z node 139 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:22:38.303768Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:38.304583Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:22:38.305623Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:22:38.305633Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:22:38.305730Z node 139 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:22:38.305754Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:38.305759Z node 139 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [139:663:2579] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:38.305831Z node 139 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:22:38.305886Z node 139 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Stream" took 67us result status StatusSuccess 2025-06-30T09:22:38.305990Z node 139 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Stream" PathDescription { Self { Name: "Stream" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeCdcStream CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 2 CdcStreamVersion: 1 } ChildrenExist: true } Children { Name: "streamImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409548 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } CdcStreamDescription { Name: "Stream" Mode: ECdcStreamModeNewAndOldImages PathId { OwnerId: 72057594046678944 LocalId: 4 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatDynamoDBStreamsJson VirtualTimestamps: false AwsRegion: "ru-central1" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest |82.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::OverlayMapCrossReferences [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::ParallelCreateDrop [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:26.477053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:26.477077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:26.477084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:26.477090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:26.477107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:26.477114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:26.477126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:26.477143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:26.477288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:26.477369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:26.494730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:26.494773Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:26.494906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:26.497806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:26.498674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:26.498712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:26.500766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:26.500813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:26.500908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:26.500943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:26.501990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:26.502042Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:26.502362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:26.502378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:26.502393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:26.502404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:26.502411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:26.502443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:26.504431Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:26.524202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:26.524285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.524363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:26.524374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:26.524424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:26.524440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:26.525325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 
1, at schemeshard: 72057594046678944 2025-06-30T09:22:26.525388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:26.525453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.525467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:26.525473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:26.525479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:26.526053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.526069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:26.526086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:26.526457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.526466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:26.526471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:26.526477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:26.527046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:26.527529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:26.527577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:26.527829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:26.527860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: 
TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 04 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1004 at step: 5000004 2025-06-30T09:22:39.194560Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:39.194586Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1004 Coordinator: 72057594046316545 AckTo { RawX1: 126 RawX2: 210453399654 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:39.194596Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 1004:0 HandleReply TEvOperationPlan: step# 5000004 2025-06-30T09:22:39.194624Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:39.194642Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1004:0 128 -> 240 2025-06-30T09:22:39.194694Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:39.194705Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:22:39.194893Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:22:39.195261Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 FAKE_COORDINATOR: Erasing txId 1004 2025-06-30T09:22:39.195710Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:39.195722Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:39.195784Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:22:39.195815Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:39.195822Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [49:209:2209], at schemeshard: 72057594046678944, txId: 1004, path id: 1 2025-06-30T09:22:39.195830Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [49:209:2209], at schemeshard: 72057594046678944, txId: 1004, path id: 3 2025-06-30T09:22:39.195880Z node 49 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-30T09:22:39.195889Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-30T09:22:39.195907Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:22:39.195913Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:22:39.195919Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:22:39.195924Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:22:39.195934Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-30T09:22:39.195941Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:22:39.195947Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:0 2025-06-30T09:22:39.195953Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:0 2025-06-30T09:22:39.195971Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:39.195979Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1004, publications: 2, subscribers: 0 2025-06-30T09:22:39.195984Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-30T09:22:39.195989Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-30T09:22:39.196118Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:22:39.196133Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:22:39.196139Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:22:39.196145Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-30T09:22:39.196150Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 
2025-06-30T09:22:39.196197Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:39.196203Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:22:39.196217Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:39.196269Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:22:39.196282Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:22:39.196287Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:22:39.196294Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-30T09:22:39.196299Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:39.196308Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 0 2025-06-30T09:22:39.197089Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:22:39.197210Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:22:39.197226Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 TestModificationResult got TxId: 1004, wait until txId: 1004 TestWaitNotification wait txId: 1004 2025-06-30T09:22:39.197291Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-30T09:22:39.197301Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-30T09:22:39.197385Z node 49 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-30T09:22:39.197408Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:22:39.197414Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [49:362:2351] 
TestWaitNotification: OK eventTxId 1004 2025-06-30T09:22:39.197508Z node 49 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DropMe" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:39.197545Z node 49 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DropMe" took 55us result status StatusPathDoesNotExist 2025-06-30T09:22:39.197593Z node 49 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/DropMe\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/DropMe" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::AuthEffectivePermissions [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00309c/r3tmp/tmpjSnXfw/pdisk_1.dat 2025-06-30T09:22:16.632163Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:16.663009Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:16.678454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:16.678485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 15393, node 1 2025-06-30T09:22:16.689064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:16.695600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:16.695615Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:16.695617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:16.695672Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32586 TClient is connected to server localhost:32586 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:16.768286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:16.985345Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670296433413915:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.985377Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.985394Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670296433413926:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.986534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:16.991662Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670296433413929:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:22:17.068104Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670300728381296:2737] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:17.578356Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:17.636457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:17.847064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:22:17.938649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:18.081918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:18.214428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:22:18.303227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:22:18.388777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:18.409799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:22:19.154508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715697:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:22:19.706003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715714:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:22:19.733053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715715:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:19.840621Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7521670309318318519:2857], owner: [1:7521670309318318516:2855], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } 2025-06-30T09:22:19.840663Z node 1 :SYSTEM_VIEWS INFO: show_create.cpp:107: Scan prepared, actor: [1:7521670309318318519:2857] 2025-06-30T09:22:19.854522Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7521670309318318519:2857], row count: 1, finished: 1 2025-06-30T09:22:19.854578Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7521670309318318519:2857], owner: [1:7521670309318318516:2855], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } 2025-06-30T09:22:21.024385Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670317692504221:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:21.024411Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00309c/r3tmp/tmpeD4Vhe/pdisk_1.dat 2025-06-30T09:22:21.056356Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14158, node 6 2025-06-30T09:22:21.076697Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:21.076714Z 
node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:21.076716Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize ... ] }] } 2025-06-30T09:22:39.282313Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670396889065000:2337], row count: 1, finished: 0 2025-06-30T09:22:39.282370Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:39.282485Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [72057594046644480:4:1] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:22:39.282496Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670396889065000:2337], row count: 1, finished: 0 2025-06-30T09:22:39.282659Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7521670396889065000:2337], owner: [41:7521670396889064996:2335], scan id: 0, sys view info: Type: EAuthEffectivePermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:39.283047Z node 41 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [41:7521670392594095998:2082], database# , query hash# 11342553055430868283, cpu time# 13274 2025-06-30T09:22:39.283221Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275359278, txId: 281474976715676] shutting down 2025-06-30T09:22:39.297735Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jz02ak1n2eapyfz61ds8j7q9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=MThkNzRiOTYtMzc0MDA1MTEtZDFmMjE1YzUtYmNhY2UxNGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:39.298284Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [41:7521670396889065053:2346], owner: [41:7521670396889065049:2344], scan id: 0, sys view info: Type: EAuthEffectivePermissions SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-30T09:22:39.298461Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [41:7521670396889065053:2346], schemeshard id: 72075186224037888, hive id: 72057594037968897, database: /Root/Tenant1, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 2], database node count: 2 2025-06-30T09:22:39.298473Z node 41 :SYSTEM_VIEWS DEBUG: auth_scan_base.h:100: ProceedToScan, tenant name: /Root/Tenant1 tenant owner: root@builtin subject sid: empty require admin access: 0 is admin: 1 2025-06-30T09:22:39.298494Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:39.298618Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1 TableId: [72075186224037888:1:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037889 Coordinators: 72075186224037890 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037891 Mediators: 72075186224037892 SchemeShard: 72075186224037888 SysViewProcessor: 72075186224037893 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 }] Groups: [] } Children [Dir2,Table1] }] } 2025-06-30T09:22:39.298646Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670396889065053:2346], row count: 1, finished: 0 2025-06-30T09:22:39.298672Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Dir2 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:39.298828Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Dir2 TableId: [72075186224037888:3:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037889 Coordinators: 72075186224037890 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037891 Mediators: 72075186224037892 SchemeShard: 72075186224037888 SysViewProcessor: 72075186224037893 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-30T09:22:39.298852Z node 41 :SYSTEM_VIEWS DEBUG: 
scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670396889065053:2346], row count: 2, finished: 0 2025-06-30T09:22:39.298942Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Table1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-30T09:22:39.299285Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Table1 TableId: [72075186224037888:2:1] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037889 Coordinators: 72075186224037890 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037891 Mediators: 72075186224037892 SchemeShard: 72075186224037888 SysViewProcessor: 72075186224037893 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:22:39.299299Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670396889065053:2346], row count: 1, finished: 0 2025-06-30T09:22:39.299419Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7521670396889065053:2346], owner: [41:7521670396889065049:2344], scan id: 0, sys view info: Type: EAuthEffectivePermissions SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-30T09:22:39.299774Z node 41 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [41:7521670392594095998:2082], database# , query hash# 17325808444334437222, cpu time# 12189 2025-06-30T09:22:39.299933Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275359297, txId: 281474976715678] shutting down 2025-06-30T09:22:39.301611Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 45 2025-06-30T09:22:39.301533Z node 42 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:39.301811Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(45, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:39.302229Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 42 2025-06-30T09:22:39.302297Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:39.302315Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 44 2025-06-30T09:22:39.302390Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(44, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:39.302800Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 43 2025-06-30T09:22:39.302898Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(43, (0,0,0,0)) VolatileState: Connected -> Disconnected 
2025-06-30T09:22:39.302265Z node 45 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:39.306945Z node 41 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[44:7521670390117982564:2107], Type=268959746 2025-06-30T09:22:39.306965Z node 41 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[44:7521670390117982564:2107], Type=268959746 2025-06-30T09:22:39.306970Z node 41 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[44:7521670390117982564:2107], Type=268959746 2025-06-30T09:22:39.306999Z node 41 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[43:7521670391843538604:2107], Type=268959746 2025-06-30T09:22:39.307007Z node 41 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[43:7521670391843538604:2107], Type=268959746 2025-06-30T09:22:39.307011Z node 41 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[43:7521670391843538604:2107], Type=268959746 2025-06-30T09:22:39.772937Z node 45 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:39.773348Z node 44 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:39.774055Z node 45 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:39.774546Z node 44 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:39.850784Z node 42 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:39.851878Z node 42 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> BsControllerConfig::SelectAllGroups [GOOD] >> TExternalDataSourceTestReboots::CreateExternalDataSourceWithReboots [GOOD] >> SystemView::GroupsFields [GOOD] >> SystemView::Describe |82.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::SelectAllGroups [GOOD] Test command err: 2025-06-30T09:22:39.103487Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:39.104271Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:39.104366Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:39.104645Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:39.104740Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} 
StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:39.104767Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:39.104773Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:39.104833Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:39.105734Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:39.105756Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:39.105785Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:39.105797Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:39.105808Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:39.105817Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion |82.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |82.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |82.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |82.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |82.2%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |82.2%| [LD] {RESULT} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::CreateExternalDataSourceWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:28.546138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:28.546156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, 
Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:28.546160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:28.546163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:28.546175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:28.546178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:28.546184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:28.546200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:28.546302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:28.546379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:28.559177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:28.559202Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:28.559338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:28.562594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:28.563603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:28.563646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:28.565325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:28.565371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:28.565486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:28.565528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:28.566475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:28.566517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:28.566789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:28.566800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:28.566810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:28.566818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:28.566824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:28.566855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:28.568225Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:28.588403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:28.588481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:28.588545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:28.588554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:28.588599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:28.588622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:28.589614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:28.589667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, 
path: //MyRoot 2025-06-30T09:22:28.589736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:28.589745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:28.589749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:28.589754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:28.590274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:28.590290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:28.590296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:28.591023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:28.591051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:28.591062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:28.591074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:28.591991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:28.592775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:28.592822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:28.593089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:28.593133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
TxId: 1003, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1003 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1003 at step: 5000004 2025-06-30T09:22:41.336353Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:41.336375Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1003 Coordinator: 72057594046316545 AckTo { RawX1: 126 RawX2: 210453399654 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:41.336383Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:34: [72057594046678944] TCreateExternalDataSource TPropose, operationId: 1003:0HandleReply TEvOperationPlan: step# 5000004 2025-06-30T09:22:41.336417Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 128 -> 240 2025-06-30T09:22:41.336470Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:22:41.336484Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 FAKE_COORDINATOR: Erasing txId 1003 2025-06-30T09:22:41.340098Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:41.340119Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:22:41.340169Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:22:41.340191Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:22:41.340213Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:41.340224Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [49:209:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-30T09:22:41.340231Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [49:209:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 4 2025-06-30T09:22:41.340235Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [49:209:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 4 2025-06-30T09:22:41.340283Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 
72057594046678944 2025-06-30T09:22:41.340292Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-30T09:22:41.340311Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:41.340316Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:41.340322Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:41.340325Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:41.340331Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: false 2025-06-30T09:22:41.340337Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:41.340343Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:22:41.340348Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:22:41.340372Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:41.340379Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 2, subscribers: 1 2025-06-30T09:22:41.340383Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 3], 5 2025-06-30T09:22:41.340387Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 4], 2 2025-06-30T09:22:41.340661Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:41.340682Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:41.340688Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:41.340694Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5 2025-06-30T09:22:41.340703Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:41.341261Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 
Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:41.341285Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:41.341291Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:41.341297Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 2 2025-06-30T09:22:41.341303Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:22:41.341321Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 1 2025-06-30T09:22:41.341327Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [49:307:2296] 2025-06-30T09:22:41.342006Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:41.342333Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:41.342372Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:41.342378Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [49:308:2297] TestWaitNotification: OK eventTxId 1002 TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:41.342496Z node 49 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirExternalDataSource/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:41.342560Z node 49 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirExternalDataSource/MyExternalDataSource" took 71us result status StatusSuccess 2025-06-30T09:22:41.342661Z node 49 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirExternalDataSource/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |82.3%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/test-results/unittest/{meta.json ... results_accumulator.log} |82.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |82.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |82.3%| [LD] {RESULT} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration >> BsControllerConfig::Basic [GOOD] >> BsControllerConfig::DeleteStoragePool >> TChargeBTreeIndex::FewNodes_Groups_History_Sticky [GOOD] >> TClockProCache::Lifecycle [GOOD] >> TClockProCache::EvictNext [GOOD] >> TClockProCache::Erase [GOOD] >> TClockProCache::Random [GOOD] >> NFwd_TFlatIndexCache::Skip_Wait [GOOD] >> NFwd_TFlatIndexCache::Trace [GOOD] >> NFwd_TFlatIndexCache::Slices [GOOD] >> NFwd_TLoadedPagesCircularBuffer::Basics [GOOD] >> NOther::Blocks [GOOD] >> NPage::Encoded [GOOD] >> NPage::ABI_002 >> BsControllerConfig::PDiskCreate [GOOD] >> NPage::ABI_002 [GOOD] >> NPage::GroupIdEncoding [GOOD] >> NPageCollection::Align [GOOD] >> NPageCollection::Meta [GOOD] >> NPageCollection::PagesToBlobsConverter [GOOD] >> NPageCollection::Grow [GOOD] >> NPageCollection::Groups [GOOD] >> NPageCollection::Chop [GOOD] >> NPageCollection::CookieAllocator [GOOD] >> NProto::LargeGlobId [GOOD] >> Redo::ABI_008 [GOOD] >> Self::Literals [GOOD] >> SystemView::ShowCreateTable [GOOD] >> SystemView::ShowCreateTableChangefeeds >> BsControllerConfig::AddDriveSerial [GOOD] >> BsControllerConfig::AddDriveSerialMassive >> BsControllerConfig::ReassignGroupDisk [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::PDiskCreate [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:199:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:199:2077] Leader for TabletID 72057594037932033 is [1:235:2079] sender: [1:236:2066] recipient: [1:199:2077] 2025-06-30T09:22:38.941768Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 
2025-06-30T09:22:38.942837Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:38.942927Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:38.943242Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:38.943343Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:38.943364Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:38.943371Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:38.943412Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:38.944550Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:38.944579Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:38.944615Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:38.944631Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:38.944645Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:38.944655Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:235:2079] sender: [1:258:2066] recipient: [1:20:2067] 2025-06-30T09:22:38.955076Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:38.955124Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:38.965377Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:38.965413Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:38.965424Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:38.965432Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:38.965453Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:38.965458Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:38.965463Z 
node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:38.965472Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:38.975721Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:38.975778Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:38.986069Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:38.986133Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:38.986418Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:38.986427Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:38.986465Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:22:38.986472Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:38.989270Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } } } Command { QueryBaseConfig { } } } 2025-06-30T09:22:38.989462Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-30T09:22:38.989471Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-30T09:22:38.989476Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-30T09:22:38.989481Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-30T09:22:38.989485Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-30T09:22:38.989491Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2025-06-30T09:22:38.989495Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2025-06-30T09:22:38.989501Z node 1 
:BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2025-06-30T09:22:38.989506Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2025-06-30T09:22:38.989511Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2025-06-30T09:22:38.989515Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2025-06-30T09:22:38.989523Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2025-06-30T09:22:38.989528Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2025-06-30T09:22:38.989536Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2025-06-30T09:22:38.989541Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2025-06-30T09:22:38.989546Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2025-06-30T09:22:38.989551Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2025-06-30T09:22:38.989556Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2025-06-30T09:22:38.989560Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2025-06-30T09:22:38.989565Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2025-06-30T09:22:38.989571Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1002 Path# /dev/disk3 2025-06-30T09:22:38.989575Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 Path# /dev/disk1 2025-06-30T09:22:38.989580Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1001 Path# /dev/disk2 2025-06-30T09:22:38.989585Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1002 Path# /dev/disk3 2025-06-30T09:22:38.989589Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk1 2025-06-30T09:22:38.989594Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1001 Path# /dev/disk2 2025-06-30T09:22:38.989598Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1002 Path# /dev/disk3 2025-06-30T09:22:38.989603Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk1 2025-06-30T09:22:38.989614Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1001 Path# /dev/disk2 2025-06-30T09:22:38.989619Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1002 Path# /dev/disk3 Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] Leader for TabletID 72057594037932033 is [11:233:2079] sender: 
[11:236:2066] recipient: [11:204:2077] 2025-06-30T09:22:40.948702Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:40.948984Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:40.949048Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:40.949173Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:40.949386Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:40.949410Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:40.949415Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:40.949464Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:40.950263Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:40.950299Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:40.950324Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:40.950339Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:40.950350Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:40.950357Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:233:2079] sender: [11:258:2066] recipient: [11:20:2067] 2025-06-30T09:22:40.960854Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:40.960934Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:40.971323Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:40.971374Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:40.971389Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:40.971399Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:40.971426Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 
2025-06-30T09:22:40.971432Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:40.971437Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:40.971445Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:40.981821Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:40.981877Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:40.992314Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:40.992394Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:40.992710Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:40.992720Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:40.992774Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:22:40.992785Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:40.993105Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 2 } } } Command { QueryBaseConfig { } } } 2025-06-30T09:22:40.993237Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1000 Path# /dev/disk1 2025-06-30T09:22:40.993247Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1001 Path# /dev/disk2 2025-06-30T09:22:40.993253Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1002 Path# /dev/disk3 2025-06-30T09:22:40.993259Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1000 Path# /dev/disk1 2025-06-30T09:22:40.993265Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1001 Path# /dev/disk2 2025-06-30T09:22:40.993274Z node 11 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1002 Path# /dev/disk3 2025-06-30T09:22:40.993280Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1000 Path# /dev/disk1 2025-06-30T09:22:40.993286Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1001 Path# /dev/disk2 2025-06-30T09:22:40.993293Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1002 Path# /dev/disk3 2025-06-30T09:22:40.993299Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1000 Path# /dev/disk1 2025-06-30T09:22:40.993305Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1001 Path# /dev/disk2 2025-06-30T09:22:40.993315Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1002 Path# /dev/disk3 2025-06-30T09:22:40.993321Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1000 Path# /dev/disk1 2025-06-30T09:22:40.993326Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1001 Path# /dev/disk2 2025-06-30T09:22:40.993332Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1002 Path# /dev/disk3 2025-06-30T09:22:40.993338Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1000 Path# /dev/disk1 2025-06-30T09:22:40.993343Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1001 Path# /dev/disk2 2025-06-30T09:22:40.993349Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1002 Path# /dev/disk3 2025-06-30T09:22:40.993354Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1000 Path# /dev/disk1 2025-06-30T09:22:40.993362Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1001 Path# /dev/disk2 2025-06-30T09:22:40.993368Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1002 Path# /dev/disk3 2025-06-30T09:22:40.993373Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1000 Path# /dev/disk1 2025-06-30T09:22:40.993379Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1001 Path# /dev/disk2 2025-06-30T09:22:40.993385Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1002 Path# /dev/disk3 2025-06-30T09:22:40.993396Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1000 Path# /dev/disk1 2025-06-30T09:22:40.993403Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1001 Path# /dev/disk2 2025-06-30T09:22:40.993409Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1002 Path# /dev/disk3 2025-06-30T09:22:40.993415Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1000 Path# /dev/disk1 2025-06-30T09:22:40.993421Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1001 Path# /dev/disk2 2025-06-30T09:22:40.993426Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new 
pdisk PDiskId# 20:1002 Path# /dev/disk3 >> SystemView::TopPartitionsByCpuRanges [GOOD] >> SystemView::TopPartitionsByTliFields >> SystemView::TopPartitionsByCpuFollowers [GOOD] >> SystemView::SystemViewFailOps ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::ReassignGroupDisk [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:296:2068] recipient: [1:275:2079] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:296:2068] recipient: [1:275:2079] Leader for TabletID 72057594037932033 is [1:304:2081] sender: [1:305:2068] recipient: [1:275:2079] 2025-06-30T09:22:38.164723Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:38.165675Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:38.165779Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:38.166083Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:38.166170Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:38.166208Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:38.166215Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:38.166258Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:38.167457Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:38.167491Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:38.167530Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:38.167552Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:38.167567Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:38.167579Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:304:2081] sender: [1:326:2068] recipient: [1:22:2069] 2025-06-30T09:22:38.178013Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:38.178053Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:38.188326Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:38.188371Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 
2025-06-30T09:22:38.188385Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:38.188398Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:38.188423Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:38.188433Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:38.188441Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:38.188453Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:38.198756Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:38.198809Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:38.209150Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:38.209198Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:38.209507Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:38.209516Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:38.209559Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:22:38.209569Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:38.212493Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk" } } } Command { DefineBox { BoxId: 1 Name: "box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 8 PDiskFilter { Property { Type: ROT } } } } } 2025-06-30T09:22:38.212644Z node 1 
:BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk 2025-06-30T09:22:38.212652Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk 2025-06-30T09:22:38.212657Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk 2025-06-30T09:22:38.212662Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk 2025-06-30T09:22:38.212668Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk 2025-06-30T09:22:38.212673Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk 2025-06-30T09:22:38.212679Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk 2025-06-30T09:22:38.212703Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 Path# /dev/disk 2025-06-30T09:22:38.212709Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk 2025-06-30T09:22:38.212714Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk 2025-06-30T09:22:38.212721Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1000 Path# /dev/disk 2025-06-30T09:22:38.212730Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1000 Path# /dev/disk 2025-06-30T09:22:38.225795Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 Response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true ConfigTxSeqNo: 1 2025-06-30T09:22:38.226305Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { UpdateDriveStatus { HostKey { NodeId: 1 } Path: "/dev/disk" Status: INACTIVE } } } Response# Status { Success: true } Success: true ConfigTxSeqNo: 2 Leader for TabletID 72057594037932033 is [0:0:0] sender: [13:291:2068] recipient: [13:266:2079] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [13:291:2068] recipient: [13:266:2079] Leader for TabletID 72057594037932033 is [13:301:2081] sender: [13:303:2068] recipient: [13:266:2079] 2025-06-30T09:22:40.564155Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:40.564396Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:40.564457Z node 13 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:40.564666Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:40.564761Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:40.564805Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:40.564811Z node 13 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 
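[Editor's note] The TEvControllerConfigRequest above defines 12 hosts (IcPort 12001..12012), one ROT drive each, and a pool with ErasureSpecies "block-4-2" and NumGroups 8. A sketch of the fail-domain arithmetic this implies, assuming block-4-2 places one VDisk in each of 8 distinct fail domains as in YDB's documented erasure layouts; the helper and its table are illustrative, not BS_CONTROLLER's actual fitting logic:

# Fail-domain counts assumed per erasure species (hedged: taken from
# YDB documentation, not from this test's source).
ERASURE_FAIL_DOMAINS = {"block-4-2": 8, "mirror-3-dc": 9, "none": 1}

def pool_fits(erasure: str, hosts_with_matching_pdisk: int) -> bool:
    """True if the box offers at least one fail domain per required VDisk.

    Slot counts per PDisk vary by configuration, so this sketch checks
    only the fail-domain constraint, not slot capacity for NumGroups.
    """
    return hosts_with_matching_pdisk >= ERASURE_FAIL_DOMAINS[erasure]

# The DefineBox above provides 12 hosts with a ROT drive each:
print(pool_fits("block-4-2", 12))  # True: 12 hosts >= 8 fail domains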
2025-06-30T09:22:40.564853Z node 13 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:40.565864Z node 13 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:40.565895Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:40.565921Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:40.565938Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:40.565953Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:40.565962Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [13:301:2081] sender: [13:326:2068] recipient: [13:22:2069] 2025-06-30T09:22:40.576381Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:40.576430Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:40.586817Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:40.586871Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:40.586896Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:40.586909Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:40.586936Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:40.586945Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:40.586951Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:40.586962Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:40.597262Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:40.597301Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:40.608825Z node 13 :BS_CONTROLLER DEBUG: 
{BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:40.608872Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:40.609048Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:40.609056Z node 13 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:40.609095Z node 13 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:22:40.609103Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:40.609350Z node 13 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "/dev/disk" } } } Command { DefineBox { BoxId: 1 Name: "box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 2 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 8 PDiskFilter { Property { Type: ROT } } } } } 2025-06-30T09:22:40.609459Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1000 Path# /dev/disk 2025-06-30T09:22:40.609466Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1000 Path# /dev/disk 2025-06-30T09:22:40.609471Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1000 Path# /dev/disk 2025-06-30T09:22:40.609475Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1000 Path# /dev/disk 2025-06-30T09:22:40.609480Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1000 Path# /dev/disk 2025-06-30T09:22:40.609485Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1000 Path# /dev/disk 2025-06-30T09:22:40.609489Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1000 Path# /dev/disk 2025-06-30T09:22:40.609494Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1000 Path# /dev/disk 2025-06-30T09:22:40.609498Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 21:1000 Path# /dev/disk 2025-06-30T09:22:40.609503Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 22:1000 Path# /dev/disk 2025-06-30T09:22:40.609511Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 23:1000 Path# /dev/disk 2025-06-30T09:22:40.609516Z node 13 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 24:1000 Path# /dev/disk 2025-06-30T09:22:40.621801Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 Response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true ConfigTxSeqNo: 1 2025-06-30T09:22:40.622139Z node 13 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { UpdateDriveStatus { HostKey { NodeId: 1 } Path: "/dev/disk" Status: INACTIVE } } } Response# Status { ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" FailReason: kHostNotFound FailParam { NodeId: 1 } } ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" >> TCdcStreamWithRebootsTests::CreateStreamOnIndexTableExplicitReady[TabletReboots] [GOOD] >> SystemView::Describe [GOOD] >> SystemView::DescribeSystemFolder >> VectorIndexBuildTest::Metering_ServerLessDB-smallScanBuffer-true [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> Self::Literals [GOOD] Test command err: + BTreeIndex{PageId: 0 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385, 13 rev 1, 683b} | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | > {0, a, false, 0} | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | > {1, b, true, 10} | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | > {2, c, false, 20} | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | > {3, d, true, 30} | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | > {4, e, false, 40} | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | > {5, f, true, 50} | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | > {6, g, false, 60} | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | > {7, h, true, 70} | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | > {8, i, false, 80} | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | > {9, j, true, 90} | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 + BTreeIndex{PageId: 9 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 116b} | + BTreeIndex{PageId: 5 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306, 13 rev 1, 179b} | | + BTreeIndex{PageId: 0 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93, 13 rev 1, 179b} | | | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | | | > {0, a, false, 0} | | | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | | | > {1, b, true, 10} | | | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | | > {2, c, false, 20} | | + BTreeIndex{PageId: 1 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195, 13 rev 1, 179b} | | | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | | | > {3, d, true, 30} | | | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | | | > {4, e, false, 40} | | | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | | > {5, f, true, 50} 
| | + BTreeIndex{PageId: 2 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306, 13 rev 1, 179b} | | | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | | | > {6, g, false, 60} | | | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | | | > {7, h, true, 70} | | | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | > {8, i, false, 80} | + BTreeIndex{PageId: 8 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 242b} | | + BTreeIndex{PageId: 3 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426, 13 rev 1, 179b} | | | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | | | > {9, j, true, 90} | | | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 | | | > {10, k, false, 100} | | | PageId: 10011 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426 | | > {11, l, true, 110} | | + BTreeIndex{PageId: 4 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555, 13 rev 1, 179b} | | | PageId: 10012 RowCount: 1378 DataSize: 13078 GroupDataSize: 26078 ErasedRowCount: 468 | | | > {12, m, false, 120} | | | PageId: 10013 RowCount: 1491 DataSize: 14091 GroupDataSize: 28091 ErasedRowCount: 511 | | | > {13, n, true, 130} | | | PageId: 10014 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555 | | > {14, o, false, 140} | | + BTreeIndex{PageId: 6 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693, 13 rev 1, 179b} | | | PageId: 10015 RowCount: 1720 DataSize: 16120 GroupDataSize: 32120 ErasedRowCount: 600 | | | > {15, p, true, 150} | | | PageId: 10016 RowCount: 1836 DataSize: 17136 GroupDataSize: 34136 ErasedRowCount: 646 | | | > {16, q, false, 160} | | | PageId: 10017 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693 | | > {17, r, true, 170} | | + BTreeIndex{PageId: 7 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 179b} | | | PageId: 10018 RowCount: 2071 DataSize: 19171 GroupDataSize: 38171 ErasedRowCount: 741 | | | > {18, s, false, 180} | | | PageId: 10019 RowCount: 2190 DataSize: 20190 GroupDataSize: 40190 ErasedRowCount: 790 | | | > {19, t, true, 190} | | | PageId: 10020 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840 + BTreeIndex{PageId: 15 RowCount: 15150 DataSize: 106050 GroupDataSize: 207050 ErasedRowCount: 8080, 13 rev 1, 174b} | + BTreeIndex{PageId: 12 RowCount: 9078 DataSize: 70278 GroupDataSize: 138278 ErasedRowCount: 4318, 13 rev 1, 690b} | | + BTreeIndex{PageId: 0 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426, 13 rev 1, 702b} | | | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | | | > {0, x, NULL, NULL} | | | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | | | > {1, xx, NULL, NULL} | | | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | | | > {2, xxx, NULL, NULL} | | | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | | | > {3, xxxx, NULL, NULL} | | | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | | | > {4, xxxxx, NULL, NULL} | | | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | | | > {5, xxxxxx, NULL, NULL} | | | PageId: 10006 RowCount: 721 DataSize: 7021 
GroupDataSize: 14021 ErasedRowCount: 231 | | | > {6, xxxxxxx, NULL, NULL} | | | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | | | > {7, xxxxxxxx, NULL, NULL} | | | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | | | > {8, xxxxxxxxx, NULL, NULL} | | | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | | | > {9, xxxxxxxxxx, NULL, NULL} | | | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 | | | > {10, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10011 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426 | | > {11, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 1 RowCount: 2431 DataSize: 22231 GroupDataSize: 44231 ErasedRowCount: 891, 13 rev 1, 683b} | | | PageId: 10012 RowCount: 1378 DataSize: 13078 GroupDataSize: 26078 ErasedRowCount: 468 | | | > {12, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10013 RowCount: 1491 DataSize: 14091 GroupDataSize: 28091 ErasedRowCount: 511 | | | > {13, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10014 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555 | | | > {14, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10015 RowCount: 1720 DataSize: 16120 GroupDataSize: 32120 ErasedRowCount: 600 | | | > {15, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10016 RowCount: 1836 DataSize: 17136 GroupDataSize: 34136 ErasedRowCount: 646 | | | > {16, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10017 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693 | | | > {17, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10018 RowCount: 2071 DataSize: 19171 GroupDataSize: 38171 ErasedRowCount: 741 | | | > {18, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10019 RowCount: 2190 DataSize: 20190 GroupDataSize: 40190 ErasedRowCount: 790 | | | > {19, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10020 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840 | | | > {20, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10021 RowCount: 2431 DataSize: 22231 GroupDataSize: 44231 ErasedRowCount: 891 | | > {21, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 2 RowCount: 3565 DataSize: 31465 GroupDataSize: 62465 ErasedRowCount: 1395, 13 rev 1, 689b} | | | PageId: 10022 RowCount: 2553 DataSize: 23253 GroupDataSize: 46253 ErasedRowCount: 943 | | | > {22, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10023 RowCount: 2676 DataSize: 24276 GroupDataSize: 48276 ErasedRowCount: 996 | | | > {23, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10024 RowCount: 2800 DataSize: 25300 GroupDataSize: 50300 ErasedRowCount: 1050 | | | > {24, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10025 RowCount: 2925 DataSize: 26325 GroupDataSize: 52325 ErasedRowCount: 1105 | | | > {25, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10026 RowCount: 3051 DataSize: 27351 GroupDataSize: 54351 ErasedRowCount: 1161 | | | > {26, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10027 RowCount: 3178 DataSize: 28378 GroupDataSize: 56378 ErasedRowCount: 1218 | | | > {27, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10028 RowCount: 3306 DataSize: 29406 GroupDataSize: 58406 ErasedRowCount: 1276 | | | > {28, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10029 RowCount: 3435 DataSize: 30435 GroupDataSize: 60435 ErasedRowCount: 1335 | | | > {29, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10030 RowCount: 3565 DataSize: 31465 GroupDataSize: 62465 ErasedRowCount: 1395 | | > {30, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 3 RowCount: 4641 DataSize: 39741 GroupDataSize: 78741 ErasedRowCount: 1911, 13 rev 1, 669b} | 
| | PageId: 10031 RowCount: 3696 DataSize: 32496 GroupDataSize: 64496 ErasedRowCount: 1456 | | | > {31, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10032 RowCount: 3828 DataSize: 33528 GroupDataSize: 66528 ErasedRowCount: 1518 | | | > {32, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10033 RowCount: 3961 DataSize: 34561 GroupDataSize: 68561 ErasedRowCount: 1581 | | | > {33, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10034 RowCount: 4095 DataSize: 35595 GroupDataSize: 70595 ErasedRowCount: 1645 | | | > {34, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10035 RowCount: 4230 DataSize: 36630 GroupDataSize: 72630 ErasedRowCount: 1710 | | | > {35, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10036 RowCount: 4366 DataSize: 37666 GroupDataSize: 74666 ErasedRowCount: 1776 | | | > {36, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10037 RowCount: 4503 DataSize: 38703 GroupDataSize: 76703 ErasedRowCount: 1843 | | | > {37, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10038 RowCount: 4641 DataSize: 39741 GroupDataSize: 78741 ErasedRowCount: 1911 | | > {38, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 4 RowCount: 5781 DataSize: 48081 GroupDataSize: 95081 ErasedRowCount: 2491, 13 rev 1, 725b} | | | PageId: 10039 RowCount: 4780 DataSize: 40780 GroupDataSize: 80780 ErasedRowCount: 1980 | | | > {39, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10040 RowCount: 4920 DataSize: 41820 GroupDataSize: 82820 ErasedRowCount: 2050 | | | > {40, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10041 RowCount: 5061 DataSize: 42861 GroupDataSize: 84861 ErasedRowCount: 2121 | | | > {41, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10042 RowCount: 5203 DataSize: 43903 GroupDataSize: 86903 ErasedRowCount: 2193 | | | > {42, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10043 RowCount: 5346 DataSize: 44946 GroupDataSize: 88946 ErasedRowCount: 2266 | | | > {43, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10044 RowCount: 5490 DataSize: 45990 GroupDataSize: 90990 ErasedRowCount: 2340 | | | > {44, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10045 RowCount: 5635 DataSize: 47035 GroupDataSize: 93035 ErasedRowCount: 2415 | | | > {45, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10046 RowCount: 5781 DataSize: 48081 GroupDataSize: 95081 ErasedRowCount: 2491 | | > {46, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 5 RowCount: 6831 DataSize: 55431 GroupDataSize: 109431 ErasedRowCount: 3051, 13 ... 
7 RowCount: 34 DataSize: 1202 GroupDataSize: 7632 ErasedRowCount: 0 | | | > {4, 10} | | | PageId: 70 RowCount: 36 DataSize: 1284 GroupDataSize: 8163 ErasedRowCount: 0 | | | > {5, 3} | | | PageId: 82 RowCount: 38 DataSize: 1350 GroupDataSize: 8602 ErasedRowCount: 0 | | | > {5, 6} | | | PageId: 87 RowCount: 40 DataSize: 1416 GroupDataSize: 9358 ErasedRowCount: 0 + Rows{0} Label{04 rev 1, 66b}, [0, +2)row | ERowOp 1: {0, 1} | ERowOp 1: {0, 3} + Rows{2} Label{24 rev 1, 66b}, [2, +2)row | ERowOp 1: {0, 4} | ERowOp 1: {0, 6} + Rows{4} Label{44 rev 1, 82b}, [4, +2)row | ERowOp 1: {0, 7} | ERowOp 1: {0, 8} + Rows{8} Label{84 rev 1, 66b}, [6, +2)row | ERowOp 1: {0, 10} | ERowOp 1: {1, 1} + Rows{11} Label{114 rev 1, 66b}, [8, +2)row | ERowOp 1: {1, 3} | ERowOp 1: {1, 4} + Rows{14} Label{144 rev 1, 82b}, [10, +2)row | ERowOp 1: {1, 6} | ERowOp 1: {1, 7} + Rows{20} Label{204 rev 1, 66b}, [12, +2)row | ERowOp 1: {1, 8} | ERowOp 1: {1, 10} + Rows{23} Label{234 rev 1, 66b}, [14, +2)row | ERowOp 1: {2, 1} | ERowOp 1: {2, 3} + Rows{26} Label{264 rev 1, 82b}, [16, +2)row | ERowOp 1: {2, 4} | ERowOp 1: {2, 6} + Rows{36} Label{364 rev 1, 66b}, [18, +2)row | ERowOp 1: {2, 7} | ERowOp 1: {2, 8} + Rows{39} Label{394 rev 1, 66b}, [20, +2)row | ERowOp 1: {2, 10} | ERowOp 1: {3, 1} + Rows{42} Label{424 rev 1, 82b}, [22, +2)row | ERowOp 1: {3, 3} | ERowOp 1: {3, 4} + Rows{48} Label{484 rev 1, 66b}, [24, +2)row | ERowOp 1: {3, 6} | ERowOp 1: {3, 7} + Rows{53} Label{534 rev 1, 66b}, [26, +2)row | ERowOp 1: {3, 8} | ERowOp 1: {3, 10} + Rows{58} Label{584 rev 1, 82b}, [28, +2)row | ERowOp 1: {4, 1} | ERowOp 1: {4, 3} + Rows{64} Label{644 rev 1, 66b}, [30, +2)row | ERowOp 1: {4, 4} | ERowOp 1: {4, 6} + Rows{67} Label{674 rev 1, 66b}, [32, +2)row | ERowOp 1: {4, 7} | ERowOp 1: {4, 8} + Rows{70} Label{704 rev 1, 82b}, [34, +2)row | ERowOp 1: {4, 10} | ERowOp 1: {5, 1} + Rows{82} Label{824 rev 1, 66b}, [36, +2)row | ERowOp 1: {5, 3} | ERowOp 1: {5, 4} + Rows{87} Label{874 rev 1, 66b}, [38, +2)row | ERowOp 1: {5, 6} | ERowOp 1: {5, 7} Slices{ [0, 39] } 0.29156 Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | 
ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + 
Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row 
| ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::Metering_ServerLessDB-smallScanBuffer-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:23.083381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:23.083408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:23.083415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:23.083421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:23.083436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:23.083441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:23.083451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:23.083466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:23.083583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:23.083658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:23.105005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:23.105032Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:23.113442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:23.113509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:23.113557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:23.128374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:23.128541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:23.128686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:23.128789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:23.132624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:23.132706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:23.133115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:23.133129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:23.133155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:23.133165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:23.133171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:23.133194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.135493Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:23.164038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:23.164135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.164218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:23.164227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:23.164279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:23.164297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:23.166957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:23.167009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:23.167069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.167083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:23.167089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:23.167096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:23.169053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.169078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:23.169087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:23.169785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.169803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:23.169811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:23.169830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:23.170688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:23.171250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:23.171295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:23.171520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:23.171555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:23.171572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:23.171651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:23.171661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:23.171697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:23.171710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:23.173428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:23.173443Z node 1 :FLAT_TX_SCHEMESHARD ... ePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "embedding" Type: "String" TypeId: 4097 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "prefix" Type: "Uint32" TypeId: 2 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "String" TypeId: 4097 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "embedding" SchemaVersion: 2 PathOwnerId: 72075186233409549 DataSize: 0 IndexImplTableDescriptions { } IndexImplTableDescriptions { } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_UINT8 vector_dimension: 4 } clusters: 4 levels: 2 } } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 3 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409551 SchemeShard: 72075186233409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } 
PathsInside: 4 PathsLimit: 10000 ShardsInside: 13 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SharedHive: 72057594037968897 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409549, at schemeshard: 72075186233409549 2025-06-30T09:22:43.351121Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLessDB/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72075186233409549 2025-06-30T09:22:43.351165Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409549 describe path "/MyRoot/ServerLessDB/Table/index1" took 46us result status StatusSuccess 2025-06-30T09:22:43.351425Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLessDB/Table/index1" PathDescription { Self { Name: "index1" PathId: 3 SchemeshardId: 72075186233409549 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976725758 CreateStep: 300 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72075186233409549 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976725758 CreateStep: 300 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72075186233409549 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976725758 CreateStep: 300 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409551 SchemeShard: 72075186233409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 13 ShardsLimit: 200000 ResourcesDomainKey { 
SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SharedHive: 72057594037968897 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "embedding" SchemaVersion: 2 PathOwnerId: 72075186233409549 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_id" Type: "Uint64" TypeId: 4 Id: 2 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_centroid" Type: "String" TypeId: 4097 Id: 3 NotNull: true IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "__ydb_id" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: 
false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } IndexImplTableDescriptions { Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_UINT8 vector_dimension: 4 } clusters: 4 levels: 2 } } } } PathId: 3 PathOwnerId: 72075186233409549, at schemeshard: 72075186233409549 ... 
unblocking NKikimr::NMetering::TEvMetering::TEvWriteMeteringJson from FLAT_SCHEMESHARD_ACTOR to TFakeMetering >> TSchemeShardCheckProposeSize::CopyTable >> TSchemeShardTest::CreateIndexedTable >> TSchemeShardTest::Boot >> TSchemeShardTest::DropIndexedTableAndForceDropSimultaneously >> TSchemeShardTest::CreateTable >> TSchemeShardTest::RmDirTwice >> TSchemeShardTest::PathName >> TSchemeShardTest::InitRootAgain >> TSchemeShardTest::CacheEffectiveACL [GOOD] >> TSchemeShardTest::ConsistentCopyTable >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-false >> KqpSplit::AfterResolve+Ascending >> SystemView::SystemViewFailOps [GOOD] >> SystemView::TabletsFields >> TSchemeShardTest::PathName [GOOD] >> TSchemeShardTest::PathName_SetLocale >> SystemView::ShowCreateTableTtlSettings [GOOD] >> SystemView::ShowCreateTableTemporary >> TSchemeShardTest::InitRootAgain [GOOD] >> TSchemeShardTest::InitRootWithOwner >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoWithError [GOOD] >> TSchemeShardTest::Boot [GOOD] >> TSchemeShardTest::AlterTableKeyColumns >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-false [GOOD] >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-true >> TSchemeShardTest::RmDirTwice [GOOD] >> TSchemeShardTest::TopicMeteringMode >> TDataShardLocksTest::MvccTestWriteBreaksLocks [GOOD] >> TDataShardLocksTest::Points_ManyTx >> TSchemeShardTest::DropIndexedTableAndForceDropSimultaneously [GOOD] >> TSchemeShardTest::DependentOps >> TSchemeShardTest::CreateTable [GOOD] >> TSchemeShardTest::CreateTableWithDate >> TExternalDataSourceTestReboots::DropExternalDataSourceWithReboots [GOOD] >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-true [GOOD] >> TSchemeShardTest::AlterTableAndConcurrentSplit >> TSchemeShardTest::TopicMeteringMode [GOOD] >> TSchemeShardTest::Restart >> TDataShardLocksTest::Points_ManyTx [GOOD] >> TDataShardLocksTest::Points_ManyTx_BreakAll >> TSchemeShardTest::PathName_SetLocale [GOOD] >> TSchemeShardTest::NameFormat ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream_reboots/unittest >> TCdcStreamWithRebootsTests::CreateStreamOnIndexTableExplicitReady[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:46.763723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 
2025-06-30T09:21:46.763750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.763756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:46.763770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:46.763776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:46.763788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:46.763798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:46.763820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:46.763954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:46.764054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:46.797554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:46.797580Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:46.797727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.808382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:46.809634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:46.809693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:46.812885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:46.812955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:46.813110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.813181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:46.814695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.814786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:46.815090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:46.815106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:46.815118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:46.815127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:46.815134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:46.815164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:46.817219Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.862941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:46.863018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.863088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:46.863099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:46.863181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:46.863198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:46.864183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 
72057594046678944 2025-06-30T09:21:46.864233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:46.864301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.864314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:46.864321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:46.864328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:46.864993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.865013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:46.865022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:46.865650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.865668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.865676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:46.865685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:46.866576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:46.867533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:46.867583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:46.867810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.867848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep 
Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 4294969455 } } Step: 5000001 Media ... FO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:43.311692Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 6] 2025-06-30T09:22:43.311773Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-30T09:22:43.311829Z node 142 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:43.311836Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [142:212:2212], at schemeshard: 72057594046678944, txId: 1003, path id: 6 2025-06-30T09:22:43.311843Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [142:212:2212], at schemeshard: 72057594046678944, txId: 1003, path id: 7 2025-06-30T09:22:43.311962Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:3, at schemeshard: 72057594046678944 2025-06-30T09:22:43.311975Z node 142 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:3 ProgressState 2025-06-30T09:22:43.311994Z node 142 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:3 progress is 4/4 2025-06-30T09:22:43.311999Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 4/4 2025-06-30T09:22:43.312005Z node 142 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:3 progress is 4/4 2025-06-30T09:22:43.312009Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 4/4 2025-06-30T09:22:43.312014Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 4/4, is published: false 2025-06-30T09:22:43.312022Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 4/4 2025-06-30T09:22:43.312029Z node 142 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:22:43.312036Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:22:43.312054Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:22:43.312060Z node 142 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:1 2025-06-30T09:22:43.312064Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:1 2025-06-30T09:22:43.312069Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: 
DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 4 2025-06-30T09:22:43.312074Z node 142 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:2 2025-06-30T09:22:43.312078Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:2 2025-06-30T09:22:43.312096Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-30T09:22:43.312101Z node 142 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:3 2025-06-30T09:22:43.312105Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:3 2025-06-30T09:22:43.312116Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 5 2025-06-30T09:22:43.312126Z node 142 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1003, publications: 2, subscribers: 0 2025-06-30T09:22:43.312130Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 6], 4 2025-06-30T09:22:43.312134Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 7], 2 2025-06-30T09:22:43.312493Z node 142 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:43.312511Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:43.312516Z node 142 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:43.312522Z node 142 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 4 2025-06-30T09:22:43.312528Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-30T09:22:43.313032Z node 142 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:43.313066Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:43.313075Z node 142 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, 
count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:43.313084Z node 142 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 2 2025-06-30T09:22:43.313092Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 4 2025-06-30T09:22:43.313118Z node 142 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:22:43.314612Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:43.315516Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:22:43.317484Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:22:43.317500Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:22:43.317612Z node 142 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:22:43.317639Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:43.317646Z node 142 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [142:743:2648] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:43.317777Z node 142 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:22:43.317866Z node 142 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream" took 114us result status StatusSuccess 2025-06-30T09:22:43.318012Z node 142 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index/indexImplTable/Stream" PathDescription { Self { Name: "Stream" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeCdcStream CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 CdcStreamVersion: 1 } ChildrenExist: true } Children { Name: "streamImpl" PathId: 7 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" 
PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409549 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } CdcStreamDescription { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 6 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |82.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/fqrun/fqrun |82.3%| [LD] {RESULT} $(B)/ydb/tests/tools/fqrun/fqrun |82.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/fqrun/fqrun |82.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write >> TSchemeShardTest::InitRootWithOwner [GOOD] >> TSchemeShardTest::MkRmDir >> TDataShardLocksTest::Points_ManyTx_BreakAll [GOOD] >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf |82.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |82.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoWithError [GOOD] Test command err: 2025-06-30T09:22:01.531996Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670234005420400:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:01.532030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046b6/r3tmp/tmp57nBv8/pdisk_1.dat 2025-06-30T09:22:01.615120Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2220, node 1 2025-06-30T09:22:01.639012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:01.639047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:01.641340Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-30T09:22:01.641344Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:01.641346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:01.641393Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:01.641927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:01.717154Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:01.718182Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:01.718196Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:01.718391Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:20351, port: 20351 2025-06-30T09:22:01.718802Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:01.736978Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:01.783862Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****KPrA (07ACA09B) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046b6/r3tmp/tmpTZ3JLz/pdisk_1.dat TServer::EnableGrpc on GrpcPort 1038, node 2 2025-06-30T09:22:02.145577Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:02.145600Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670234701075928:2080] 1751275322112669 != 1751275322112672 2025-06-30T09:22:02.145674Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.145677Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.145679Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.145731Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.219283Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.219319Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.220281Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:02.310830Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:02.310926Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:02.310931Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:02.311153Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: 
ldap://localhost:16933, port: 16933 2025-06-30T09:22:02.311187Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:02.335578Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:02.379552Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:02.379967Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:16933 return no entries 2025-06-30T09:22:02.380199Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****pJhw (BE440310) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:16933 return no entries)' 2025-06-30T09:22:02.641626Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670234845117745:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:02.641655Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046b6/r3tmp/tmp4nrTwm/pdisk_1.dat TServer::EnableGrpc on GrpcPort 27112, node 3 2025-06-30T09:22:02.663408Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:02.673213Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:02.673232Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:02.673235Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:02.673284Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:02.748131Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:02.748168Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:02.749527Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:03.001971Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:03.003970Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:03.003984Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:03.004176Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:22982, port: 22982 2025-06-30T09:22:03.004196Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:03.009864Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:03.054989Z node 3 :LDAP_AUTH_PROVIDER DEBUG: 
ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:03.102981Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:03.103156Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:03.103180Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:03.150940Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:03.198887Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:03.203239Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****n4cQ (79EB9E96) () has now valid token of ldapuser@ldap 2025-06-30T09:22:03.644367Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:07.643826Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7521670234845117745:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:07.644579Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:22:07.655945Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****n4cQ (79EB9E96) 2025-06-30T09:22:07.656023Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:22982, port: 22982 2025-06-30T09:22:07.656051Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:07.686685Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:07.735605Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:07.778948Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:07.779109Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:07.779120Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 
2025-06-30T09:22:07.825882Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups ... : ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:29.123036Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:29.123279Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:9726 return no entries 2025-06-30T09:22:29.123507Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****K4uw (2B8D6F42) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:9726 return no entries)' 2025-06-30T09:22:32.058410Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****K4uw (2B8D6F42) 2025-06-30T09:22:34.603596Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521670373340977180:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:34.603616Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0046b6/r3tmp/tmpgxyvZ8/pdisk_1.dat 2025-06-30T09:22:34.619788Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1972, node 6 2025-06-30T09:22:34.630663Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:34.630676Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:34.630679Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:34.630754Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:34.702952Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-30T09:22:34.704976Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:34.704997Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:34.705221Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29624, port: 29624 2025-06-30T09:22:34.705247Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:34.709297Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:34.709343Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:34.710310Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:34.711901Z node 6 :LDAP_AUTH_PROVIDER DEBUG: 
ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:34.755000Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:34.755317Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:29624. Server is busy 2025-06-30T09:22:34.755589Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****2rcw (4EE514E5) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:29624. Server is busy)' 2025-06-30T09:22:34.755648Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:34.755652Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:34.755814Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29624, port: 29624 2025-06-30T09:22:34.755835Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:34.761076Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:34.803056Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:34.806974Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:29624. Server is busy 2025-06-30T09:22:34.807282Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****2rcw (4EE514E5) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:29624. Server is busy)' 2025-06-30T09:22:35.605206Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:37.605470Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****2rcw (4EE514E5) 2025-06-30T09:22:37.605564Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:37.605577Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:37.605860Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29624, port: 29624 2025-06-30T09:22:37.605889Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:37.618682Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:37.667018Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:37.667258Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:29624. 
Server is busy 2025-06-30T09:22:37.667479Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****2rcw (4EE514E5) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:29624. Server is busy)' 2025-06-30T09:22:39.606379Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7521670373340977180:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:39.606423Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:22:40.606763Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****2rcw (4EE514E5) 2025-06-30T09:22:40.606846Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-30T09:22:40.606859Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-30T09:22:40.607096Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29624, port: 29624 2025-06-30T09:22:40.607131Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:40.642299Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:40.687058Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:40.731661Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:40.731940Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:40.731977Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:40.778957Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:40.826979Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:40.827428Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****2rcw (4EE514E5) () has now valid token of ldapuser@ldap 2025-06-30T09:22:44.610870Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****2rcw (4EE514E5) 2025-06-30T09:22:44.610986Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29624, port: 29624 2025-06-30T09:22:44.611016Z node 6 :LDAP_AUTH_PROVIDER DEBUG: 
ldap_auth_provider.cpp:179: start TLS 2025-06-30T09:22:44.627118Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-30T09:22:44.671016Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-30T09:22:44.714962Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-30T09:22:44.715269Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-30T09:22:44.715286Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-30T09:22:44.758946Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf LDAP_MOCK: SearchRequest, protocol error, unexpected request, not matched any of provided to mock 2025-06-30T09:22:44.759407Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****2rcw (4EE514E5) () has now valid token of ldapuser@ldap >> TSchemeShardCheckProposeSize::CopyTable [GOOD] >> TSchemeShardCheckProposeSize::CopyTables >> TSchemeShardTest::CreateIndexedTable [GOOD] >> TSchemeShardTest::CreateIndexedTableAndForceDrop >> TSchemeShardTest::AlterTableKeyColumns [GOOD] >> TSchemeShardTest::AlterTableFollowers >> TSchemeShardTest::DependentOps [GOOD] >> TSchemeShardTest::DropPQ >> SystemView::DescribeSystemFolder [GOOD] >> SystemView::DescribeAccessDenied >> SystemView::TabletsFields [GOOD] >> SystemView::TabletsShards >> TDataShardLocksTest::MvccTestOooTxDoesntBreakPrecedingReadersLocks [GOOD] >> TDataShardLocksTest::MvccTestOutdatedLocksRemove [GOOD] >> TDataShardLocksTest::MvccTestBreakEdge [GOOD] >> TDataShardLocksTest::MvccTestAlreadyBrokenLocks [GOOD] >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf [GOOD] >> TSchemeShardTest::MkRmDir [GOOD] >> TSchemeShardTest::DropTableTwice >> TSchemeShardTest::CreateIndexedTableAndForceDrop [GOOD] >> TSchemeShardTest::CreateAlterTableWithCodec ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::DropExternalDataSourceWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 
72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:36.552085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:36.552108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:36.552115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:36.552120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:36.552133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:36.552138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:36.552148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:36.552166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:36.552279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:36.552359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:36.564243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:36.564263Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:36.564338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:36.567425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:36.568309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:36.568352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:36.570477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:36.570537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:36.570667Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:36.570726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:36.575162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:36.575223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:36.575484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:36.575495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:36.575508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:36.575514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:36.575519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:36.575544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:36.577075Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:36.595841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:36.595913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:36.595966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:36.595975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:36.596017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:36.596028Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:36.598243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:36.598298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:36.598349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:36.598357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:36.598361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:36.598365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:36.598877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:36.598889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:36.598894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:36.599246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:36.599257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:36.599262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:36.599270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:36.600016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:36.600412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:36.600448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is 
[1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:36.600609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:36.600629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... ep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1004 at step: 5000006 2025-06-30T09:22:44.941800Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.941831Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1004 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 141733922925 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:44.941843Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 1004:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-30T09:22:44.941872Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:22:44.941893Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1004:0 128 -> 240 2025-06-30T09:22:44.941931Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:44.941941Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:22:44.942352Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:22:44.942468Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 FAKE_COORDINATOR: Erasing txId 1004 2025-06-30T09:22:44.942889Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.942900Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:44.942947Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:22:44.942977Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 
72057594046678944 2025-06-30T09:22:44.942983Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [33:210:2210], at schemeshard: 72057594046678944, txId: 1004, path id: 1 2025-06-30T09:22:44.942989Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [33:210:2210], at schemeshard: 72057594046678944, txId: 1004, path id: 4 2025-06-30T09:22:44.943057Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.943066Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-30T09:22:44.943082Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:22:44.943092Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:22:44.943099Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:22:44.943103Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:22:44.943108Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-30T09:22:44.943116Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:22:44.943121Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:0 2025-06-30T09:22:44.943126Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:0 2025-06-30T09:22:44.943142Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:22:44.943150Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1004, publications: 2, subscribers: 0 2025-06-30T09:22:44.943154Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-30T09:22:44.943158Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:22:44.943238Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:22:44.943251Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:22:44.943257Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, 
at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:22:44.943262Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:22:44.943267Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:22:44.943342Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:44.943349Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:22:44.943362Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:44.943389Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:22:44.943400Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:22:44.943405Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:22:44.943410Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-30T09:22:44.943415Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:44.943424Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 0 2025-06-30T09:22:44.944311Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:22:44.944339Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:22:44.944351Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 TestModificationResult got TxId: 1004, wait until txId: 1004 TestWaitNotification wait txId: 1004 2025-06-30T09:22:44.944419Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-30T09:22:44.944429Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 
2025-06-30T09:22:44.944526Z node 33 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-30T09:22:44.944548Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:22:44.944554Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [33:417:2406] TestWaitNotification: OK eventTxId 1004 2025-06-30T09:22:44.944652Z node 33 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:44.944687Z node 33 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 50us result status StatusPathDoesNotExist 2025-06-30T09:22:44.944735Z node 33 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardTest::Restart [GOOD] >> TSchemeShardTest::SchemeErrors >> TSchemeShardTest::DropTableTwice [GOOD] >> TSchemeShardTest::ModifyACL >> TSchemeShardTest::NameFormat [GOOD] >> TSchemeShardTest::ParallelCreateTable >> TSchemeShardTest::CreateTableWithDate [GOOD] >> TSchemeShardTest::CreateIndexedTableRejects >> KqpSplit::AfterResolve+Ascending [GOOD] >> KqpSplit::AfterResolve+Descending >> YdbIndexTable::OnlineBuildWithDataColumn [GOOD] |82.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::MvccTestAlreadyBrokenLocks [GOOD] |82.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf [GOOD] >> TSchemeShardTest::AlterTableAndConcurrentSplit [GOOD] >> TSchemeShardTest::AlterTable >> TSchemeShardTest::ConsistentCopyTable [GOOD] >> TSchemeShardTest::ConsistentCopyTableAwait >> TSchemeShardTest::SchemeErrors [GOOD] >> TSchemeShardTest::SerializedCellVec [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldNotUpdate >> TSchemeShardTest::ModifyACL [GOOD] >> TSchemeShardTest::IgnoreUserColumnIds |82.3%| [TA] $(B)/ydb/core/security/ldap_auth_provider/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeShardTest::CreateAlterTableWithCodec [GOOD] >> TSchemeShardTest::CopyTableTwiceSimultaneously >> IndexBuildTestReboots::CancelBuild [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldNotUpdate [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldUpdate >> TSchemeShardTest::ParallelCreateTable [GOOD] >> TSchemeShardTest::ParallelCreateSameTable >> TSchemeShardTest::AlterTableFollowers [GOOD] >> TSchemeShardTest::AlterTableSizeToSplit >> TSchemeShardTest::IgnoreUserColumnIds [GOOD] >> TSchemeShardTest::DropTableAndConcurrentSplit |82.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |82.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |82.3%| [TA] {RESULT} $(B)/ydb/core/security/ldap_auth_provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} |82.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots >> SystemView::TabletsShards [GOOD] >> SystemView::TabletsFollowers >> BsControllerConfig::AddDriveSerialMassive [GOOD] >> TSchemeShardTest::CopyTableTwiceSimultaneously [GOOD] >> TSchemeShardTest::CopyTableWithAlterConfig >> TSchemeShardTest::AlterTable [GOOD] >> TSchemeShardTest::AlterTableDropColumnReCreateSplit >> TSchemeShardTest::ConsistentCopyTableAwait [GOOD] >> TSchemeShardTest::ConsistentCopyTableRejects >> KqpScan::UnionThree ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::OnlineBuildWithDataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 20198, MsgBus: 14097 2025-06-30T09:21:42.794094Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670152200209894:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:42.794215Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00339c/r3tmp/tmptngruN/pdisk_1.dat 2025-06-30T09:21:42.884340Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:42.884445Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670152200209673:2080] 1751275302788959 != 1751275302788962 2025-06-30T09:21:42.894278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:42.894315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:42.895786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20198, node 1 2025-06-30T09:21:42.914992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:42.915015Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:42.915017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:42.915066Z node 
1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14097 TClient is connected to server localhost:14097 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:21:43.099958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:21:43.107205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:43.119763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:21:43.205034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.272040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:43.288234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:43.349664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670156495178579:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:43.349687Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:43.415176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.430143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.447158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.459291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.472961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.489379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.509592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.544017Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670156495179236:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:43.544050Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:43.544138Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670156495179241:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:43.545204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:43.549671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:43.549816Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670156495179243:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:43.632344Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670156495179294:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:43.787949Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:43.826587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:43.961861Z node 1 :KQP_EXECUTER ERROR: kqp_ ... Ctx: { TraceId: 01jz02arke03m66y41x9vaxwwq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:44.977808Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720638. Ctx: { TraceId: 01jz02arke03m66y41x9vaxwwq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:44.985279Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720639. Ctx: { TraceId: 01jz02arkq9bdf2nys6zpbdn0f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:44.986718Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720640. Ctx: { TraceId: 01jz02arkq9bdf2nys6zpbdn0f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:44.993502Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720641. Ctx: { TraceId: 01jz02arkzab35vypfdynvrhkm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:44.995130Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720642. Ctx: { TraceId: 01jz02arkzab35vypfdynvrhkm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.002081Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720643. Ctx: { TraceId: 01jz02arm818tpr8gd8mkcb6c9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:45.003859Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720644. Ctx: { TraceId: 01jz02arm818tpr8gd8mkcb6c9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.010941Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720645. Ctx: { TraceId: 01jz02armh4wvcqk5zkj4bcszz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.012647Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720646. Ctx: { TraceId: 01jz02armh4wvcqk5zkj4bcszz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.020217Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720647. Ctx: { TraceId: 01jz02armt3enz2qkn1vegv62j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.021947Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720648. Ctx: { TraceId: 01jz02armt3enz2qkn1vegv62j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.028813Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720649. Ctx: { TraceId: 01jz02arn3dvv75v47h5tvtw04, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.030426Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720650. Ctx: { TraceId: 01jz02arn3dvv75v47h5tvtw04, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.039082Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720651. Ctx: { TraceId: 01jz02arnb2ng2dv5y48177g57, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.040849Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720652. Ctx: { TraceId: 01jz02arnb2ng2dv5y48177g57, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.049843Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720653. Ctx: { TraceId: 01jz02arnr52z6cazkcj64r0g2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:45.052691Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720654. Ctx: { TraceId: 01jz02arnr52z6cazkcj64r0g2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.065124Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720655. Ctx: { TraceId: 01jz02arp71ane969sasv0txp8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.070834Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720656. Ctx: { TraceId: 01jz02arp71ane969sasv0txp8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.084786Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720657. Ctx: { TraceId: 01jz02arpvc27zdtkxkp5pxnf9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.086848Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720658. Ctx: { TraceId: 01jz02arpvc27zdtkxkp5pxnf9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.098457Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720659. Ctx: { TraceId: 01jz02arq81h8hbcnesynd96yj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.100558Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720660. Ctx: { TraceId: 01jz02arq81h8hbcnesynd96yj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.107751Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720661. Ctx: { TraceId: 01jz02arqjdke4ydvxzn6er56c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.109381Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720662. Ctx: { TraceId: 01jz02arqjdke4ydvxzn6er56c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.116884Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720663. Ctx: { TraceId: 01jz02arqv0pcaaa4qyx8yjg5m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:45.118849Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720664. Ctx: { TraceId: 01jz02arqv0pcaaa4qyx8yjg5m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.129767Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720665. Ctx: { TraceId: 01jz02arr7ad0fqjgk5fn3r539, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.132145Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720666. Ctx: { TraceId: 01jz02arr7ad0fqjgk5fn3r539, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.139855Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720667. Ctx: { TraceId: 01jz02arrjch09tzb4e72dpwq8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.141394Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720668. Ctx: { TraceId: 01jz02arrjch09tzb4e72dpwq8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODI2MzBkYWQtNjgwMTIwMjQtYTZmNDQwNzUtZDY1NTA2YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.148411Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720669. Ctx: { TraceId: 01jz02arrt9kcw804118q34q8c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:45.149644Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720670. Ctx: { TraceId: 01jz02arrt9kcw804118q34q8c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMwMGFhYWUtNDc3MjNiMWEtYTJiYjI1N2ItZWU2Njg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldUpdate [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonStorageConfig >> TSchemeShardTest::AlterTableSizeToSplit [GOOD] >> TSchemeShardTest::AlterTableSplitSchema >> KqpScan::GrepByString >> TDataShardLocksTest::Points_OneTx [GOOD] |82.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut >> TDataShardLocksTest::Points_ManyTx_RemoveAll |82.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |82.3%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut >> TSchemeShardTest::DropPQ [GOOD] >> TSchemeShardTest::DropPQFail >> TSchemeShardTest::ParallelCreateSameTable [GOOD] >> TSchemeShardTest::MultipleColumnFamilies ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::CancelBuild [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:33.657515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:33.657545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:33.657551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:33.657556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:33.657575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:33.657580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:33.657591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:33.657607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:33.657757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:33.657846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:33.672743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:33.672766Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:33.672888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:33.675753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:33.676520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:33.676556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:33.678699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:33.678792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:33.678959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:33.679007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:33.680046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:33.680097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:33.680441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:33.680458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:33.680468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:33.680476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:33.680484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:33.680525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 
72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:33.682213Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:33.703668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:33.703751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:33.703802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:33.703809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:33.703855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:33.703870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:33.705478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:33.705543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:33.705614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:33.705629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:33.705636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:33.705642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:33.706453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:33.706478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:33.706485Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:33.707188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:33.707212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:33.707223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:33.707234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:33.708044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:33.709116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:33.709166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:33.709432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:33.709485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
otification wait txId: 1003 2025-06-30T09:22:45.928353Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:22:45.928362Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:22:45.928412Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:103: NotifyTxCompletion index build in-flight, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:22:45.928418Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:110: NotifyTxCompletion, index build is ready to notify, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:22:45.928431Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:45.928436Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [20:862:2802] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:45.928506Z node 20 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 1003 2025-06-30T09:22:45.928605Z node 20 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:103: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 1003 Issues { message: "TShardStatus { ShardIdx: 72057594046678944:2 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/dir/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 1003 Issues { message: "TShardStatus { ShardIdx: 72057594046678944:2 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 }" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/dir/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } 2025-06-30T09:22:45.928729Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:45.928787Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table" took 64us result status StatusSuccess 2025-06-30T09:22:45.928955Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table" PathDescription { Self { Name: "Table" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "index1" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 
MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:45.929080Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:22:45.929134Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1" took 58us result status StatusSuccess 2025-06-30T09:22:45.929347Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1" PathDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "index1" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 
InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:45.929460Z node 20 :BUILD_INDEX NOTICE: schemeshard_build_index__forget.cpp:18: TIndexBuilder::TXTYPE_FORGET_INDEX_BUILD: DoExecute TxId: 1005 DatabaseName: "/MyRoot" IndexBuildId: 1003 2025-06-30T09:22:45.929514Z node 20 :BUILD_INDEX NOTICE: schemeshard_build_index_tx_base.h:101: TIndexBuilder::TXTYPE_FORGET_INDEX_BUILD: Reply TxId: 1005 Status: SUCCESS BUILDINDEX RESPONSE Forget: NKikimrIndexBuilder.TEvForgetResponse TxId: 1005 Status: SUCCESS
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::AddDriveSerialMassive [GOOD]
Test command err:
Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:199:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:199:2077] Leader for TabletID 72057594037932033 is [1:235:2079] sender: [1:236:2066] recipient: [1:199:2077] 2025-06-30T09:22:39.119030Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:39.120079Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:39.120169Z node 1 :BS_CONTROLLER DEBUG:
{BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:39.120493Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:39.120597Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:39.120619Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:39.120626Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:39.120675Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:39.121737Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:39.121772Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:39.121815Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:39.121837Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:39.121851Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:39.121860Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:235:2079] sender: [1:257:2066] recipient: [1:20:2067] 2025-06-30T09:22:39.132282Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:39.132329Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:39.142645Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:39.142693Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:39.142712Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:39.142726Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:39.142785Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:39.142796Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:39.142803Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:39.142814Z node 1 
:BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:39.153876Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:39.153925Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:39.164656Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:39.164707Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:39.164959Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:39.164966Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:39.165004Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:22:39.165012Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:39.167981Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-06-30T09:22:39.168409Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-06-30T09:22:39.168506Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] Leader for TabletID 72057594037932033 is [11:233:2079] sender: [11:236:2066] recipient: [11:204:2077] 2025-06-30T09:22:41.123730Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:41.123963Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:41.124023Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:41.124134Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:41.124333Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:41.124362Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:41.124368Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:41.124419Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:41.125434Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:41.125470Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 
2025-06-30T09:22:41.125499Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:41.125520Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:41.125533Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:41.125542Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:233:2079] sender: [11:257:2066] recipient: [11:20:2067] 2025-06-30T09:22:41.135946Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:41.136033Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:41.146360Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:41.146408Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:41.146434Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:41.146447Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:41.146472Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:41.146480Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:41.146487Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:41.146499Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:41.156792Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:41.156839Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:41.167122Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:41.167164Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:41.167318Z node 11 :BS_CONTROLLER DEBUG: 
{BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:41.167324Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:41.167353Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:22:41.167358Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:41.167517Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-06-30T09:22:41.167732Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# ... ommand { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } } 2025-06-30T09:22:43.012626Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_6" BoxId: 1 } } } 2025-06-30T09:22:43.012732Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } } 2025-06-30T09:22:43.012832Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_8" BoxId: 1 } } } 2025-06-30T09:22:43.013144Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } } 2025-06-30T09:22:43.013280Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } } 2025-06-30T09:22:43.013400Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } } 2025-06-30T09:22:43.013519Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } } 2025-06-30T09:22:43.013632Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } } 2025-06-30T09:22:43.013815Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } } 2025-06-30T09:22:43.013945Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } } 2025-06-30T09:22:43.014085Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } } 2025-06-30T09:22:43.014225Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } } 2025-06-30T09:22:43.014351Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } } 2025-06-30T09:22:43.014491Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } } Leader for TabletID 72057594037932033 is [0:0:0] sender: [31:231:2066] recipient: [31:213:2077] IGNORE Leader for TabletID 72057594037932033 
is [0:0:0] sender: [31:231:2066] recipient: [31:213:2077] Leader for TabletID 72057594037932033 is [31:236:2079] sender: [31:237:2066] recipient: [31:213:2077] 2025-06-30T09:22:44.779468Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:44.779661Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:44.779711Z node 31 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:44.779928Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:44.779970Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:44.780007Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:44.780014Z node 31 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:44.780055Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:44.781064Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:44.781090Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:44.781116Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:44.781134Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:44.781145Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:44.781155Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [31:236:2079] sender: [31:257:2066] recipient: [31:20:2067] 2025-06-30T09:22:44.791505Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:44.791544Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:44.801885Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:44.801939Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:44.801954Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:44.801966Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:44.801993Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx 
from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:44.802003Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:44.802009Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:44.802023Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:44.812345Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:44.812391Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:44.822673Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:44.822715Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:44.822894Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:44.822902Z node 31 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:44.822936Z node 31 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:22:44.822944Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:44.823086Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_0" BoxId: 1 } } } 2025-06-30T09:22:44.823265Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_1" BoxId: 1 } } } 2025-06-30T09:22:44.823322Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_2" BoxId: 1 } } } 2025-06-30T09:22:44.823386Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_3" BoxId: 1 } } } 2025-06-30T09:22:44.823444Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_4" BoxId: 1 } } } 2025-06-30T09:22:44.823529Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } } 2025-06-30T09:22:44.823583Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_6" BoxId: 1 } } } 2025-06-30T09:22:44.823637Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } } 2025-06-30T09:22:44.823689Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { 
AddDriveSerial { Serial: "SN_8" BoxId: 1 } } } 2025-06-30T09:22:44.823740Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } } 2025-06-30T09:22:44.823793Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } } 2025-06-30T09:22:44.823849Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } } 2025-06-30T09:22:44.823905Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } } 2025-06-30T09:22:44.823964Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } } 2025-06-30T09:22:44.824054Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } } 2025-06-30T09:22:44.824114Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } } 2025-06-30T09:22:44.824169Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } } 2025-06-30T09:22:44.824226Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } } 2025-06-30T09:22:44.824281Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } } 2025-06-30T09:22:44.824337Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } }
>> TSchemeShardTest::DropTableAndConcurrentSplit [GOOD]
>> TSchemeShardTest::DropTable
>> TSchemeShardTest::UpdateChannelsBindingSolomonStorageConfig [GOOD]
>> TSchemeShardTest::SimultaneousDropForceDrop
>> TDataShardLocksTest::Points_ManyTx_RemoveAll [GOOD]
>> TDataShardLocksTest::UseLocksCache
>> KqpSplit::AfterResolve+Descending [GOOD]
>> TSchemeShardTest::AlterTableDropColumnReCreateSplit [GOOD]
>> TSchemeShardTest::AlterTableDropColumnSplitThenReCreate
>> KqpScan::NullInKey
>> TSchemeShardTest::AlterTableSplitSchema [GOOD]
>> TSchemeShardTest::AlterTableSettings
>> TSchemeShardTest::SimultaneousDropForceDrop [GOOD]
>> TSchemeShardTest::RejectSystemViewPath-EnableSystemNamesProtection-false
>> KqpSplit::StreamLookupSplitAfterFirstResult
>> TSchemeShardTest::CreateIndexedTableRejects [GOOD]
>> TSchemeShardTest::CreateIndexedTableAndForceDropSimultaneously
>> SystemView::DescribeAccessDenied [GOOD]
>> SystemView::CollectScriptingQueries
>> SystemView::TabletsFollowers [GOOD]
>> SystemView::TabletsRanges
>> TSchemeShardTest::CopyTableWithAlterConfig [GOOD]
>> TSchemeShardTest::CopyTableOmitFollowers
>> TSchemeShardTest::AlterTableDropColumnSplitThenReCreate [GOOD]
>> TSchemeShardTest::AlterTableById
>> TSchemeShardTest::ConsistentCopyTableRejects [GOOD]
>> TSchemeShardTest::ConsistentCopyTableToDeletedPath
>> TSchemeShardTest::CreateIndexedTableAndForceDropSimultaneously [GOOD]
>> TSchemeShardTest::CreateTableWithUniformPartitioning
>> TSchemeShardTest::RejectSystemViewPath-EnableSystemNamesProtection-false [GOOD]
>> TSchemeShardTest::RejectSystemViewPath-EnableSystemNamesProtection-true
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpSplit::AfterResolve+Descending [GOOD]
Test command err:
Trying to start YDB, gRPC: 29995, MsgBus: 3695 2025-06-30T09:22:44.610632Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670418801387746:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:44.610656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e7a/r3tmp/tmpCwaDsy/pdisk_1.dat 2025-06-30T09:22:44.667102Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:44.667215Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670418801387724:2080] 1751275364610471 != 1751275364610474 TServer::EnableGrpc on GrpcPort 29995, node 1 2025-06-30T09:22:44.682770Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:44.682782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:44.682785Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:44.682829Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3695 TClient is connected to server localhost:3695 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:44.732899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting...
2025-06-30T09:22:44.739677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:44.744897Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:44.744916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:44.746028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:44.805174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:44.866700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:44.881960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:45.003124Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670423096356652:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:45.003166Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:45.063375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:45.072244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:45.085688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:45.100275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:45.114317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:45.128572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:45.185793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:45.200811Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670423096357307:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:45.200865Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:45.200889Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670423096357312:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:45.202116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:45.211017Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670423096357314:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:22:45.279595Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670423096357365:3402] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:45.443433Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521670423096357656:2468] TxId: 281474976710673. Ctx: { TraceId: 01jz02as1ebaqx2jkewys995ej, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzA4YjViNmEtOWRiZDQ2Y2MtYzE1ZjU3MC01YzYxZmRhMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database 2025-06-30T09:22:45.443545Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { TraceId: 01jz02as1ebaqx2jkewys995ej, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzA4YjViNmEtOWRiZDQ2Y2MtYzE1ZjU3MC01YzYxZmRhMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976710674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594 ... 2];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:45.790492Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e7a/r3tmp/tmpvQ7TQp/pdisk_1.dat 2025-06-30T09:22:45.809000Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:45.809430Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670420803186363:2080] 1751275365790268 != 1751275365790271 TServer::EnableGrpc on GrpcPort 8831, node 2 2025-06-30T09:22:45.819790Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:45.819809Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:45.819811Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:45.819875Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10335 TClient is connected to server localhost:10335 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:45.897195Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:45.897239Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:45.897879Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:45.898242Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:22:45.904954Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:45.919438Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:45.947901Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:45.968444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:46.302188Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670425098155252:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:46.302220Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:46.316357Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:46.328519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:46.340999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:46.354394Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:46.416362Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:46.479254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:46.494483Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:46.516476Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670425098155915:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:46.516506Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:46.516693Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670425098155920:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:46.517848Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:46.521189Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:46.521316Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670425098155922:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:46.608654Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670425098155973:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:46.792935Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:46.795935Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jz02atbpbfgn777h450kvzf4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDlmMWZmNGUtOWE1YmUwNDUtOTY3MDQ4YTQtOWMwZDgwNDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 resume evread ----------------------------------------------------------- 2025-06-30T09:22:46.810507Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275366841, txId: 281474976715672] shutting down |82.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut >> TSchemeShardTest::DropTable [GOOD] >> TSchemeShardTest::DropTableById |82.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |82.3%| [LD] {RESULT} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut >> TSchemeShardTest::AlterTableSettings [GOOD] >> TSchemeShardTest::AssignBlockStoreVolume >> TSchemeShardTest::AlterTableById [GOOD] >> TSchemeShardTest::AlterTableConfig >> TSchemeShardTest::RejectSystemViewPath-EnableSystemNamesProtection-true [GOOD] >> TSchemeShardTest::SplitKey [GOOD] >> TSchemeShardTest::SplitAlterCopy >> KqpScan::UnionThree [GOOD] >> KqpScan::UnionSameTable |82.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardTest::AssignBlockStoreVolume [GOOD] >> TSchemeShardTest::AssignBlockStoreVolumeDuringAlter >> KqpScan::GrepByString [GOOD] >> KqpScan::GrepLimit |82.3%| [TA] $(B)/ydb/core/kqp/ut/idx_test/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeShardTest::CopyTableOmitFollowers [GOOD] >> TSchemeShardTest::CreateIndexedTableAfterBackup >> TSchemeShardTest::DropPQFail [GOOD] >> TSchemeShardTest::DropPQAbort >> TColumnShardTestSchema::RebootHotTiers >> TSchemeShardTest::DropTableById [GOOD] >> TSchemeShardTest::ManyDirs >> SystemView::CollectScriptingQueries [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest >> TSchemeShardTest::SplitAlterCopy [GOOD] >> TSchemeShardTest::TopicReserveSize >> TSchemeShardTest::MultipleColumnFamilies [GOOD] >> TSchemeShardTest::MultipleColumnFamiliesWithStorage >> TSchemeShardTest::AssignBlockStoreVolumeDuringAlter [GOOD] >> TSchemeShardTest::AssignBlockStoreCheckVersionInAlter >> Yq_1::Basic >> SystemView::ShowCreateTableChangefeeds [GOOD] >> SystemView::ShowCreateTableColumnAlterColumn >> TSchemeShardTest::ConsistentCopyTableToDeletedPath [GOOD] >> TSchemeShardTest::CopyIndexedTable >> KqpScan::NullInKey [GOOD] >> KqpScan::NullInKeySuffix >> TSchemeShardTest::CreateTableWithUniformPartitioning [GOOD] >> TSchemeShardTest::CreateTableWithSplitBoundaries >> TSchemeShardTest::AlterTableConfig [GOOD] >> TSchemeShardTest::AlterTableCompactionPolicy >> SystemView::TabletsRanges [GOOD] >> SystemView::TabletsRangesPredicateExtractDisabled >> TSchemeShardTest::AssignBlockStoreCheckVersionInAlter [GOOD] >> TSchemeShardTest::AssignBlockStoreCheckFillGenerationInAlter >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst >> TSchemeShardTest::CreateIndexedTableAfterBackup [GOOD] >> TSchemeShardTest::CreateFinishedInDescription |82.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |82.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TSchemeShardTest::TopicReserveSize [GOOD] >> TSchemeShardTest::TopicWithAutopartitioningReserveSize >> PrivateApi::PingTask >> TSchemeShardTest::CreateFinishedInDescription [GOOD] >> TSchemeShardTest::CreateBlockStoreVolume >> TSchemeShardTest::AssignBlockStoreCheckFillGenerationInAlter [GOOD] >> TSchemeShardTest::BlockStoreVolumeLimits >> TSchemeShardTest::CopyIndexedTable [GOOD] >> KqpScan::UnionSameTable [GOOD] >> TSchemeShardTest::CreateTableWithSplitBoundaries [GOOD] >> TSchemeShardTest::CreateTableWithConfig >> TSchemeShardTest::CopyTable >> KqpSplit::StreamLookupSplitAfterFirstResult [GOOD] >> TSchemeShardTest::MultipleColumnFamiliesWithStorage [GOOD] >> TSchemeShardTest::ParallelModifying >> KqpSplit::StreamLookupRetryAttemptForFinishedRead >> VectorIndexBuildTest::Metering_ServerLessDB_Restarts-doRestarts-true [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive |82.3%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::CollectScriptingQueries [GOOD] Test command err: 2025-06-30T09:22:18.475216Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670303726785189:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:18.475240Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003074/r3tmp/tmphZ3H1O/pdisk_1.dat 2025-06-30T09:22:18.575398Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:18.579983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:18.580008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:18.595405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20610, node 1 2025-06-30T09:22:18.656068Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:18.656083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:18.656085Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:18.656139Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17906 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:22:18.711043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:22:18.720374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) 2025-06-30T09:22:18.735247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:18.739552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:18.739580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:18.741101Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-30T09:22:18.742944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:18.742969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:18.743617Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-30T09:22:18.791150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:18.803548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:18.917463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 
2025-06-30T09:22:18.939136Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670306013035862:2095];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:18.939167Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:18.941840Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670303196751817:2155];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:18.943759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:18.948853Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:18.955217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:18.955245Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:18.956002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:18.956014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:18.958667Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:22:18.959165Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-30T09:22:18.959204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:18.960061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:19.160561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:19.216490Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670308021753694:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:19.216527Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:19.216648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670308021753706:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:19.217606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715663:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:19.243418Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670308021753708:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715663 completed, doublechecking } 2025-06-30T09:22:19.339331Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670308021753779:2934] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:19.422929Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz029zef4q2s3eyct30k19fa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmU2NjgwYWYtYzM3NThiYjAtOTAwYmIxNGItYTljMTc4NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:19.443862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:19.478816Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:19.539143Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz029zr10geqpkhpnh19j5b0, Database: , DatabaseId: /Root, SessionId: ydb:// ... 22:46.386945Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/.sys
: Error: Access denied 2025-06-30T09:22:46.391046Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/Tenant1/.sys
: Error: Access denied 2025-06-30T09:22:46.395333Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/.sys/partition_stats
: Error: Access denied 2025-06-30T09:22:46.399404Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/Tenant1/.sys/partition_stats
: Error: Access denied 2025-06-30T09:22:46.416990Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 35 2025-06-30T09:22:46.417161Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(35, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:46.417193Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 36 2025-06-30T09:22:46.417255Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:46.418964Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 33 2025-06-30T09:22:46.419089Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(33, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:46.419367Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 34 2025-06-30T09:22:46.419438Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:46.421060Z node 32 :HIVE WARN: hive_impl.cpp:948: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[34:7521670423127801999:2106], Type=268959746 2025-06-30T09:22:46.689110Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:46.690440Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:46.691254Z node 35 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:46.691390Z node 36 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:46.759234Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:46.760420Z node 33 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:47.435707Z node 37 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[37:7521670429332424542:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:47.435763Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003074/r3tmp/tmp6K03Sp/pdisk_1.dat 2025-06-30T09:22:47.460992Z node 37 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12220, node 37 2025-06-30T09:22:47.490235Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:47.490247Z node 37 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:47.490249Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:47.490310Z node 37 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19060 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:47.536466Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:47.536513Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:47.537279Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:22:47.540796Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:47.542528Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:47.544341Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.825392Z node 37 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [37:7521670429332425199:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.825427Z node 37 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [37:7521670429332425191:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.825467Z node 37 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.826689Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:47.829891Z node 37 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [37:7521670429332425205:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:22:47.927776Z node 37 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [37:7521670429332425256:2379] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:47.945874Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02avcga718jtcvnswwx51d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=YjJkNTFlZGUtNDhlYWE4ODQtOWE1Yzg3OTMtOTAyYmRjMzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:47.975115Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz02avgqbn83zxhbgbb7wmbx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=NzFmMDdlMzUtZTg5MDAyNjQtYTJkMjA0MTEtMTcxODRhZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:47.982495Z node 37 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275368017, txId: 281474976715662] shutting down 2025-06-30T09:22:48.015630Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz02avhp075pxvrhkrbyrwd7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=ZTBmZTkzODQtZmQ1NzEyYzgtNTEzNDE2MTctZmYzOTdmMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:48.016326Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [37:7521670433627392682:2325], owner: [37:7521670433627392678:2323], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:48.019097Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [37:7521670433627392682:2325], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:48.019231Z node 37 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [37:7521670433627392682:2325], row count: 2, finished: 1 2025-06-30T09:22:48.019266Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [37:7521670433627392682:2325], owner: [37:7521670433627392678:2323], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:48.020539Z node 37 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275368015, txId: 281474976715664] shutting down >> TSchemeShardTest::CreateBlockStoreVolume [GOOD] >> TSchemeShardTest::CreateBlockStoreVolumeWithVolumeChannelsProfiles >> TSchemeShardTest::AlterTableCompactionPolicy [GOOD] >> TSchemeShardTest::AlterPersQueueGroup >> TSchemeShardTest::TopicWithAutopartitioningReserveSize [GOOD] >> TSchemeShardTest::CopyTable [GOOD] >> KqpScan::GrepLimit [GOOD] >> KqpScan::GrepNonKeyColumns >> TSchemeShardTest::CopyTableAndConcurrentChanges >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] >> SystemView::TabletsRangesPredicateExtractDisabled [GOOD] >> TSchemeShardTest::CreateBlockStoreVolumeWithVolumeChannelsProfiles [GOOD] >> TSchemeShardTest::CreateBlockStoreVolumeWithNonReplicatedPartitions >> TSchemeShardTest::CreateTableWithConfig [GOOD] >> TSchemeShardTest::CreateTableWithNamedConfig |82.4%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/idx_test/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.4%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::UnionSameTable [GOOD] Test command err: Trying to start YDB, gRPC: 17317, MsgBus: 63212 2025-06-30T09:22:46.763138Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670423286882833:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:46.763171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e78/r3tmp/tmpwYgj2p/pdisk_1.dat 2025-06-30T09:22:46.825917Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 17317, node 1 2025-06-30T09:22:46.833352Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:46.833431Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670423286882806:2080] 1751275366762955 != 1751275366762958 2025-06-30T09:22:46.833483Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:46.833489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:46.833492Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:46.833532Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63212 2025-06-30T09:22:46.864930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:46.864955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:46.866116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63212 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:22:46.906432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:46.912103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:46.932913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:46.952361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:46.964999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.111009Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670427581851703:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.111031Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.158574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.166857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.178557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.192643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.206811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.221207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.234868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.250471Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670427581852353:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.250498Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.250501Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670427581852358:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.251278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:47.254388Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670427581852360:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:47.344985Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670427581852411:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:47.585996Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521670427581852730:2469] TxId: 281474976715673. Ctx: { TraceId: 01jz02av31btx21jx7n1hef5ax, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmI4ZWZjNTgtNTJkMzgzMTctODgzNjk5N2MtNWFmN2I3M2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-30T09:22:47.589975Z node 1 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [1:7521670427581852799:2057], tablet: [1:7521670423286883571:2284], scanId: 3, table: /Root/EightShard 2025-06-30T09:22:47.590003Z node 1 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [1:7521670427581852801:2058], tablet: [1:7521670423286883573:2285], scanId: 5, table: /Root/EightShard 2025-06-30T09:22:47.590461Z node 1 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [1:7521670427581852807:2060], tablet: [1:7521670423286883594:2290], scanId: 4, table: /Root/EightShard 2025-06-30T09:22:47.593120Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275367632, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 21000, MsgBus: 6126 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e78/r3tmp/tmpcECRV2/pdisk_1.dat 2025-06-30T09:22:47.894756Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670430921710318:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:47.897356Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:47.913015Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:47.914840Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670430921710287:2080] 1751275367894503 != 1751275367894506 TServer::EnableGrpc on GrpcPort 21000, node 2 2025-06-30T09:22:47.921330Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:47.921341Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:47.921344Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:47.921390Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6126 TClient is connected to server localhost:6126 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:48.000099Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:48.000135Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:48.000732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:48.002475Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:22:48.003115Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:48.014091Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:48.027053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:48.053645Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:22:48.066539Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:48.400711Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670435216679188:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:48.400747Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:48.410929Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.424094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.435292Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.446941Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.461001Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.474491Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.489924Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.509940Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670435216679839:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:48.509972Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:48.510090Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670435216679844:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:48.511406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:48.515134Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670435216679846:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:48.603312Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670435216679897:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:48.757357Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275368801, txId: 281474976715672] shutting down >> KqpScan::NullInKeySuffix [GOOD] >> KqpScan::Offset |82.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations >> TSchemeShardTest::CreateBlockStoreVolumeWithNonReplicatedPartitions [GOOD] >> TSchemeShardTest::CreateAlterBlockStoreVolumeWithInvalidPoolKinds |82.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |82.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations >> TSchemeShardTest::BlockStoreVolumeLimits [GOOD] >> TSchemeShardTest::BlockStoreNonreplVolumeLimits ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::TopicWithAutopartitioningReserveSize [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:44.759296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:44.759322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.759327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:44.759331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:44.759337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:44.759340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:44.759349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.766278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# 
false 2025-06-30T09:22:44.766412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:44.766492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:44.789054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:44.789078Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:44.792750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:44.792808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:44.792847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:44.795147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:44.795242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:44.795376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.795446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:44.801656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.801764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:44.802086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:44.802127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:44.802133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:44.802148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.803970Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:44.830420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 
1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:44.830497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.830553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:44.830561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:44.830621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:44.830636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:44.831420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.831460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:44.831504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.831514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:44.831520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:44.831527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:44.832131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.832146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:44.832152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:44.832590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.832602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.832606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.832612Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:44.833172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:44.833651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:44.833701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:44.833944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.834048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:44.834057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.834099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:44.834113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:44.834673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.834685Z node 1 :FLAT_TX_SCHEMESHARD ... 
2075186233409551, partId: 0 2025-06-30T09:22:49.121615Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000005 2025-06-30T09:22:49.121624Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:623: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000005 2025-06-30T09:22:49.121630Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:264: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:6, shard: 72075186233409551, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-30T09:22:49.121636Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:628: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-30T09:22:49.121686Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 104:0 128 -> 240 2025-06-30T09:22:49.121761Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-30T09:22:49.124250Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:22:49.124283Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:22:49.124303Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:22:49.124318Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:22:49.124339Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:22:49.124356Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:22:49.124386Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:49.124393Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:22:49.124465Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:49.124471Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [13:211:2211], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-30T09:22:49.124563Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 
72057594046678944 2025-06-30T09:22:49.124573Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-30T09:22:49.124591Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:22:49.124597Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:22:49.124604Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:22:49.124608Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:22:49.124614Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-30T09:22:49.124621Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:22:49.124629Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-30T09:22:49.124634Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:0 2025-06-30T09:22:49.124688Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 10 2025-06-30T09:22:49.124696Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 1, subscribers: 0 2025-06-30T09:22:49.124701Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-30T09:22:49.124826Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:22:49.124840Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:22:49.124849Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:22:49.124855Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-30T09:22:49.124860Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-30T09:22:49.124876Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-30T09:22:49.125978Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 
TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-30T09:22:49.144720Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-30T09:22:49.144739Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-30T09:22:49.144850Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-30T09:22:49.144877Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-30T09:22:49.144883Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [13:1464:3261] TestWaitNotification: OK eventTxId 104 2025-06-30T09:22:49.144992Z node 13 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:49.145040Z node 13 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 63us result status StatusSuccess 2025-06-30T09:22:49.145230Z node 13 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 4 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 6 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 7 PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "A" } Status: Inactive ParentPartitionIds: 0 ChildPartitionIds: 3 ChildPartitionIds: 4 } Partitions { PartitionId: 2 TabletId: 72075186233409549 KeyRange { FromBound: "A" } Status: Inactive ParentPartitionIds: 0 ChildPartitionIds: 5 } Partitions { PartitionId: 3 TabletId: 72075186233409550 KeyRange { ToBound: "0" } Status: Active ParentPartitionIds: 1 } Partitions { PartitionId: 4 TabletId: 72075186233409551 KeyRange { FromBound: "0" ToBound: "A" } Status: Inactive ParentPartitionIds: 1 ChildPartitionIds: 5 } Partitions { PartitionId: 5 TabletId: 72075186233409552 KeyRange { FromBound: "0" } Status: Active ParentPartitionIds: 2 ParentPartitionIds: 4 } AlterVersion: 4 BalancerTabletID: 72075186233409547 NextPartitionId: 6 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 
50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 494 AccountSize: 494 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 6 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } 2025-06-30T09:22:48.706097Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:22:48.707464Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:22:48.707560Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928037] doesn't have tx info 2025-06-30T09:22:48.707570Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:22:48.707576Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-30T09:22:48.707584Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:22:48.707595Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:48.707607Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-30T09:22:48.707802Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:265:2255], now have 1 active actors on pipe 2025-06-30T09:22:48.707827Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-30T09:22:48.710457Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-30T09:22:48.711585Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 
3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-30T09:22:48.711624Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:48.711818Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928037] Config applied version 1 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-30T09:22:48.711855Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:22:48.711952Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:22:48.712046Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:273:2261] 2025-06-30T09:22:48.712742Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 2025-06-30T09:22:48.712759Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:273:2261] 2025-06-30T09:22:48.712769Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:22:48.712782Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-30T09:22:48.712870Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:22:48.713001Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:276:2263], now have 1 active actors on pipe 2025-06-30T09:22:48.725681Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:22:48.728580Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928137] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:22:48.728709Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928137] doesn't have tx info 2025-06-30T09:22:48.728719Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:22:48.728725Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-06-30T09:22:48.728733Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:22:48.728744Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:48.728757Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928137] doesn't have tx writes info 2025-06-30T09:22:48.728962Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928137] server connected, pipe [2:409:2362], now have 1 active actors on pipe 2025-06-30T09:22:48.728988Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-06-30T09:22:48.729058Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-30T09:22:48.729851Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-30T09:22:48.729890Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:48.730043Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928137] Config applied version 2 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-30T09:22:48.730070Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:22:48.730148Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:22:48.730187Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [2:417:2368] 2025-06-30T09:22:48.730874Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 
2025-06-30T09:22:48.730896Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [2:417:2368] 2025-06-30T09:22:48.730907Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:22:48.730917Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-30T09:22:48.730996Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928137, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:22:48.731136Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928137] server connected, pipe [2:420:2370], now have 1 active actors on pipe 2025-06-30T09:22:48.734953Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:22:48.736262Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928138] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:22:48.736337Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928138] doesn't have tx info 2025-06-30T09:22:48.736343Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:22:48.736347Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-30T09:22:48.736353Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:22:48.736360Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:48.736369Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-30T09:22:48.736513Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [2:469:2407], now have 1 active actors on pipe 2025-06-30T09:22:48.736522Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-30T09:22:48.736574Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-30T09:22:48.737032Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-30T09:22:48.737058Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:48.737174Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: 
[PQ: 72057594037928138] Config applied version 3 actor [2 ... me: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-30T09:22:49.264966Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:49.265077Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928138] Config applied version 11 actor [4:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-30T09:22:49.265112Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:22:49.265176Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:22:49.265216Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [4:479:2415] 2025-06-30T09:22:49.265729Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 2025-06-30T09:22:49.265739Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [4:479:2415] 2025-06-30T09:22:49.265748Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:22:49.265758Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-30T09:22:49.265816Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928138, Partition: 1, State: StateIdle] no data for compaction 2025-06-30T09:22:49.265915Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [4:482:2417], now have 1 active actors on pipe 2025-06-30T09:22:49.269580Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:22:49.270419Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:22:49.270495Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-30T09:22:49.270502Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:22:49.270505Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-30T09:22:49.270510Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:22:49.270517Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:49.270526Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-30T09:22:49.270663Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:531:2454], now have 1 active actors on pipe 2025-06-30T09:22:49.270672Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-30T09:22:49.270717Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928139] Config update version 12(current 0) received from actor [4:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-30T09:22:49.271268Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-30T09:22:49.271304Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:49.271403Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928139] Config applied version 12 actor [4:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-30T09:22:49.271422Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:22:49.271473Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:22:49.271499Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:539:2460] 2025-06-30T09:22:49.271950Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-06-30T09:22:49.271962Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [4:539:2460] 2025-06-30T09:22:49.271967Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:22:49.271972Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-30T09:22:49.272032Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-30T09:22:49.272113Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:542:2462], now have 1 active actors on pipe 2025-06-30T09:22:49.272309Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [4:548:2465], now have 1 active actors on pipe 2025-06-30T09:22:49.272327Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [4:549:2466], now have 1 active actors on pipe 2025-06-30T09:22:49.272369Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:550:2466], now have 1 active actors on pipe 2025-06-30T09:22:49.282914Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:558:2473], now have 1 active actors on pipe 2025-06-30T09:22:49.290505Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:22:49.291428Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:22:49.291518Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-30T09:22:49.291528Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-30T09:22:49.291568Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:22:49.291706Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:49.291720Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-30T09:22:49.291746Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:22:49.291816Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:22:49.291853Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:615:2518] 2025-06-30T09:22:49.292475Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-30T09:22:49.292767Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-30T09:22:49.292821Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-30T09:22:49.292876Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-30T09:22:49.292911Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-30T09:22:49.292918Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-30T09:22:49.292924Z node 4 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:22:49.292930Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-30T09:22:49.292940Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [4:615:2518] 2025-06-30T09:22:49.292950Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:22:49.292959Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-30T09:22:49.293007Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-30T09:22:49.293222Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928138] server disconnected, pipe [4:549:2466] destroyed 2025-06-30T09:22:49.293240Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928037] server disconnected, pipe [4:548:2465] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionOffsetsResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 StartOffset: 0 EndOffset: 0 ErrorCode: OK WriteTimestampEstimateMS: 0 } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 StartOffset: 0 EndOffset: 0 ErrorCode: OK WriteTimestampEstimateMS: 0 } PartitionResult { Partition: 2 ErrorCode: INITIALIZING ErrorReason: "partition is not ready yet" } ErrorCode: OK } } } >> SystemView::ShowCreateTableTemporary [GOOD] >> SystemView::ShowCreateTableSequences ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::Metering_ServerLessDB_Restarts-doRestarts-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:14.323101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:14.323130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:14.323137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:14.323143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:14.323159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:14.323164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:14.323175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:14.323192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:14.323315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:14.323391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 
2025-06-30T09:22:14.341258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:14.341289Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:14.347331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:14.347420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:14.347471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:14.350069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:14.350202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:14.350352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:14.350466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:14.351586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:14.351662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:14.352050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:14.352063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:14.352094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:14.352107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:14.352114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:14.352141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.353967Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:14.377495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:14.377601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.377689Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:14.377698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:14.377771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:14.377789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:14.378929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:14.378981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:14.379043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.379055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:14.379062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:14.379069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:14.379655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.379668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:14.379678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:14.380118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.380129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:14.380136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:14.380155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:14.380892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { 
TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:14.381421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:14.381466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:14.381697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:14.381728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:14.381761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:14.381839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:14.381847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:14.381884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:14.381899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:14.382384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:14.382394Z node 1 :FLAT_TX_SCHEMESHARD ... 
X_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.977732Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 1 2025-06-30T09:22:48.977809Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 3, at schemeshard: 72075186233409549 2025-06-30T09:22:48.977850Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 1 2025-06-30T09:22:48.977862Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 4] was 0 2025-06-30T09:22:48.977872Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 5] was 0 2025-06-30T09:22:48.977887Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__tenant_data_erasure_manager.cpp:401: [TenantDataErasureManager] Restore: Generation# 0, Status# 0, NumberDataErasureShardsInRunning# 0 2025-06-30T09:22:48.977948Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 9, at schemeshard: 72075186233409549 2025-06-30T09:22:48.977982Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978007Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 8, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978014Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 2 2025-06-30T09:22:48.978018Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 3 2025-06-30T09:22:48.978023Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 4 2025-06-30T09:22:48.978027Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 2 2025-06-30T09:22:48.978031Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-30T09:22:48.978035Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 4 2025-06-30T09:22:48.978039Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 4] was 1 2025-06-30T09:22:48.978044Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 5] was 1 2025-06-30T09:22:48.978074Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 5, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978126Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72075186233409549 
2025-06-30T09:22:48.978170Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 15, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978233Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 1, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978244Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72075186233409549, LocalPathId: 3] was 2 2025-06-30T09:22:48.978266Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 1, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978334Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978345Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978392Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978411Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978429Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978465Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978479Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978525Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978589Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 1, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978611Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4706: KMeansTreeSample records: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978633Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4765: KMeansTreeCluster records: 4, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978664Z node 3 :BUILD_INDEX DEBUG: schemeshard_info_types.h:3702: AddShardStatus id# 109 shard 72075186233409549:9 2025-06-30T09:22:48.978680Z node 3 :BUILD_INDEX DEBUG: schemeshard_info_types.h:3702: AddShardStatus id# 109 shard 72075186233409549:10 2025-06-30T09:22:48.978687Z node 3 :BUILD_INDEX DEBUG: schemeshard_info_types.h:3702: AddShardStatus id# 109 shard 72075186233409549:11 2025-06-30T09:22:48.978694Z node 3 :BUILD_INDEX DEBUG: schemeshard_info_types.h:3702: AddShardStatus id# 109 shard 72075186233409549:12 2025-06-30T09:22:48.978705Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4854: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978715Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4881: SnapshotSteps: snapshots: 0, at schemeshard: 72075186233409549 2025-06-30T09:22:48.978727Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4908: LongLocks: records: 1, at schemeshard: 72075186233409549 2025-06-30T09:22:48.981509Z 
node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_domain_links.cpp:48: Send TEvSyncTenantSchemeShard, to parent: [OwnerId: 72057594046678944, LocalPathId: 3], from: 72075186233409549 2025-06-30T09:22:48.981559Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__tenant_data_erasure_manager.cpp:80: [TenantDataErasureManager] Stop 2025-06-30T09:22:48.982104Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 109 Done 2025-06-30T09:22:48.982171Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 109 Done TBuildInfo{ IndexBuildId: 109, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index1, IndexColumn: embedding, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [0:0:0], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976725757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976740757, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976740758, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 420 UploadBytes: 6220 ReadRows: 2000 ReadBytes: 26000, Billed: UploadRows: 420 UploadBytes: 6220 ReadRows: 2000 ReadBytes: 26000} 2025-06-30T09:22:48.982179Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:336: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 109, subscribers count# 0 2025-06-30T09:22:48.982914Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5900: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 3 TabletID: 72075186233409549 Generation: 6 EffectiveACLVersion: 0 SubdomainVersion: 2 UserAttributesVersion: 1 TenantHive: 18446744073709551615 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-06-30T09:22:48.982947Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:22:48.982974Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 6, ActorId:[3:4446:6107], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:22:48.982993Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 
72057594046678944 2025-06-30T09:22:48.983148Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409549 2025-06-30T09:22:48.983158Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409549 2025-06-30T09:22:48.983197Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:48.983209Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:53: TTxServerlessStorageBilling: unable to make a bill, AllowServerlessStorageBilling is false, schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], next retry at: 1970-01-01T00:01:00.000000Z 2025-06-30T09:22:48.983216Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:48.983248Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72075186233409549 2025-06-30T09:22:49.056454Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__monitoring.cpp:1535: Handle TEvRemoteHttpInfo: /app?Page=BuildIndexInfo&BuildIndexId=109 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::TabletsRangesPredicateExtractDisabled [GOOD] Test command err: 2025-06-30T09:22:15.414446Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670292080653307:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:15.414650Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0030ef/r3tmp/tmpcY8UP0/pdisk_1.dat 2025-06-30T09:22:15.474851Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22902, node 1 2025-06-30T09:22:15.494793Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:15.494807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:15.494809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:15.494857Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:15.516476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.516505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.517640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63569 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:15.557324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:15.561220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:15.894966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670292080653885:2287], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.894988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670292080653893:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.895007Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.895958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:15.898107Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670292080653899:2291], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:22:15.968499Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670292080653950:2326] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:16.028480Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521670296375621281:2286] TxId: 281474976715661. Ctx: { TraceId: 01jz029vwh7epxw5qj980v44rc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM3NzdlYjctZjliNmVjMGYtY2NmMjg0MGQtMzdjODA1MWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database 2025-06-30T09:22:16.028567Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz029vwh7epxw5qj980v44rc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM3NzdlYjctZjliNmVjMGYtY2NmMjg0MGQtMzdjODA1MWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:16.033809Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7521670296375621288:2299], owner: [1:7521670296375621284:2297], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:16.034038Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7521670296375621288:2299], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:16.034177Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7521670296375621288:2299], row count: 0, finished: 1 2025-06-30T09:22:16.034207Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7521670296375621288:2299], owner: [1:7521670296375621284:2297], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:16.035838Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275336027, txId: 281474976715660] shutting down 2025-06-30T09:22:16.417179Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:17.050504Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz029xad6k5kyfnwxkz8jrw6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODQ0NmE2NDQtOWQ4NzRjNTktNjc3ZjgxZTEtZGMyNWU0OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:17.051177Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7521670300670588635:2313], owner: [1:7521670300670588632:2311], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:17.051414Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7521670300670588635:2313], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:17.051491Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7521670300670588635:2313], row count: 0, finished: 1 2025-06-30T09:22:17.051505Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7521670300670588635:2313], owner: [1:7521670300670588632:2311], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:17.052069Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275337049, txId: 281474976715662] shutting down 2025-06-30T09:22:18.075136Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz029ya59m541qpqxry0vex2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTUyMDQ3NS0yNmE5Zjg1MS03MGZmNWUxZS01NjgyNmJjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:18.075941Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7521670304965555969:2324], owner: [1:7521670304965555965:2322], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:18.079116Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7521670304965555969:2324], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:18.079251Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7521670304965555969:2324], row count: 0, finished: 1 2025-06-30T09:22:18.079291Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7521670304965555969:2324], owner: [1:7521670304965555965:2322], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:18.080177Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275338074, txId: 281474976715664] shutting down 2025-06-30T09:22:19.114797Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz029zah8vfaempcjx943nq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWYzNTQwMWMtYmY4MTEzMjMtOGEzMGExMzktN2VjZGViYTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:19.115604Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7521670309260523301:2335], owner: [1:7521670309260523298:2333], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:19.126988Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7521670309260523301:2335], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:19.127191Z node 1 ... Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:48.257220Z node 20 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275368252, txId: 281474976715675] shutting down 2025-06-30T09:22:48.276655Z node 20 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. Ctx: { TraceId: 01jz02avt3b35gpw66cwaydngb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=20&id=ZWNkMjdmNTAtYWI0ZDA0OTEtMTAxYjRiNmItYmE2YjQzMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:48.277184Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [20:7521670435662893385:2396], owner: [20:7521670435662893381:2394], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:48.277346Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [20:7521670435662893385:2396], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:48.277462Z node 20 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [20:7521670435662893385:2396], row count: 4, finished: 1 2025-06-30T09:22:48.277479Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [20:7521670435662893385:2396], owner: [20:7521670435662893381:2394], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:48.278225Z node 20 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275368276, txId: 281474976715677] shutting down 2025-06-30T09:22:48.303275Z node 20 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jz02avtqbrdebr0vt8zam3x5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=20&id=Nzg1YzU1YzctODA5YTVhYTMtODNkOGY3ZjEtZmFkMWZkYzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:48.304058Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [20:7521670435662893416:2405], owner: [20:7521670435662893413:2403], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:48.306677Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [20:7521670435662893416:2405], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:48.307067Z node 20 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [20:7521670435662893416:2405], row count: 4, finished: 1 2025-06-30T09:22:48.307099Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [20:7521670435662893416:2405], owner: [20:7521670435662893413:2403], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:48.309773Z node 20 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275368302, txId: 281474976715679] shutting down 2025-06-30T09:22:48.578928Z node 21 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[21:7521670434132945268:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:48.579663Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0030ef/r3tmp/tmpCxkfRS/pdisk_1.dat 2025-06-30T09:22:48.594013Z node 21 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20442, node 21 2025-06-30T09:22:48.614988Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:48.615009Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:48.615011Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:48.615073Z node 21 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10919 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:22:48.678236Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:48.678275Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:48.679313Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:48.682272Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:49.062063Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.086588Z node 21 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [21:7521670438427913294:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.086617Z node 21 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.086757Z node 21 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [21:7521670438427913306:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.087727Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:49.096114Z node 21 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [21:7521670438427913308:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:22:49.157845Z node 21 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [21:7521670438427913359:2476] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:49.193192Z node 21 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02awkx0j09qq7ak3kvcmrz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=21&id=MmM4Nzg2ODMtNThiNGFiY2ItMzAzMjEyZjktNGIxZDFmNGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:49.193948Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [21:7521670438427913402:2323], owner: [21:7521670438427913401:2322], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:49.194275Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [21:7521670438427913402:2323], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:49.194459Z node 21 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [21:7521670438427913402:2323], row count: 4, finished: 1 2025-06-30T09:22:49.194480Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [21:7521670438427913402:2323], owner: [21:7521670438427913401:2322], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:49.194534Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [21:7521670438427913408:2326], owner: [21:7521670438427913401:2322], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:49.194885Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [21:7521670438427913408:2326], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:49.198930Z node 21 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [21:7521670438427913408:2326], row count: 4, finished: 1 2025-06-30T09:22:49.198979Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [21:7521670438427913408:2326], owner: [21:7521670438427913401:2322], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:49.200061Z node 21 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275369191, txId: 281474976715661] shutting down >> TSchemeShardTest::CreateAlterBlockStoreVolumeWithInvalidPoolKinds [GOOD] >> TSchemeShardTest::CreateDropKesus >> TSchemeShardTest::CreateTableWithNamedConfig [GOOD] >> TSchemeShardTest::CreateTableWithUnknownNamedConfig >> TSchemeShardTest::ParallelModifying [GOOD] >> TSchemeShardTest::PQGroupExplicitChannels >> TSchemeShardTest::BlockStoreNonreplVolumeLimits 
[GOOD] >> TSchemeShardTest::BlockStoreSystemVolumeLimits >> TSchemeShardTest::CreateTableWithUnknownNamedConfig [GOOD] >> TSchemeShardTest::DefaultColumnFamiliesWithNonCanonicName >> KqpSplit::StreamLookupRetryAttemptForFinishedRead [GOOD] |82.4%| [TA] $(B)/ydb/core/tablet_flat/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardTest::CopyTableAndConcurrentChanges [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentSplit >> TSchemeShardTest::CreateDropKesus [GOOD] >> TSchemeShardTest::CreateAlterKesus >> IndexBuildTest::MergeIndexTableShardsOnlyWhenReady [GOOD] >> IndexBuildTest::IndexPartitioningIsPersisted >> KqpScan::GrepNonKeyColumns [GOOD] >> TSchemeShardTest::PQGroupExplicitChannels [GOOD] >> TSchemeShardTest::ReadOnlyMode >> Yq_1::ListConnections >> KqpScan::Offset [GOOD] >> TSchemeShardTest::BlockStoreSystemVolumeLimits [GOOD] >> TSchemeShardTest::AlterTableWithCompactionStrategies >> TDataShardLocksTest::UseLocksCache [GOOD] >> TSchemeShardTest::CreateAlterKesus [GOOD] >> TSchemeShardTest::CreateDropSolomon ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpSplit::StreamLookupRetryAttemptForFinishedRead [GOOD] Test command err: Trying to start YDB, gRPC: 26534, MsgBus: 13357 2025-06-30T09:22:47.360861Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670428729363675:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:47.361046Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e5b/r3tmp/tmpgKSSd7/pdisk_1.dat 2025-06-30T09:22:47.434033Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:47.434125Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670428729363653:2080] 1751275367360655 != 1751275367360658 TServer::EnableGrpc on GrpcPort 26534, node 1 2025-06-30T09:22:47.456117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:47.456130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:47.456132Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:47.456177Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13357 2025-06-30T09:22:47.510896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:47.510923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:47.520924Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13357 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:47.546671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:47.550322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:22:47.560039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.628898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.662163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.730518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.905578Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670428729365254:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.905614Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.962283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.972361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.984131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.000456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.058328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.068345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.082979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.098871Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670433024333206:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:48.098940Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:48.098941Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670433024333211:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:48.100094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:48.113179Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670433024333213:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:22:48.192925Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670433024333264:3401] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:48.353264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.363182Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:48.446633Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { TraceId: 01jz02avxbffsaq871pbdzhdm8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTQzYmIxZmQtOTI3NzlhNmQtOGI2OTEyZD ... on GrpcPort 12185, node 2 2025-06-30T09:22:49.135922Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:49.135938Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:49.135941Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:49.135996Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16699 TClient is connected to server localhost:16699 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:22:49.212687Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:49.212727Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:49.213229Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:49.213957Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:49.214679Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:49.217424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:49.232292Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.257047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:49.270387Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:49.456116Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670440424699110:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.456146Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.469005Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.478160Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.489399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.503616Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.517173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.531262Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.545472Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.561552Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670440424699761:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.561579Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670440424699766:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.561600Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.562424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:49.572041Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670440424699768:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:49.623744Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670440424699819:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:49.755006Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.823739Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jz02ax95d9jbcjnzf5mesc4m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmU1ZjNiYjktNzc4ZGJhYzUtYjVmYjg1OWMtZGZhZTA1Mjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:49.826374Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jz02ax95d9jbcjnzf5mesc4m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmU1ZjNiYjktNzc4ZGJhYzUtYjVmYjg1OWMtZGZhZTA1Mjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:49.882607Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jz02axb81pfxc7tkgq8f24eb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTExN2M5ZmYtOTE3ZDJiNTMtMTU3MmU4MTQtY2MwOWZhYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root captured evread ----------------------------------------------------------- 2025-06-30T09:22:49.885222Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275369928, txId: 281474976715675] shutting down >> TSchemeShardTest::AlterTableWithCompactionStrategies [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-false >> Yq_1::ModifyConnections >> TSchemeShardTest::ReadOnlyMode [GOOD] >> TSchemeShardTest::PathErrors ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::GrepNonKeyColumns [GOOD] Test command err: Trying to start YDB, gRPC: 25519, MsgBus: 7147 2025-06-30T09:22:46.954562Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670427106444306:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:46.954678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e62/r3tmp/tmpBJEpZL/pdisk_1.dat 2025-06-30T09:22:47.024193Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25519, node 1 2025-06-30T09:22:47.040242Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:47.040259Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:47.040262Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:47.040320Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7147 TClient is connected to server localhost:7147 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:22:47.100000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:47.100039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:47.101061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:22:47.106626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:47.110666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:22:47.113315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.136383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.159452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.172608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.395188Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670431401412996:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.395227Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.432727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.444248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.457317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.472457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.483204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.498685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.510571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.527655Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670431401413647:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.527691Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.530944Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670431401413652:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.532170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:47.535238Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670431401413654:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:47.629809Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670431401413705:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:47.899386Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275367940, txId: 281474976715672] shutting down 2025-06-30T09:22:47.953703Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 25105, MsgBus: 2451 2025-06-30T09:22:48.238172Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670435191758757:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:48.238202Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e62/r3tmp/tmp3KQX2f/pdisk_1.dat 2025-06-30T09:22:48.254447Z node 2 :IMPORT WARN: schemeshard_imp ... aseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:48.876438Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670435191761044:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:49.079014Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275369123, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 20115, MsgBus: 16040 2025-06-30T09:22:49.383829Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670439475120002:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:49.384019Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e62/r3tmp/tmpvKjZWR/pdisk_1.dat 2025-06-30T09:22:49.403145Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:49.406265Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670439475119970:2080] 1751275369383606 != 1751275369383609 TServer::EnableGrpc on GrpcPort 20115, node 3 2025-06-30T09:22:49.415179Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:49.415198Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:49.415201Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:49.415264Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16040 TClient is connected to server localhost:16040 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
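Note: every "Test command err" block above repeats the same workload-manager warm-up: TPoolFetcherActor reports NOT_FOUND for the default pool, TPoolCreatorActor proposes a create and schedules a retry on "Transaction ... completed, doublechecking", and a racing creator gets "path exist, request accepts it" back from the schemeshard, which is accepted as success. A minimal, self-contained C++ sketch of that idempotent ensure-exists pattern follows; Status, fetchPool, and createPool are hypothetical stand-ins for illustration, not YDB interfaces.

// Illustrative sketch only: models the NOT_FOUND -> create -> "path exist,
// request accepts it" bootstrap sequence seen in the log. The Status values
// and the fetchPool/createPool stand-ins are hypothetical, not YDB APIs.
#include <chrono>
#include <iostream>
#include <thread>

enum class Status { Success, NotFound, AlreadyExists, Retryable };

// Stand-in for the pool lookup; the first call reports NOT_FOUND, as in the log.
Status fetchPool(int attempt) { return attempt == 0 ? Status::NotFound : Status::Success; }

// Stand-in for the create proposal; a concurrent creator may have won the
// race, in which case the schemeshard effectively answers "path exist".
Status createPool(bool raceLost) { return raceLost ? Status::AlreadyExists : Status::Success; }

// Ensure the default pool exists, treating AlreadyExists as success and
// backing off between retries, mirroring the "Scheduled retry" log lines.
bool ensureDefaultPool() {
    for (int attempt = 0; attempt < 5; ++attempt) {
        if (fetchPool(attempt) == Status::Success) return true;
        Status st = createPool(/*raceLost=*/attempt > 0);
        if (st == Status::Success || st == Status::AlreadyExists) {
            // "path exist, request accepts it": idempotent create, keep going
            // and re-fetch on the next iteration to double-check.
            continue;
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(50 << attempt));
    }
    return false;
}

int main() { std::cout << (ensureDefaultPool() ? "pool ready\n" : "gave up\n"); }

The point of the pattern is that AlreadyExists is folded into success, so concurrent bootstraps across test nodes converge on a single default pool instead of failing one another.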
2025-06-30T09:22:49.489823Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:49.489863Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:49.490337Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:49.490956Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:49.495553Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:49.553402Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:49.578971Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:49.591602Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:49.832270Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670439475121566:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.832312Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.842384Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.852089Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.861201Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.874974Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.888341Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.902973Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.916868Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.932751Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670439475122219:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.932791Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670439475122224:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.932791Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:49.933860Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:49.943099Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670439475122226:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:50.005932Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670443770089573:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:50.294081Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275370334, txId: 281474976715672] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::Offset [GOOD] Test command err: Trying to start YDB, gRPC: 23971, MsgBus: 19641 2025-06-30T09:22:47.343673Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670428984526918:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:47.343709Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e5d/r3tmp/tmptbpLJM/pdisk_1.dat 2025-06-30T09:22:47.404724Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:47.404845Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670428984526898:2080] 1751275367343501 != 1751275367343504 TServer::EnableGrpc on GrpcPort 23971, node 1 2025-06-30T09:22:47.417997Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:47.418023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:47.418025Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:47.418069Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19641 2025-06-30T09:22:47.445166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:47.445199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:47.446387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19641 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:22:47.495803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:47.499320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.569199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.593586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.656541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:47.799760Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670428984528505:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.799800Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:47.854782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.872393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.886179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.901731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.915965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.928778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.943872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.010926Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670433279496454:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:48.010963Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:48.011064Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670433279496459:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:48.012006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:48.018765Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670433279496461:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:48.080953Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670433279496512:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:48.223200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:48.301311Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275368339, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 10564, MsgBus: 19595 2025-06-30T09:22:48.490024Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670432554939317:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:48.491808Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect ... t accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:49.333063Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.390224Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275369431, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 18099, MsgBus: 5162 2025-06-30T09:22:49.660342Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670439975385062:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:49.660385Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002e5d/r3tmp/tmpQVexmq/pdisk_1.dat 2025-06-30T09:22:49.674066Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18099, node 3 2025-06-30T09:22:49.684163Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:49.684184Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:49.684186Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2025-06-30T09:22:49.684237Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5162 TClient is connected to server localhost:5162 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:49.765403Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:49.765436Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:49.765746Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:49.766308Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:49.774570Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:49.784655Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:49.805992Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
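Note: the HIVE warnings above always walk a node through the same progression, Unknown -> Disconnected -> Connecting -> Connected. A tiny C++ sketch of that progression as a state machine follows; the enum and transition function are an illustrative reconstruction, not the actual node_info.cpp logic (which also handles disconnects and restarts).

// Illustrative sketch only: the Unknown -> Disconnected -> Connecting ->
// Connected progression that HIVE logs for each node at startup.
#include <iostream>

enum class EVolatileState { Unknown, Disconnected, Connecting, Connected };

const char* Name(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return "Unknown";
        case EVolatileState::Disconnected: return "Disconnected";
        case EVolatileState::Connecting:   return "Connecting";
        case EVolatileState::Connected:    return "Connected";
    }
    return "?";
}

// Advance one step along the path seen in the log; a real hive would also
// transition back on disconnect, which this sketch omits.
EVolatileState Next(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return EVolatileState::Disconnected;
        case EVolatileState::Disconnected: return EVolatileState::Connecting;
        case EVolatileState::Connecting:   return EVolatileState::Connected;
        case EVolatileState::Connected:    return EVolatileState::Connected;
    }
    return s;
}

int main() {
    EVolatileState s = EVolatileState::Unknown;
    while (s != EVolatileState::Connected) {
        EVolatileState n = Next(s);
        std::cout << "VolatileState: " << Name(s) << " -> " << Name(n) << "\n";
        s = n;
    }
}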
2025-06-30T09:22:49.820804Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:50.122839Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670444270353921:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:50.122936Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:50.147771Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:50.166161Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:50.195892Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:50.223991Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:50.244241Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:50.269372Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:50.284876Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:50.306209Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670444270354576:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:50.306239Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:50.306254Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670444270354581:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:50.307371Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:50.314244Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670444270354583:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:50.378287Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670444270354634:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:50.634356Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275370670, txId: 281474976715672] shutting down 2025-06-30T09:22:50.662385Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> DataShardWrite::UpsertWithDefaults >> TSchemeShardTest::CopyTableAndConcurrentSplit [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentMerge >> TSchemeShardTest::DefaultColumnFamiliesWithNonCanonicName [GOOD] >> TSchemeShardTest::CreatePersQueueGroup >> IndexBuildTest::IndexPartitioningIsPersisted [GOOD] >> TSchemeShardTest::PathErrors [GOOD] >> TSchemeShardTest::NestedDirs ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::UseLocksCache [GOOD] Test command err: 2025-06-30T09:22:47.580123Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:47.580193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:47.580206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003eec/r3tmp/tmpa4DjXq/pdisk_1.dat 2025-06-30T09:22:47.694102Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:47.695272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:47.717303Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:47.721305Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275367096656 != 1751275367096660 2025-06-30T09:22:47.766063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:47.766106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:47.776907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:47.863763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:47.886880Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:636:2537]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:47.887263Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:636:2537]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:47.887380Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:636:2537] 2025-06-30T09:22:47.887458Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:22:47.888775Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:622:2529], Recipient [1:638:2539]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:47.898944Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:636:2537]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:47.899137Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:622:2529], 
Recipient [1:638:2539]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:47.899331Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:638:2539] 2025-06-30T09:22:47.899385Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:22:47.900867Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:622:2529], Recipient [1:638:2539]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:47.901324Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:22:47.901370Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:22:47.901595Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:22:47.901611Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:22:47.901621Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:22:47.901698Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:22:47.901783Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:22:47.901803Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:669:2537] in generation 1 2025-06-30T09:22:47.901903Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:22:47.901923Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:22:47.902052Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:22:47.902060Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:22:47.902068Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:22:47.902102Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:22:47.902117Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:22:47.902127Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:670:2539] in generation 1 2025-06-30T09:22:47.913016Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:22:47.921484Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:22:47.921604Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:22:47.921639Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:673:2558] 2025-06-30T09:22:47.921647Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:22:47.921653Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:22:47.921660Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037888 2025-06-30T09:22:47.921759Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:636:2537], Recipient [1:636:2537]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:47.921768Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:47.921891Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:22:47.921900Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:22:47.921913Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:22:47.921924Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:674:2559] 2025-06-30T09:22:47.921928Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:22:47.921933Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:22:47.921938Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:22:47.921988Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:22:47.922016Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:22:47.922028Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:638:2539], Recipient [1:638:2539]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:47.922033Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:47.922091Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:22:47.922101Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:22:47.922110Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:22:47.922117Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:22:47.922122Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:22:47.922129Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:22:47.922138Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:22:47.922153Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:653:2547], Recipient [1:636:2537]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:47.922159Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:47.922185Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader 
tablet# 72075186224037888, clientId# [1:629:2533], serverId# [1:653:2547], sessionId# [0:0:0] 2025-06-30T09:22:47.922194Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-30T09:22:47.922207Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-30T09:22:47.922350Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:653:2547] 2025-06-30T09:22:47.922359Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:22:47.922384Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:22:47.922439Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:22:47.922452Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 28147497 ... node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2025-06-30T09:22:50.739916Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-30T09:22:50.739922Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:22:50.739928Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:22:50.739933Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:22:50.739948Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2025-06-30T09:22:50.739954Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-30T09:22:50.739958Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:22:50.739962Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:22:50.739971Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:22:50.739988Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-30T09:22:50.740042Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v2500/18446744073709551615 2025-06-30T09:22:50.740049Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is DelayComplete 2025-06-30T09:22:50.740054Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 
executing on unit ExecuteRead 2025-06-30T09:22:50.740059Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:22:50.740062Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:22:50.740073Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-30T09:22:50.740077Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:22:50.740085Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037888 has finished 2025-06-30T09:22:50.740090Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:22:50.751010Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:22:50.751040Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2500:281474976715661] at 72075186224037888 on unit CompleteWrite 2025-06-30T09:22:50.751061Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2500 : 281474976715661] from 72075186224037888 at tablet 72075186224037888 send result to client [2:889:2676] 2025-06-30T09:22:50.751081Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037888 {TEvReadSet step# 2500 txid# 281474976715661 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-30T09:22:50.751090Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:22:50.751114Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:22:50.751119Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-30T09:22:50.751127Z node 2 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 2 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-30T09:22:50.751148Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:22:50.751166Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-30T09:22:50.751171Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:22:50.751180Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[2:955:2743], 0} after executionsCount# 1 2025-06-30T09:22:50.751194Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[2:955:2743], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:22:50.751213Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[2:955:2743], 0} finished in read 2025-06-30T09:22:50.751263Z node 
2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:927:2728], Recipient [2:637:2538]: {TEvReadSet step# 2500 txid# 281474976715661 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-30T09:22:50.751271Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:22:50.751277Z node 2 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2025-06-30T09:22:50.751606Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [2:955:2743], Recipient [2:637:2538]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 RangesSize: 1 2025-06-30T09:22:50.751641Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:955:2743], Recipient [2:927:2728]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-30T09:22:50.751677Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-30T09:22:50.751699Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-30T09:22:50.751713Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit CheckRead 2025-06-30T09:22:50.751731Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-30T09:22:50.751736Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit CheckRead 2025-06-30T09:22:50.751741Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-30T09:22:50.751747Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-30T09:22:50.751759Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037889 2025-06-30T09:22:50.751766Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-30T09:22:50.751769Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-30T09:22:50.751773Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037889 to execution unit ExecuteRead 2025-06-30T09:22:50.751777Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit ExecuteRead 2025-06-30T09:22:50.751791Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 } 2025-06-30T09:22:50.751838Z node 2 
:TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v2500/18446744073709551615 2025-06-30T09:22:50.751843Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[2:955:2743], 1} after executionsCount# 1 2025-06-30T09:22:50.751849Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[2:955:2743], 1} sends rowCount# 2, bytes# 64, quota rows left# 997, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:22:50.751860Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[2:955:2743], 1} finished in read 2025-06-30T09:22:50.751868Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-30T09:22:50.751872Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit ExecuteRead 2025-06-30T09:22:50.751876Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037889 to execution unit CompletedOperations 2025-06-30T09:22:50.751883Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit CompletedOperations 2025-06-30T09:22:50.751892Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-30T09:22:50.751896Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit CompletedOperations 2025-06-30T09:22:50.751901Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037889 has finished 2025-06-30T09:22:50.751906Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-30T09:22:50.751921Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-30T09:22:50.752185Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:955:2743], Recipient [2:637:2538]: NKikimrTxDataShard.TEvReadCancel ReadId: 1 2025-06-30T09:22:50.752196Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 1 } >> TSchemeShardTest::CreateDropSolomon [GOOD] >> TSchemeShardTest::CreateAlterDropSolomon >> TSchemeShardTest::AlterPersQueueGroup [GOOD] >> TSchemeShardTest::AlterPersQueueGroupWithKeySchema >> DataShardWrite::UpsertImmediate >> TSchemeShardTest::CreateAlterDropSolomon [GOOD] >> Yq_1::CreateConnection_With_Existing_Name >> DataShardWrite::ExecSQLUpsertImmediate+EvWrite ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::IndexPartitioningIsPersisted [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentMerge [GOOD] >> TSchemeShardTest::NestedDirs [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:22.103605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: 
BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:22.103637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:22.103644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:22.103650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:22.103667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:22.103672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:22.103683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:22.103699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:22.103833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:22.103920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:22.122664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:22.122696Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:22.126868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:22.126935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:22.126977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:22.131020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:22.131108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:22.131213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:22.131287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:22.131935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:22.131994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:22.132268Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:22.132276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:22.132299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:22.132312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:22.132317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:22.132333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.133877Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:22.162772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:22.162887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.162973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:22.162984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:22.163041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:22.163059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:22.164222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:22.164295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:22.164371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.164385Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:22.164393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:22.164400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:22.165101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.165119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:22.165130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:22.165623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.165637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.165644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.165663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:22.166532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:22.167069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:22.167117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:22.167364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:22.167399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:22.167417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.167506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change 
state for txid 1:0 128 -> 240 2025-06-30T09:22:22.167517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.167554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:22.167569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:22.168169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:22.168183Z node 1 :FLAT_TX_SCHEMESHARD ... ountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:51.234395Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:22:51.234420Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index" took 26us result status StatusSuccess 2025-06-30T09:22:51.234532Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index" PathDescription { Self { Name: "Index" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Index" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:51.234573Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: 
true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:22:51.234608Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable" took 36us result status StatusSuccess 2025-06-30T09:22:51.234704Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 
MaxPartitionsCount: 3 } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "alice" } } Tuple { } } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "bob" } } Tuple { } } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\005\000\000\000alice\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409547 } TablePartitions { EndOfRangeKeyPrefix: "\002\000\003\000\000\000bob\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 3 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTest::AlterPersQueueGroupWithKeySchema [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentSplitMerge |82.4%| [TA] $(B)/ydb/core/tx/datashard/ut_locks/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardTest::RejectAlterSolomon >> TSchemeShardTest::AlterBlockStoreVolume |82.4%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_locks/test-results/unittest/{meta.json ... 
results_accumulator.log} >> PrivateApi::PingTask [GOOD] >> PrivateApi::GetTask >> TSchemeShardTest::CreatePersQueueGroup [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-false [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true >> TSchemeShardTest::CreatePersQueueGroupWithKeySchema >> TSchemeShardTest::RejectAlterSolomon [GOOD] >> TSchemeShardTest::NewOwnerOnDatabase ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::CreateAlterDropSolomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:44.759288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:44.759316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.759324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:44.759329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:44.759336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:44.759341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:44.759350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.766282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:44.766450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:44.766558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:44.785365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:44.785389Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:44.788592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:44.788688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:44.788725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:44.792273Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:44.792412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:44.792558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.792660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:44.801749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.801866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:44.802330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:44.802410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:44.802423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:44.802458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.804566Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:44.832532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:44.832615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.832677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:44.832688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:44.832753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:44.832770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: 
Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:44.833563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:44.833656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:44.833674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:44.833681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:44.834443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.834459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:44.834466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:44.838210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.838235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.838243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.838254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:44.838981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:44.839551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:44.839585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: 
advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:44.839757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.839786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:44.839795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.839867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:44.839875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.839912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:44.839922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:44.840429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.840442Z node 1 :FLAT_TX_SCHEMESHARD ... 
72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:51.648728Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-30T09:22:51.649528Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:22:51.649542Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:22:51.649547Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:22:51.649551Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:22:51.649738Z node 16 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 Forgetting tablet 72075186233409548 2025-06-30T09:22:51.649820Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-30T09:22:51.649896Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:22:51.649985Z node 16 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-30T09:22:51.650027Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:51.650057Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409546 2025-06-30T09:22:51.650371Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:22:51.650461Z node 16 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-30T09:22:51.650498Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-30T09:22:51.650536Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:22:51.650713Z node 16 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-30T09:22:51.650777Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 Forgetting tablet 72075186233409549 2025-06-30T09:22:51.650814Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:22:51.650845Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 Forgetting tablet 72075186233409547 2025-06-30T09:22:51.651038Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:51.651045Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:22:51.651058Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:51.651562Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-30T09:22:51.651576Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-30T09:22:51.652167Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-30T09:22:51.652183Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-30T09:22:51.652204Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-30T09:22:51.652212Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-30T09:22:51.652222Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-30T09:22:51.652228Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-30T09:22:51.652274Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-30T09:22:51.652339Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-30T09:22:51.652346Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-30T09:22:51.652415Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-30T09:22:51.652437Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:22:51.652443Z node 16 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [16:545:2497] TestWaitNotification: OK eventTxId 103 2025-06-30T09:22:51.652519Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:51.652555Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Solomon" took 50us result status StatusPathDoesNotExist 2025-06-30T09:22:51.652593Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted wait until 72075186233409549 is deleted 2025-06-30T09:22:51.652657Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-30T09:22:51.652668Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-30T09:22:51.652677Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 2025-06-30T09:22:51.652685Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409549 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 Deleted tabletId 72075186233409549 2025-06-30T09:22:51.652767Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:51.652794Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 30us result status StatusSuccess 2025-06-30T09:22:51.652888Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> DataShardWrite::UpsertWithDefaults [GOOD]
>> DataShardWrite::WriteImmediateBadRequest
>> TSchemeShardTest::AlterBlockStoreVolume [GOOD]
>> TSchemeShardTest::AlterBlockStoreVolumeWithNonReplicatedPartitions
>> TOlapReboots::CreateDropStore [GOOD]
>> TSchemeShardTest::NewOwnerOnDatabase [GOOD]
>> TSchemeShardTest::PreserveColumnOrder
>> DataShardWrite::UpsertPrepared+Volatile
>> Yq_1::Basic [GOOD]
>> TSchemeShardTest::CreatePersQueueGroupWithKeySchema [GOOD]
>> TSequenceReboots::CreateMultipleSequencesHaveInitialSequenceShard
>> TSchemeShardTest::AlterBlockStoreVolumeWithNonReplicatedPartitions [GOOD]
>> TSchemeShardTest::AdoptDropSolomon
>> TSchemeShardTest::CreateTableWithCompactionStrategies
>> Yq_1::Basic_EmptyList
>> TSchemeShardTest::PreserveColumnOrder [GOOD]
>> TPersQueueTest::DirectReadPreCached
>> TPersQueueTest::FetchRequest
>> DataShardWrite::UpsertImmediate [GOOD]
>> TSchemeShardTest::CopyTableAndConcurrentSplitMerge [GOOD]
>> TSchemeShardTest::CopyTableForBackup
>> DataShardWrite::UpsertImmediateManyColumns
>> TSchemeShardTest::CreateTableWithCompactionStrategies [GOOD]
>> TSchemeShardTest::AdoptDropSolomon [GOOD]
>> TSchemeShardTest::AlterTableAndAfterSplit
>> TSchemeShardTest::CreateWithIntermediateDirs
>> TPersQueueTest::BadTopic
>> DataShardWrite::ExecSQLUpsertImmediate+EvWrite [GOOD]
>> DataShardWrite::ExecSQLUpsertImmediate-EvWrite
>> Yq_1::ListConnections [GOOD]
>> SystemView::ShowCreateTableColumnAlterColumn [GOOD]
>> DataShardWrite::WriteImmediateBadRequest [GOOD]
>> TSchemeShardTest::CreateWithIntermediateDirs [GOOD]
>> TSchemeShardTest::BackupBackupCollection-WithIncremental-true [GOOD]
>> Yq_1::ListConnectionsOnEmptyConnectionsTable
>> TSchemeShardTest::CreateTopicOverDiskSpaceQuotas
>> TSchemeShardTest::CreateTopicOverDiskSpaceQuotas [GOOD]
>> TSchemeShardTest::CreateSystemColumn
>> SystemView::ShowCreateTableColumnUpsertOptions
|82.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut
|82.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut
>> DataShardWrite::UpsertPrepared+Volatile [GOOD]
>> DataShardWrite::WriteImmediateSeveralOperations
>> DataShardWrite::UpsertPrepared-Volatile
>> TSchemeShardTest::AlterTableAndAfterSplit [GOOD]
>> TSchemeShardTest::AlterIndexTableDirectly
>> Yq_1::ModifyConnections [GOOD]
>> Yq_1::ModifyQuery
|82.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_stream_creator/unittest
|82.4%| [LD] {RESULT} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::PreserveColumnOrder [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:44.760399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:44.760425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.760431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:44.760436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:44.760443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:44.760446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:44.760455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.766319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:44.766453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:44.766526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:44.788066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:44.788088Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:44.791900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:44.791964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:44.791990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-30T09:22:44.793643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:44.793735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:44.793840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.793893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:44.801627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.801709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:44.802035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:44.802084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:44.802091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:44.802113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.803764Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:44.820551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:44.820610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.820657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:44.820663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:44.820702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:44.820711Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:44.821465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.821516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:44.821585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.821597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:44.821603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:44.821610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:44.822126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.822140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:44.822159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:44.822646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.822662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.822669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.822677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:44.832709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:44.833384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:44.833426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at 
step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:44.833617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:44.833766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:44.833818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:44.834330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.834339Z node 1 :FLAT_TX_SCHEMESHARD ... 
33409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 407 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-30T09:22:52.705416Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 407 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-30T09:22:52.705702Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 64424511736 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-30T09:22:52.705725Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-30T09:22:52.705740Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 64424511736 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-30T09:22:52.705749Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-30T09:22:52.705758Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 311 RawX2: 64424511736 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-30T09:22:52.705771Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:52.705776Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:22:52.705781Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:22:52.705787Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 101:0 129 -> 240 2025-06-30T09:22:52.706572Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:22:52.706626Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:22:52.706782Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 
72057594046678944 2025-06-30T09:22:52.706813Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:22:52.706885Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:22:52.706895Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-30T09:22:52.706911Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:22:52.706920Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:22:52.706926Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:22:52.706931Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:22:52.706937Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-30T09:22:52.706955Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [15:340:2317] message: TxId: 101 2025-06-30T09:22:52.706962Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:22:52.706968Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-30T09:22:52.706974Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 101:0 2025-06-30T09:22:52.706999Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:22:52.707517Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:22:52.707534Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [15:341:2318] TestWaitNotification: OK eventTxId 101 2025-06-30T09:22:52.707668Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:52.707742Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 84us result status StatusSuccess 2025-06-30T09:22:52.707938Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 
ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "col01" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "col02" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "col03" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "col04" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false IsBuildInProgress: false } Columns { Name: "col05" Type: "Utf8" TypeId: 4608 Id: 5 NotNull: false IsBuildInProgress: false } Columns { Name: "col06" Type: "Utf8" TypeId: 4608 Id: 6 NotNull: false IsBuildInProgress: false } Columns { Name: "col07" Type: "Utf8" TypeId: 4608 Id: 7 NotNull: false IsBuildInProgress: false } Columns { Name: "col08" Type: "Utf8" TypeId: 4608 Id: 8 NotNull: false IsBuildInProgress: false } Columns { Name: "col09" Type: "Utf8" TypeId: 4608 Id: 9 NotNull: false IsBuildInProgress: false } Columns { Name: "col10" Type: "Utf8" TypeId: 4608 Id: 10 NotNull: false IsBuildInProgress: false } Columns { Name: "col11" Type: "Utf8" TypeId: 4608 Id: 11 NotNull: false IsBuildInProgress: false } Columns { Name: "col12" Type: "Utf8" TypeId: 4608 Id: 12 NotNull: false IsBuildInProgress: false } Columns { Name: "col13" Type: "Utf8" TypeId: 4608 Id: 13 NotNull: false IsBuildInProgress: false } Columns { Name: "col14" Type: "Utf8" TypeId: 4608 Id: 14 NotNull: false IsBuildInProgress: false } Columns { Name: "col15" Type: "Utf8" TypeId: 4608 Id: 15 NotNull: false IsBuildInProgress: false } Columns { Name: "col16" Type: "Utf8" TypeId: 4608 Id: 16 NotNull: false IsBuildInProgress: false } Columns { Name: "col17" Type: "Utf8" TypeId: 4608 Id: 17 NotNull: false IsBuildInProgress: false } Columns { Name: "col18" Type: "Utf8" TypeId: 4608 Id: 18 NotNull: false IsBuildInProgress: false } Columns { Name: "col19" Type: "Utf8" TypeId: 4608 Id: 19 NotNull: false IsBuildInProgress: false } Columns { Name: "col20" Type: "Utf8" TypeId: 4608 Id: 20 NotNull: false IsBuildInProgress: false } Columns { Name: "col21" Type: "Utf8" TypeId: 4608 Id: 21 NotNull: false IsBuildInProgress: false } Columns { Name: "col22" Type: "Utf8" TypeId: 4608 Id: 22 NotNull: false IsBuildInProgress: false } Columns { Name: "col23" Type: "Utf8" TypeId: 4608 Id: 23 NotNull: false IsBuildInProgress: false } Columns { Name: "col24" Type: "Utf8" TypeId: 4608 Id: 24 NotNull: false IsBuildInProgress: false } Columns { Name: "col25" Type: "Utf8" TypeId: 4608 Id: 25 NotNull: false IsBuildInProgress: false } Columns { Name: "col26" Type: "Utf8" TypeId: 4608 Id: 26 NotNull: false IsBuildInProgress: false } Columns { Name: "col27" Type: "Utf8" TypeId: 4608 Id: 27 NotNull: false IsBuildInProgress: false } Columns { Name: "col28" Type: "Utf8" TypeId: 4608 Id: 28 NotNull: false IsBuildInProgress: false } Columns { Name: "col29" Type: "Utf8" TypeId: 4608 Id: 29 NotNull: false IsBuildInProgress: false } KeyColumnNames: "col01" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } 
TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
|82.4%| [TA] {RESULT} $(B)/ydb/core/tablet_flat/ut/test-results/unittest/{meta.json ... results_accumulator.log}
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:44.760304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:44.760327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.760333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:44.760338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:44.760344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:44.760348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:44.760357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.766284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-30T09:22:44.766426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:44.766516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:44.788998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:44.789029Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:44.793069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:44.793150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:44.793180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:44.796216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:44.796309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:44.796423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.796501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:44.801635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.801780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:44.802216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:44.802264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:44.802270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:44.802290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.804309Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:44.828438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:44.828534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.828613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:44.828621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:44.828672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:44.828686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:44.829537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.829584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:44.829632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.829641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:44.829645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:44.829650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:44.830113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.830127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:44.830134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:44.830721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.830762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.830770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.830779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:44.832975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:44.833681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:44.833730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:44.833916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.834028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:44.834039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.834067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:44.834079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:44.834761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.834774Z node 1 :FLAT_TX_SCHEMESHARD ... 
CL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 } ChildrenExist: true } Children { Name: "DirB" PathId: 30 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "Table2" PathId: 32 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 29 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:53.503570Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:53.503608Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" took 41us result status StatusSuccess 2025-06-30T09:22:53.503719Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" PathDescription { Self { Name: "Table2" PathId: 32 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: 
"__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 32 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:53.503839Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:53.503861Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" took 25us result status StatusSuccess 2025-06-30T09:22:53.503922Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" PathDescription { Self { Name: "DirB" PathId: 30 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "Table3" PathId: 33 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 
ParentPathId: 30 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 30 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:53.504047Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:53.504074Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" took 30us result status StatusSuccess 2025-06-30T09:22:53.504159Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" PathDescription { Self { Name: "Table3" PathId: 33 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 30 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 
LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 33 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DataShardWrite::UpsertImmediateManyColumns [GOOD] >> DataShardWrite::ReplaceImmediate >> TSchemeShardTest::CreateSystemColumn [GOOD] >> Yq_1::CreateConnection_With_Existing_Name [GOOD] >> Yq_1::CreateConnections_With_Idempotency >> PrivateApi::GetTask [GOOD] >> PrivateApi::Nodes ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::CreateDropStore [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:32.463628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:32.463657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:32.463664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:32.463671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:32.463686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:32.463691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:32.463703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:32.463720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:32.463842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:32.463922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:32.481789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:32.481815Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:32.481958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:32.485436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:32.486664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:32.486701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:32.488649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:32.488697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:32.488798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:32.488855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:32.489897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:32.489947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:32.490173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:32.490181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:32.490189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:32.490195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:32.490200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:32.490234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:32.491678Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:32.512971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:32.513057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.513109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:32.513116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:32.513162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:32.513179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:32.513842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:32.513882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:32.513931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.513949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 
1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:32.513953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:32.513957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:32.514418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.514432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:32.514438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:32.515420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.515439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:32.515446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:32.515456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:32.516100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:32.516616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:32.516664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:32.516884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:32.516910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
t schemeshard: 72057594046678944, txId: 1003, path id: 1 2025-06-30T09:22:52.261116Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [193:212:2212], at schemeshard: 72057594046678944, txId: 1003, path id: 3 FAKE_COORDINATOR: Erasing txId 1003 2025-06-30T09:22:52.261189Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:52.261197Z node 193 :FLAT_TX_SCHEMESHARD INFO: drop_store.cpp:182: TDropOlapStore TProposedWaitParts operationId# 1003:0 ProgressState at schemeshard: 72057594046678944 2025-06-30T09:22:52.261207Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: drop_store.cpp:202: TDropOlapStore TProposedWaitParts operationId# 1003:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-30T09:22:52.261330Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:52.261346Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:52.261351Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:52.261356Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-30T09:22:52.261362Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:22:52.261435Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:52.261447Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-30T09:22:52.261452Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-30T09:22:52.261457Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-30T09:22:52.261462Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:52.261476Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-30T09:22:52.262401Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1003:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-30T09:22:52.262431Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1003, partId: 0, tablet: 72075186233409546 2025-06-30T09:22:52.262516Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6237: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1003 2025-06-30T09:22:52.262523Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-30T09:22:52.262537Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1003 2025-06-30T09:22:52.262547Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1003:0 129 -> 130 2025-06-30T09:22:52.262819Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:52.262991Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:22:52.263097Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:52.263127Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-30T09:22:52.263135Z node 193 :FLAT_TX_SCHEMESHARD INFO: drop_store.cpp:235: TDropOlapStore TProposedDeleteParts operationId# 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:52.263150Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:22:52.263181Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:52.263205Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:52.263212Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-30T09:22:52.263216Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:52.263222Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-30T09:22:52.263227Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-30T09:22:52.263233Z node 193 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-30T09:22:52.263238Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1003:0 2025-06-30T09:22:52.263265Z node 193 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:52.263728Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:22:52.263828Z node 193 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-30T09:22:52.263911Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:52.264121Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:22:52.264272Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:22:52.264283Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:22:52.264299Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:52.264430Z node 193 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186233409546;self_id=[193:340:2325];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; Forgetting tablet 72075186233409546 2025-06-30T09:22:52.265819Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-30T09:22:52.265833Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-30T09:22:52.266252Z node 193 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 1003 2025-06-30T09:22:52.266329Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:22:52.266338Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:22:52.266411Z node 193 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:22:52.266438Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:22:52.266444Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [193:457:2426] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:22:52.266532Z node 193 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false 
BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:22:52.266575Z node 193 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore" took 61us result status StatusPathDoesNotExist 2025-06-30T09:22:52.266622Z node 193 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/OlapStore\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/OlapStore" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> DataShardWrite::WriteImmediateSeveralOperations [GOOD] >> DataShardWrite::UpsertPreparedManyTables-Volatile >> BsControllerConfig::MergeIntersectingBoxes [GOOD] >> BsControllerConfig::MoveGroups >> TxUsage::Sinks_Oltp_WriteToTopics_4_Table [GOOD] >> DataShardWrite::UpsertPrepared-Volatile [GOOD] >> DataShardWrite::UpsertPreparedManyTables+Volatile >> DataShardWrite::ExecSQLUpsertImmediate-EvWrite [GOOD] >> DataShardWrite::DeleteImmediate ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::CreateSystemColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:44.758112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:44.758130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.758135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:44.758139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:44.758145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:44.758148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:44.758157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-30T09:22:44.766241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:44.766364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:44.766442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:44.784352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:44.784375Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:44.787393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:44.787451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:44.787477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:44.789555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:44.789642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:44.789771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.789834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:44.801553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.801673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:44.802032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:44.802072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:44.802078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:44.802095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.803693Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 
2025-06-30T09:22:44.827255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:44.827334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.827400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:44.827410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:44.827467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:44.827481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:44.828282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.828309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:44.828340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.828349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:44.828354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:44.828358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:44.828894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.828905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:44.828911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:44.829342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.829354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.829361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.829370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:44.832727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:44.833340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:44.833368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:44.833516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:44.833621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:44.833673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:44.834220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.834229Z node 1 :FLAT_TX_SCHEMESHARD ... 
ached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: SystemColumnInCopyAllowed, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:22:53.936491Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-30T09:22:53.936496Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:22:53.936504Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 103:0 type: TxCopyTable target path: [OwnerId: 72057594046678944, LocalPathId: 3] source path: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:22:53.936511Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-30T09:22:53.936535Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:22:53.936555Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:53.936683Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:22:53.936692Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:22:53.937142Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944 PathId: 3, at schemeshard: 72057594046678944 2025-06-30T09:22:53.937190Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /MyRoot/SystemColumnInCopyAllowed 2025-06-30T09:22:53.937227Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:53.937232Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:53.937266Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:22:53.937279Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:53.937283Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:212:2212], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-06-30T09:22:53.937293Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [16:212:2212], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-30T09:22:53.937363Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:22:53.937374Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 103:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046678944 2025-06-30T09:22:53.937418Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 103:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046678944 OwnerIdx: 2 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } 2025-06-30T09:22:53.937590Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:22:53.937602Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:22:53.937606Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-30T09:22:53.937610Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 6 2025-06-30T09:22:53.937614Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:22:53.937783Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 1 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:22:53.937795Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 1 PathOwnerId: 72057594046678944, cookie: 103 2025-06-30T09:22:53.937798Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-30T09:22:53.937801Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 1 2025-06-30T09:22:53.937805Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:22:53.937816Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 
2025-06-30T09:22:53.938303Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:2 msg type: 268697601 2025-06-30T09:22:53.938336Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72057594037968897 2025-06-30T09:22:53.938341Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1790: TOperation RegisterRelationByShardIdx, TxId: 103, shardIdx: 72057594046678944:2, partId: 0 2025-06-30T09:22:53.938640Z node 16 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72057594046678944 OwnerIdx: 2 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } 2025-06-30T09:22:53.938707Z node 16 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72057594046678944, OwnerIdx 2, type DataShard, boot OK, tablet id 72075186233409547 2025-06-30T09:22:53.938758Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5958: Handle TEvCreateTabletReply at schemeshard: 72057594046678944 message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-06-30T09:22:53.938767Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1804: TOperation FindRelatedPartByShardIdx, TxId: 103, shardIdx: 72057594046678944:2, partId: 0 2025-06-30T09:22:53.938786Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-06-30T09:22:53.938792Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 103:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046678944 2025-06-30T09:22:53.938797Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:180: TCreateParts opId# 103:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-06-30T09:22:53.938815Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 103:0 2 -> 3 2025-06-30T09:22:53.939043Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:22:53.941536Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-30T09:22:53.942309Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:22:53.942421Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-30T09:22:53.942432Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:71: TCopyTable TConfigureParts operationId# 103:0 ProgressState at tablet# 72057594046678944 
2025-06-30T09:22:53.942446Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:103: TCopyTable TConfigureParts operationId# 103:0 Propose modify scheme on dstDatashard# 72075186233409547 idx# 72057594046678944:2 srcDatashard# 72075186233409546 idx# 72057594046678944:1 operationId# 103:0 seqNo# 2:2 at tablet# 72057594046678944 2025-06-30T09:22:53.943587Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-06-30T09:22:53.943635Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 269549568 2025-06-30T09:22:53.943668Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72075186233409547 2025-06-30T09:22:53.943673Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72075186233409546 TestModificationResult got TxId: 103, wait until txId: 103 >> TxUsage::Sinks_Oltp_WriteToTopics_4_Query >> DataShardWrite::ReplaceImmediate [GOOD] >> DataShardWrite::ReplaceImmediate_DefaultValue >> KqpBatchUpdate::SimpleOnePartition >> BsControllerConfig::ExtendByCreatingSeparateBox [GOOD] >> BsControllerConfig::ExtendBoxAndStoragePool |82.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/batch_operations/unittest |82.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> SystemView::TopPartitionsByTliFields [GOOD] >> ViewQuerySplit::Basic [GOOD] >> ViewQuerySplit::WithPragmaTablePathPrefix [GOOD] >> ViewQuerySplit::WithPairedPragmaTablePathPrefix [GOOD] >> ViewQuerySplit::WithComments [GOOD] >> ViewQuerySplit::Joins [GOOD] >> Yq_1::ListConnectionsOnEmptyConnectionsTable [GOOD] >> SystemView::ShowCreateTableSequences [GOOD] >> SystemView::ShowCreateTablePartitionPolicyIndexTable >> DataShardWrite::DeleteImmediate [GOOD] >> DataShardWrite::CancelImmediate >> DataShardWrite::UpsertPreparedManyTables-Volatile [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache+Volatile >> DataShardWrite::ReplaceImmediate_DefaultValue [GOOD] >> DataShardWrite::UpdateImmediate >> KqpBatchUpdate::ManyPartitions_1 >> DataShardWrite::UpsertPreparedManyTables+Volatile [GOOD] >> DataShardWrite::UpsertNoLocksArbiter ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/fq/ut_integration/unittest >> Yq_1::ListConnectionsOnEmptyConnectionsTable [GOOD] Test command err: 2025-06-30T09:22:51.047112Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670445739400532:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:51.047232Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0630 09:22:51.090567665 360802 dns_resolver_ares.cc:452] no server name supplied in dns URI E0630 09:22:51.090616055 360802 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-30T09:22:51.092849Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create 
directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:15754: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:15754 } ] test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004380/r3tmp/tmpaeHhTx/pdisk_1.dat 2025-06-30T09:22:51.593225Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670445739400801:2272], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:51.593280Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 15754, node 1 2025-06-30T09:22:51.669520Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TClient is connected to server localhost:25772 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:51.768370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:51.776675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:51.776715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:51.778260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:51.779065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:51.948786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:51.948802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:51.948804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:51.948873Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:51.950806Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:52.047507Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:52.114875Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". 
Create session OK 2025-06-30T09:22:52.114890Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-30T09:22:52.114892Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-30T09:22:52.115060Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-30T09:22:52.115063Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-30T09:22:52.115065Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-30T09:22:52.115188Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/queries". Create session OK 2025-06-30T09:22:52.115191Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/queries" 2025-06-30T09:22:52.115192Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/queries" 2025-06-30T09:22:52.115228Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-30T09:22:52.115232Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-30T09:22:52.115233Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-30T09:22:52.116957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.117248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.117568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.118853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.119025Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". 
Create session OK 2025-06-30T09:22:52.119030Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-30T09:22:52.119031Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-30T09:22:52.119371Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". Create session OK 2025-06-30T09:22:52.119373Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-30T09:22:52.119374Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" 2025-06-30T09:22:52.119457Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". Create session OK 2025-06-30T09:22:52.119459Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-30T09:22:52.119460Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-30T09:22:52.119514Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-30T09:22:52.119516Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-30T09:22:52.119516Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-30T09:22:52.119561Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". Create session OK 2025-06-30T09:22:52.119562Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-30T09:22:52.119563Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-30T09:22:52.120105Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". Create session OK 2025-06-30T09:22:52.120111Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-30T09:22:52.120112Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-30T09:22:52.120242Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-30T09:22:52.120244Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-30T09:22:52.120246Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-30T09:22:52.120315Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-30T09:22:52.120317Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-30T09:22:52.120317Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-30T09:22:52.120483Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". 
Create session OK 2025-06-30T09:22:52.120485Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-30T09:22:52.120486Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-30T09:22:52.122104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-0 ... /Root. Database : . PoolId : default. }. Update output channelId: 1, peer: [4:7521670459891643795:2472] 2025-06-30T09:22:54.806839Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-06-30T09:22:54.806843Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 1 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-06-30T09:22:54.806853Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670459891643794:2471], TxId: 281474976715685, task: 1. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-30T09:22:54.806864Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7521670459891643794:2471], TxId: 281474976715685, task: 1. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7521670459891643794 RawX2: 4503616807242151 } } DstEndpoint { ActorId { RawX1: 7521670459891643795 RawX2: 4503616807242152 } } InMemory: true DstStageId: 1 } 2025-06-30T09:22:54.806866Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-06-30T09:22:54.806868Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 1 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-06-30T09:22:54.806873Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670459891643794:2471], TxId: 281474976715685, task: 1. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:22:54.806874Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. 
enter getasyncinputdata results size 0, freeSpace 8388608 2025-06-30T09:22:54.806876Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 1 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-06-30T09:22:54.807073Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:958: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. Recv TEvReadResult from ShardID=72075186224037898, ReadId=0, Status=SUCCESS, Finished=1, RowCount=0, TxLocks= , BrokenTxLocks= 2025-06-30T09:22:54.807083Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1050: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. Taken 0 locks 2025-06-30T09:22:54.807087Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1064: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. new data for read #0 seqno = 1 finished = 1 2025-06-30T09:22:54.807093Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670459891643794:2471], TxId: 281474976715685, task: 1. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 276037645 2025-06-30T09:22:54.807098Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670459891643794:2471], TxId: 281474976715685, task: 1. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:22:54.807103Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. enter getasyncinputdata results size 1, freeSpace 8388608 2025-06-30T09:22:54.807106Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1227: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. enter pack cells method shardId: 72075186224037898 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-30T09:22:54.807109Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1308: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. exit pack cells method shardId: 72075186224037898 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-30T09:22:54.807111Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1365: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. returned 0 rows; processed 0 rows 2025-06-30T09:22:54.807125Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1402: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. dropping batch for read #0 2025-06-30T09:22:54.807127Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. effective maxinflight 1 sorted 1 2025-06-30T09:22:54.807129Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-30T09:22:54.807132Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715685, task: 1, CA Id [4:7521670459891643794:2471]. 
returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-30T09:22:54.807162Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7521670459891643794:2471], TxId: 281474976715685, task: 1. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-30T09:22:54.807170Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670459891643795:2472], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646923 2025-06-30T09:22:54.807177Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715685, task: 2. Finish input channelId: 1, from: [4:7521670459891643794:2471] 2025-06-30T09:22:54.807183Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670459891643795:2472], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:22:54.807191Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7521670459891643795:2472], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-30T09:22:54.807194Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670459891643794:2471], TxId: 281474976715685, task: 1. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646927 2025-06-30T09:22:54.807198Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670459891643794:2471], TxId: 281474976715685, task: 1. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:22:54.807202Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715685, task: 1. Tasks execution finished 2025-06-30T09:22:54.807205Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7521670459891643794:2471], TxId: 281474976715685, task: 1. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . 
PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-30T09:22:54.807240Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715685, task: 1. pass away 2025-06-30T09:22:54.807274Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715685;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-30T09:22:54.807367Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670459891643795:2472], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:22:54.807374Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715685, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-30T09:22:54.807375Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715685, task: 2. Tasks execution finished 2025-06-30T09:22:54.807377Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7521670459891643795:2472], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jz02b2568bpaha9d349fs3q5. SessionId : ydb://session/3?node_id=4&id=NTIxZDE3NmMtNWFjMzU5YTQtYTA2MjEwZWUtZDI5ZjJmYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-30T09:22:54.807386Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715685, task: 2. pass away 2025-06-30T09:22:54.807392Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715685;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-30T09:22:54.991405Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: Client is stopped >> KqpBatchUpdate::Large_1 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> ViewQuerySplit::Joins [GOOD] Test command err: 2025-06-30T09:22:16.313980Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670296025769628:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:16.314249Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0030aa/r3tmp/tmpbhkMg4/pdisk_1.dat 2025-06-30T09:22:16.405850Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:16.415604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:16.415644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 12869, node 1 2025-06-30T09:22:16.417974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:16.430900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:16.430915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:16.430917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:16.430969Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20368 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:16.472157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:22:16.476146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:22:16.757236Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670296025770178:2287], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.757260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670296025770185:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.757268Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.758198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:16.760233Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670296025770207:2291], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-30T09:22:16.850057Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670296025770258:2326] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:16.945826Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521670296025770293:2286] TxId: 281474976710661. Ctx: { TraceId: 01jz029ws1fjbrryd2wgnnvtdz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTdiZGVkOWEtNTFkOTJlOTUtYjk3OTMxNWMtZmI0NmU3Njg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database 2025-06-30T09:22:16.945924Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jz029ws1fjbrryd2wgnnvtdz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTdiZGVkOWEtNTFkOTJlOTUtYjk3OTMxNWMtZmI0NmU3Njg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:16.953242Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7521670296025770300:2299], owner: [1:7521670296025770297:2297], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:16.953659Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7521670296025770300:2299], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:16.955622Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7521670296025770300:2299], row count: 1, finished: 1 2025-06-30T09:22:16.955675Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7521670296025770300:2299], owner: [1:7521670296025770297:2297], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:16.957621Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275336945, txId: 281474976710660] shutting down 2025-06-30T09:22:17.315992Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:18.026529Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jz029y7c0knnmnq3f62xg28a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmM0NTVkNzQtMTFjMTg2ODYtZjcwZTI4OWUtY2UzZjA5NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:18.027479Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7521670304615704944:2313], owner: [1:7521670304615704941:2311], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:18.031088Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7521670304615704944:2313], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:18.031271Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7521670304615704944:2313], row count: 1, finished: 1 2025-06-30T09:22:18.031308Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7521670304615704944:2313], owner: [1:7521670304615704941:2311], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:18.032265Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275338025, txId: 281474976710662] shutting down 2025-06-30T09:22:19.076927Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jz029z8vbpetzez24j91c9cq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDVlNTU0N2YtZWUyYTM0MzgtZWNhMGMyMjMtZTEyYmNlMjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:19.077795Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7521670308910672277:2324], owner: [1:7521670308910672274:2322], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:19.083157Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7521670308910672277:2324], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:19.083341Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7521670308910672277:2324], row count: 1, finished: 1 2025-06-30T09:22:19.083386Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7521670308910672277:2324], owner: [1:7521670308910672274:2322], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:19.084633Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275339076, txId: 281474976710664] shutting down 2025-06-30T09:22:20.112317Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jz02a09p68vj9cfc80efs4vw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGRjOWY4YjMtYzkxNjRmNzAtNzFhZGQ0My0yZjIwM2FhNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:20.113097Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7521670313205639610:2335], owner: [1:7521670313205639607:2333], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:22:20.113286Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7521670313205639610:2335], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:22:20.113389Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl. ... To { IntervalEndUs: 18446744073709551615 Rank: 4294967295 } InclusiveTo: true Type: TOP_PARTITIONS_BY_TLI_ONE_MINUTE , rows# 5, bytes# 315, next# 2025-06-30T09:22:54.447481Z node 12 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [12:7521670459760197688:2419], row count: 5, finished: 1 2025-06-30T09:22:54.447530Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [12:7521670459760197688:2419], owner: [12:7521670459760197684:2417], scan id: 0, sys view info: Type: ETopPartitionsByTliOneMinute SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-30T09:22:54.462007Z node 15 :SYSTEM_VIEWS DEBUG: partition_stats.cpp:510: NSysView::TPartitionStatsCollector: TEvProcessOverloaded , top size by CPU # 1, top size by TLI # 1, time# 2025-06-30T09:22:54.461837Z 2025-06-30T09:22:54.462498Z node 15 :SYSTEM_VIEWS DEBUG: tx_top_partitions.cpp:125: [72075186224037893] TTxTopPartitions::Execute: , partition by CPU count# 1, partition by TLI count# 1 2025-06-30T09:22:54.471454Z node 15 :SYSTEM_VIEWS DEBUG: tx_top_partitions.cpp:137: [72075186224037893] TTxTopPartitions::Complete 2025-06-30T09:22:54.501669Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715684. Ctx: { TraceId: 01jz02b1q32gb4hxybpc0m1nq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=YjVkMWM4MWMtMmIzNTFjYWUtODZiOTYyNGYtYjNlMGU2MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:54.503399Z node 12 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275374366, txId: 281474976715682] shutting down 2025-06-30T09:22:54.537236Z node 14 :SYSTEM_VIEWS DEBUG: partition_stats.cpp:510: NSysView::TPartitionStatsCollector: TEvProcessOverloaded , top size by CPU # 1, top size by TLI # 1, time# 2025-06-30T09:22:54.537091Z 2025-06-30T09:22:54.537434Z node 14 :SYSTEM_VIEWS DEBUG: tx_top_partitions.cpp:125: [72075186224037899] TTxTopPartitions::Execute: , partition by CPU count# 1, partition by TLI count# 1 2025-06-30T09:22:54.551296Z node 14 :SYSTEM_VIEWS DEBUG: tx_top_partitions.cpp:137: [72075186224037899] TTxTopPartitions::Complete 2025-06-30T09:22:54.561221Z node 14 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:14: [72075186224037899] TTxAggregate::Execute 2025-06-30T09:22:54.561248Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:136: [72075186224037899] PersistQueryResults: interval end# 2025-06-30T09:22:54.000000Z, query count# 0 2025-06-30T09:22:54.561256Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 8, interval end# 2025-06-30T09:22:54.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.561260Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 10, interval end# 2025-06-30T09:22:54.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.561263Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 12, interval end# 2025-06-30T09:22:54.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.561268Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 14, interval end# 2025-06-30T09:22:54.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.561271Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 9, interval end# 2025-06-30T10:00:00.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.561275Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 11, interval end# 2025-06-30T10:00:00.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.561278Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 13, interval end# 2025-06-30T10:00:00.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.561281Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 15, interval end# 2025-06-30T10:00:00.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.561925Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715686. Ctx: { TraceId: 01jz02b1xacq5h19ke4zv4rxba, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=ODAyM2E1ZjItNjU0YmQwODItZTJhYzExZDAtMThjN2M1NTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:22:54.562805Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [12:7521670459760197727:2429], owner: [12:7521670459760197723:2427], scan id: 0, sys view info: Type: ETopPartitionsByTliOneMinute SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-30T09:22:54.563430Z node 15 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:14: [72075186224037893] TTxAggregate::Execute 2025-06-30T09:22:54.563459Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:136: [72075186224037893] PersistQueryResults: interval end# 2025-06-30T09:22:54.000000Z, query count# 0 2025-06-30T09:22:54.563465Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 8, interval end# 2025-06-30T09:22:54.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.563469Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 10, interval end# 2025-06-30T09:22:54.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.563473Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 12, interval end# 2025-06-30T09:22:54.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.563476Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 14, interval end# 2025-06-30T09:22:54.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.563489Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 9, interval end# 2025-06-30T10:00:00.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.563493Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 11, interval end# 2025-06-30T10:00:00.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.563496Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 13, interval end# 2025-06-30T10:00:00.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.563500Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 15, interval end# 2025-06-30T10:00:00.000000Z, query count# 0, persisted# 0 2025-06-30T09:22:54.567085Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:641: [72075186224037893] Reply batch: range# From { IntervalEndUs: 1751275374000000 Rank: 0 } InclusiveFrom: true To { IntervalEndUs: 1751275374000000 Rank: 4294967295 } InclusiveTo: true Type: TOP_PARTITIONS_BY_TLI_ONE_MINUTE , rows# 1, bytes# 63, next# 2025-06-30T09:22:54.567820Z node 15 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:110: [72075186224037893] TTxAggregate::Complete 2025-06-30T09:22:54.568395Z node 14 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:110: [72075186224037899] TTxAggregate::Complete 2025-06-30T09:22:54.566688Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [12:7521670459760197727:2429], schemeshard id: 72075186224037888, hive id: 72057594037968897, database: /Root/Tenant1, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 2], database node count: 2 2025-06-30T09:22:54.567487Z node 12 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [12:7521670459760197727:2429], row count: 1, finished: 1 2025-06-30T09:22:54.567519Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [12:7521670459760197727:2429], owner: 
[12:7521670459760197723:2427], scan id: 0, sys view info: Type: ETopPartitionsByTliOneMinute SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-30T09:22:54.568584Z node 12 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275374560, txId: 281474976715685] shutting down 2025-06-30T09:22:54.583477Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-06-30T09:22:54.583683Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:54.583723Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 13 2025-06-30T09:22:54.583750Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:54.583771Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 16 2025-06-30T09:22:54.583836Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:54.583851Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 14 2025-06-30T09:22:54.583950Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-30T09:22:54.584774Z node 13 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [13:7521670412025315388:2075], processor id# 72075186224037899, database# /Root/Tenant2 2025-06-30T09:22:54.585007Z node 13 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [13:7521670412025315388:2075], database# /Root/Tenant2, processor id# 72075186224037899 2025-06-30T09:22:54.585319Z node 15 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [15:7521670411817501933:2117], processor id# 72075186224037893, database# /Root/Tenant1 2025-06-30T09:22:54.584037Z node 16 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [16:7521670411537857642:2089], processor id# 72075186224037893, database# /Root/Tenant1 2025-06-30T09:22:54.585435Z node 16 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [16:7521670411537857642:2089], database# /Root/Tenant1, processor id# 72075186224037893 2025-06-30T09:22:54.585971Z node 15 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [15:7521670411817501933:2117], database# /Root/Tenant1, processor id# 72075186224037893 2025-06-30T09:22:54.587083Z node 14 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [14:7521670410586432949:2102], processor id# 72075186224037899, database# /Root/Tenant2 2025-06-30T09:22:54.594819Z node 14 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [14:7521670410586432949:2102], database# /Root/Tenant2, processor id# 72075186224037899 2025-06-30T09:22:55.017119Z node 16 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [16:7521670411537857546:2064] 2025-06-30T09:22:55.066118Z node 16 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# 
[16:7521670411537857642:2089] 2025-06-30T09:22:55.266311Z node 15 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [15:7521670411817501810:2064] 2025-06-30T09:22:55.326800Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [13:7521670412025315388:2075] >> KqpBatchUpdate::Returning |82.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |82.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |82.4%| [LD] {RESULT} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |82.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/batch_operations/unittest >> DataShardWrite::UpdateImmediate [GOOD] >> DataShardWrite::RejectOnChangeQueueOverflow >> Yq_1::CreateConnections_With_Idempotency [GOOD] >> DataShardWrite::CancelImmediate [GOOD] >> DataShardWrite::DeletePrepared+Volatile >> Yq_1::Basic_EmptyList [GOOD] >> Yq_1::Basic_EmptyDict >> DataShardWrite::UpsertPreparedNoTxCache+Volatile [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache-Volatile >> KqpBatchDelete::Large_2 >> DataShardWrite::UpsertNoLocksArbiter [GOOD] >> DataShardWrite::UpsertLostPrepareArbiter >> Yq_1::ModifyQuery [GOOD] >> KqpBatchUpdate::NotIdempotent |82.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/fq/ut_integration/unittest >> Yq_1::CreateConnections_With_Idempotency [GOOD] Test command err: 2025-06-30T09:22:51.889732Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670445750346780:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:51.889967Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0630 09:22:51.944377440 362001 dns_resolver_ares.cc:452] no server name supplied in dns URI E0630 09:22:51.944429722 362001 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-30T09:22:51.949909Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:25131: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:25131 } ] 2025-06-30T09:22:51.950264Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:25131: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:25131 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004371/r3tmp/tmpC7af7p/pdisk_1.dat 2025-06-30T09:22:52.445075Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:52.445230Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670450045314583:2274], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:52.463074Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 25131, node 1 TClient is connected to server localhost:16985 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:52.584539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:52.636111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:52.636154Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:52.642897Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:52.797008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:52.797024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:52.797026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:52.797095Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:52.798790Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:52.894839Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:52.962973Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-30T09:22:52.962990Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-30T09:22:52.962993Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-30T09:22:52.963301Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". 
Create session OK 2025-06-30T09:22:52.963309Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-30T09:22:52.963312Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-30T09:22:52.963389Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-30T09:22:52.963397Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-30T09:22:52.963399Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-30T09:22:52.963409Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". Create session OK 2025-06-30T09:22:52.963411Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-30T09:22:52.963412Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-30T09:22:52.963883Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-30T09:22:52.963892Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-30T09:22:52.963894Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-30T09:22:52.964014Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". Create session OK 2025-06-30T09:22:52.964017Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-30T09:22:52.964018Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-30T09:22:52.964109Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". Create session OK 2025-06-30T09:22:52.964111Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-30T09:22:52.964112Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-30T09:22:52.964147Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/queries". Create session OK 2025-06-30T09:22:52.964151Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/queries" 2025-06-30T09:22:52.964152Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/queries" 2025-06-30T09:22:52.964260Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". Create session OK 2025-06-30T09:22:52.964263Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-30T09:22:52.964264Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" 2025-06-30T09:22:52.964301Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". Create session OK 2025-06-30T09:22:52.964303Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-30T09:22:52.964304Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-30T09:22:52.964381Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". 
Create session OK 2025-06-30T09:22:52.964383Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-30T09:22:52.964383Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-30T09:22:52.964384Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-30T09:22:52.964385Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-30T09:22:52.964386Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-30T09:22:52.965035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:22:52.966190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.966446Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-30T09:22:52.966448Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-30T09:22:52.966450Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-30T09:22:52.966687Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "Root/yq" 2025-06-30T09:22:52.966695Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "Root/yq": 2025-06-30T09:22:52.969256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.969486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.969668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 2814749 ... 
pp:648: SyncQuota finished with error: 2025-06-30T09:22:56.977700Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: ... (TRUNCATED: the same "node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error:" message repeats with consecutive timestamps through 2025-06-30T09:22:56.978586Z) ...
2025-06-30T09:22:56.978595Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-30T09:22:56.978603Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-30T09:22:56.978612Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-30T09:22:56.978617Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-30T09:22:56.978629Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-30T09:22:56.978634Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-30T09:22:56.978645Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-30T09:22:56.978651Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-30T09:22:56.978661Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-30T09:22:56.978671Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-30T09:22:56.978679Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: |82.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |82.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view >> KqpBatchUpdate::Returning [GOOD] >> DataShardWrite::RejectOnChangeQueueOverflow [GOOD] >> DataShardWrite::UpsertBrokenLockArbiter >> TSchemeShardCheckProposeSize::CopyTables [GOOD] >> TSchemeShardDecimalTypesInTables::Parameterless >> KqpBatchDelete::SimplePartitions >> DataShardWrite::UpsertPreparedNoTxCache-Volatile [GOOD] >> DataShardWrite::WriteCommitVersion >> DataShardWrite::DeletePrepared+Volatile [GOOD] >> DataShardWrite::DeletePrepared-Volatile ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/fq/ut_integration/unittest >> Yq_1::ModifyQuery [GOOD] Test command err: 2025-06-30T09:22:51.308053Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670448529682705:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:51.310282Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0630 09:22:51.359703254 361178 dns_resolver_ares.cc:452] no server name supplied in dns URI E0630 09:22:51.359752047 361178 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004375/r3tmp/tmpkZnQlH/pdisk_1.dat 2025-06-30T09:22:51.798129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:51.803800Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670448529682995:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:51.840407Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 23187, node 1 TClient is connected to server localhost:25468 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:52.035579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:52.108654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:52.108695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:52.113777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:52.192138Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:52.192165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:52.192167Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:52.192240Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:52.192977Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:52.310831Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:52.379301Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". Create session OK 2025-06-30T09:22:52.379325Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-30T09:22:52.379328Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" 2025-06-30T09:22:52.380108Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". 
Create session OK 2025-06-30T09:22:52.380111Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-30T09:22:52.380115Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-30T09:22:52.380117Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-30T09:22:52.380122Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-30T09:22:52.380124Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-30T09:22:52.380233Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-30T09:22:52.380248Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-30T09:22:52.380250Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-30T09:22:52.380349Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-30T09:22:52.380362Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-30T09:22:52.380363Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-30T09:22:52.380385Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". Create session OK 2025-06-30T09:22:52.380389Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-30T09:22:52.380390Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-30T09:22:52.380437Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-30T09:22:52.380441Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-30T09:22:52.380442Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-30T09:22:52.380531Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/queries". Create session OK 2025-06-30T09:22:52.380533Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/queries" 2025-06-30T09:22:52.380534Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/queries" 2025-06-30T09:22:52.380616Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". Create session OK 2025-06-30T09:22:52.380617Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-30T09:22:52.380618Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-30T09:22:52.380667Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". Create session OK 2025-06-30T09:22:52.380670Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-30T09:22:52.380671Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-30T09:22:52.380702Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". 
Create session OK 2025-06-30T09:22:52.380704Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-30T09:22:52.380706Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-30T09:22:52.380762Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". Create session OK 2025-06-30T09:22:52.380764Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-30T09:22:52.380765Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-30T09:22:52.380787Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-30T09:22:52.380789Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-30T09:22:52.380791Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-30T09:22:52.384187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.384569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.384892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.385094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.385369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.385568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 
2025-06-30T09:22:52.387257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, s ... RawX1: 7521670474550504905 RawX2: 4503616807242498 } } DstEndpoint { ActorId { RawX1: 7521670474550504906 RawX2: 4503616807242499 } } InMemory: true DstStageId: 1 } Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 2 SrcEndpoint { ActorId { RawX1: 7521670474550504906 RawX2: 4503616807242499 } } DstEndpoint { ActorId { RawX1: 7521670474550504901 RawX2: 4503616807242013 } } InMemory: true } 2025-06-30T09:22:57.214848Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1081: SelfId: [4:7521670474550504906:2819], TxId: 281474976715759, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Update input channelId: 1, peer: [4:7521670474550504905:2818] 2025-06-30T09:22:57.214861Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670474550504906:2819], TxId: 281474976715759, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646926 2025-06-30T09:22:57.214877Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7521670474550504906:2819], TxId: 281474976715759, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7521670474550504905 RawX2: 4503616807242498 } } DstEndpoint { ActorId { RawX1: 7521670474550504906 RawX2: 4503616807242499 } } InMemory: true DstStageId: 1 } Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 2 SrcEndpoint { ActorId { RawX1: 7521670474550504906 RawX2: 4503616807242499 } } DstEndpoint { ActorId { RawX1: 7521670474550504901 RawX2: 4503616807242013 } } InMemory: true } 2025-06-30T09:22:57.214880Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670474550504906:2819], TxId: 281474976715759, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-30T09:22:57.214903Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:958: TxId: 281474976715759, task: 1, CA Id [4:7521670474550504905:2818]. Recv TEvReadResult from ShardID=72075186224037892, ReadId=0, Status=SUCCESS, Finished=1, RowCount=1, TxLocks= , BrokenTxLocks= 2025-06-30T09:22:57.214912Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1050: TxId: 281474976715759, task: 1, CA Id [4:7521670474550504905:2818]. Taken 0 locks 2025-06-30T09:22:57.214915Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1064: TxId: 281474976715759, task: 1, CA Id [4:7521670474550504905:2818]. 
new data for read #0 seqno = 1 finished = 1 2025-06-30T09:22:57.214919Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670474550504905:2818], TxId: 281474976715759, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 276037645 2025-06-30T09:22:57.214924Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670474550504905:2818], TxId: 281474976715759, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-30T09:22:57.214927Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715759, task: 1, CA Id [4:7521670474550504905:2818]. enter getasyncinputdata results size 1, freeSpace 8388608 2025-06-30T09:22:57.214930Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1227: TxId: 281474976715759, task: 1, CA Id [4:7521670474550504905:2818]. enter pack cells method shardId: 72075186224037892 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-30T09:22:57.214939Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1308: TxId: 281474976715759, task: 1, CA Id [4:7521670474550504905:2818]. exit pack cells method shardId: 72075186224037892 processedRows: 0 packed rows: 1 freeSpace: 8387511 2025-06-30T09:22:57.214949Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1365: TxId: 281474976715759, task: 1, CA Id [4:7521670474550504905:2818]. returned 1 rows; processed 1 rows 2025-06-30T09:22:57.214963Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1402: TxId: 281474976715759, task: 1, CA Id [4:7521670474550504905:2818]. dropping batch for read #0 2025-06-30T09:22:57.214968Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715759, task: 1, CA Id [4:7521670474550504905:2818]. effective maxinflight 1024 sorted 0 2025-06-30T09:22:57.214970Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715759, task: 1, CA Id [4:7521670474550504905:2818]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-30T09:22:57.214973Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715759, task: 1, CA Id [4:7521670474550504905:2818]. returned async data processed rows 1 left freeSpace 8387511 received rows 1 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-30T09:22:57.215035Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7521670474550504905:2818], TxId: 281474976715759, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-30T09:22:57.215043Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670474550504905:2818], TxId: 281474976715759, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. 
CA StateFunc 271646922 2025-06-30T09:22:57.215049Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670474550504906:2819], TxId: 281474976715759, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646923 2025-06-30T09:22:57.215051Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715759, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-30T09:22:57.215067Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715759, task: 2. Finish input channelId: 1, from: [4:7521670474550504905:2818] 2025-06-30T09:22:57.215093Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670474550504906:2819], TxId: 281474976715759, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-30T09:22:57.215097Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670474550504905:2818], TxId: 281474976715759, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646927 2025-06-30T09:22:57.215103Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670474550504905:2818], TxId: 281474976715759, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-30T09:22:57.215106Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715759, task: 1. Tasks execution finished 2025-06-30T09:22:57.215108Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7521670474550504905:2818], TxId: 281474976715759, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-30T09:22:57.215137Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715759, task: 1. pass away 2025-06-30T09:22:57.215165Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715759;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-30T09:22:57.215173Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7521670474550504906:2819], TxId: 281474976715759, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-30T09:22:57.215247Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7521670474550504906:2819], TxId: 281474976715759, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-30T09:22:57.215252Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715759, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-30T09:22:57.215254Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715759, task: 2. Tasks execution finished 2025-06-30T09:22:57.215256Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7521670474550504906:2819], TxId: 281474976715759, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NWRiNGQzM2YtOTE4NDM5MzItY2Y1YmMwMDItYzgzMTcwNDE=. TraceId : 01jz02b4gq0tvg4xpky6pm21d9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-30T09:22:57.215276Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715759, task: 2. pass away 2025-06-30T09:22:57.215294Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715759;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Returning [GOOD] Test command err: Trying to start YDB, gRPC: 63646, MsgBus: 64943 2025-06-30T09:22:56.959792Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670470383939326:2163];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:56.959864Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044ba/r3tmp/tmpTpsQe6/pdisk_1.dat 2025-06-30T09:22:57.011743Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:57.013665Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670470383939199:2080] 1751275376956172 != 1751275376956175 TServer::EnableGrpc on GrpcPort 63646, node 1 2025-06-30T09:22:57.032722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:57.032741Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:57.032745Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:57.032800Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64943 TClient is connected to server localhost:64943 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:22:57.093842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:57.093880Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:57.094982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:22:57.107294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:57.110642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:57.119808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:57.194523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:57.255472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:57.269182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:57.381607Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670474678908096:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.381639Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.433257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.442069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.455721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.468790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.483559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.497477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.511481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.527254Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670474678908747:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.527292Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.527336Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670474678908752:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.528326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:57.538635Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670474678908754:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:57.607374Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670474678908805:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:57.798264Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670474678909077:2475], status: GENERIC_ERROR, issues:
:2:22: Error: BATCH UPDATE is unsupported with RETURNING 2025-06-30T09:22:57.798529Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=Yzg3MjdjMDMtOGNiODUzNzEtZWMxYjNhYjgtMWRlODI1MzU=, ActorId: [1:7521670474678909068:2469], ActorState: ExecuteState, TraceId: 01jz02b53y7bs9a30qc9dhkphq, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> TSchemeShardDecimalTypesInTables::Parameterless [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-false >> DataShardWrite::UpsertLostPrepareArbiter [GOOD] >> DataShardWrite::UpsertNoLocksArbiterRestart >> SystemView::ShowCreateTableColumnUpsertOptions [GOOD] >> SystemView::ShowCreateTableColumnUpsertIndex >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-false [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-true >> PrivateApi::Nodes [GOOD] >> DataShardWrite::UpsertBrokenLockArbiter [GOOD] >> KqpBatchUpdate::NotIdempotent [GOOD] >> TSchemeShardTest::DropPQAbort [GOOD] >> TSchemeShardTest::DropBlockStoreVolume |82.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage |82.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage >> ReadIteratorExternalBlobs::ExtBlobsWithCompactingMiddleRows [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsEmptyTable >> KqpBatchDelete::SimpleOnePartition >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-true [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-false >> TSchemeShardTest::DropBlockStoreVolume [GOOD] >> TSchemeShardTest::DropBlockStoreVolumeWithNonReplicatedPartitions ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/fq/ut_integration/unittest >> PrivateApi::Nodes [GOOD] Test command err: 2025-06-30T09:22:48.989829Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670433195441935:2182];send_to=[0:7307199536658146131:7762515]; E0630 09:22:49.024502258 358423 dns_resolver_ares.cc:452] no server name supplied in dns URI E0630 09:22:49.024545838 358423 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-30T09:22:49.024381Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16575: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:16575 } ] 2025-06-30T09:22:49.026677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:49.356161Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:49.356391Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670437490409390:2270], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:49.421361Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670437490409390:2270], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00438b/r3tmp/tmpNFMuuK/pdisk_1.dat 2025-06-30T09:22:49.509086Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 16575, node 1 TClient is connected to server localhost:62636 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:49.687953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:49.729724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:49.729772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:49.731631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:49.899817Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:49.900284Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:49.900293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:49.900295Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:49.900362Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:49.990104Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:50.051279Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-30T09:22:50.051298Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-30T09:22:50.051301Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-30T09:22:50.051769Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". 
Create session OK 2025-06-30T09:22:50.051772Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-30T09:22:50.051774Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-30T09:22:50.053825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:50.054268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:50.054338Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-30T09:22:50.054342Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-30T09:22:50.054345Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-30T09:22:50.055383Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-30T09:22:50.055392Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-30T09:22:50.055395Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-30T09:22:50.055736Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-30T09:22:50.055742Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-30T09:22:50.055744Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-30T09:22:50.055936Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". Create session OK 2025-06-30T09:22:50.055942Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-30T09:22:50.055944Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-30T09:22:50.056074Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". Create session OK 2025-06-30T09:22:50.056075Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-30T09:22:50.056077Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-30T09:22:50.056165Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-30T09:22:50.056167Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-30T09:22:50.056168Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-30T09:22:50.056464Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". 
Create session OK 2025-06-30T09:22:50.056477Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-30T09:22:50.056480Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-30T09:22:50.056638Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-30T09:22:50.056640Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-30T09:22:50.056641Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-30T09:22:50.056803Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/queries". Create session OK 2025-06-30T09:22:50.056806Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/queries" 2025-06-30T09:22:50.056807Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/queries" 2025-06-30T09:22:50.056955Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". Create session OK 2025-06-30T09:22:50.056964Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-30T09:22:50.056968Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" 2025-06-30T09:22:50.056974Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-30T09:22:50.056977Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-30T09:22:50.056979Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-30T09:22:50.058067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:50.058507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:50.058766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wor ... 303168, finished: 0 2025-06-30T09:22:56.729699Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3311: TxId: 281474976715676, task: 1. Add data: 100 / 100 2025-06-30T09:22:56.729724Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3280: TxId: 281474976715676, task: 1. Send data=100, closed=1, bufferActorId=[7:7521670469072497137:2362] 2025-06-30T09:22:56.729728Z node 7 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:372: SelfId: [7:7521670469072497141:2362], TxId: 281474976715676, task: 1. 
Ctx: { SessionId : ydb://session/3?node_id=7&id=NTFkZTgxYzgtZTRjNDk4ZWItOTgzNjE4YTAtYTk1MTE0NTc=. CustomerSuppliedId : . TraceId : 01jz02b4287tdfrswrv4kvmhja. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Drain async output 0. Free space decreased: -9223372036787666944, sent data from buffer: 100 2025-06-30T09:22:56.729731Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715676, task: 1. Tasks execution finished 2025-06-30T09:22:56.729733Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7521670469072497141:2362], TxId: 281474976715676, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=NTFkZTgxYzgtZTRjNDk4ZWItOTgzNjE4YTAtYTk1MTE0NTc=. CustomerSuppliedId : . TraceId : 01jz02b4287tdfrswrv4kvmhja. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-30T09:22:56.729737Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7521670469072497141:2362], TxId: 281474976715676, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=NTFkZTgxYzgtZTRjNDk4ZWItOTgzNjE4YTAtYTk1MTE0NTc=. CustomerSuppliedId : . TraceId : 01jz02b4287tdfrswrv4kvmhja. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-30T09:22:56.729739Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [7:7521670469072497141:2362], TxId: 281474976715676, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=NTFkZTgxYzgtZTRjNDk4ZWItOTgzNjE4YTAtYTk1MTE0NTc=. CustomerSuppliedId : . TraceId : 01jz02b4287tdfrswrv4kvmhja. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: 2025-06-30T09:22:56.729740Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715676, task: 1. Tasks execution finished 2025-06-30T09:22:56.729741Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7521670469072497141:2362], TxId: 281474976715676, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=NTFkZTgxYzgtZTRjNDk4ZWItOTgzNjE4YTAtYTk1MTE0NTc=. CustomerSuppliedId : . TraceId : 01jz02b4287tdfrswrv4kvmhja. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-30T09:22:56.729751Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7521670469072497141:2362], TxId: 281474976715676, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=NTFkZTgxYzgtZTRjNDk4ZWItOTgzNjE4YTAtYTk1MTE0NTc=. CustomerSuppliedId : . TraceId : 01jz02b4287tdfrswrv4kvmhja. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:22:56.729752Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715676, task: 1. Tasks execution finished 2025-06-30T09:22:56.729753Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7521670469072497141:2362], TxId: 281474976715676, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=NTFkZTgxYzgtZTRjNDk4ZWItOTgzNjE4YTAtYTk1MTE0NTc=. CustomerSuppliedId : . TraceId : 01jz02b4287tdfrswrv4kvmhja. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-30T09:22:56.729766Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1860: SelfId: [7:7521670469072497137:2362], SessionActorId: [7:7521670464777528496:2362], Create new TableWriteActor for table `Root/yq/nodes` ([72057594046644480:10:1]). lockId=281474976715675. 
ActorId=[7:7521670469072497144:2362] 2025-06-30T09:22:56.729775Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:388: Table: `Root/yq/nodes` ([72057594046644480:10:1]), SessionActorId: [7:7521670464777528496:2362]Open: token=0 2025-06-30T09:22:56.729779Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1987: SelfId: [7:7521670469072497137:2362], SessionActorId: [7:7521670464777528496:2362], ProcessRequestQueue [OwnerId: 72057594046644480, LocalPathId: 10] NOT READY queue=1 2025-06-30T09:22:56.729787Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:394: SelfId: [7:7521670469072497144:2362], Table: `Root/yq/nodes` ([72057594046644480:10:1]), SessionActorId: [7:7521670464777528496:2362]Write: token=0 2025-06-30T09:22:56.730222Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:402: SelfId: [7:7521670469072497144:2362], Table: `Root/yq/nodes` ([72057594046644480:10:1]), SessionActorId: [7:7521670464777528496:2362]Close: token=0 2025-06-30T09:22:56.730227Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3180: SelfId: [7:7521670469072497143:2362], TxId: 281474976715676, task: 1. TKqpForwardWriteActor recieve EvBufferWriteResult from [7:7521670469072497137:2362] 2025-06-30T09:22:56.730229Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3198: SelfId: [7:7521670469072497143:2362], TxId: 281474976715676, task: 1. Finished 2025-06-30T09:22:56.730233Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7521670469072497141:2362], TxId: 281474976715676, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=NTFkZTgxYzgtZTRjNDk4ZWItOTgzNjE4YTAtYTk1MTE0NTc=. CustomerSuppliedId : . TraceId : 01jz02b4287tdfrswrv4kvmhja. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:22:56.730235Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715676, task: 1. Tasks execution finished 2025-06-30T09:22:56.730237Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [7:7521670469072497141:2362], TxId: 281474976715676, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=NTFkZTgxYzgtZTRjNDk4ZWItOTgzNjE4YTAtYTk1MTE0NTc=. CustomerSuppliedId : . TraceId : 01jz02b4287tdfrswrv4kvmhja. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-30T09:22:56.730254Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715676, task: 1. pass away 2025-06-30T09:22:56.730271Z node 7 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715676;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-30T09:22:56.730338Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2110: SelfId: [7:7521670469072497137:2362], SessionActorId: [7:7521670464777528496:2362], Start immediate commit 2025-06-30T09:22:56.730340Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:923: SelfId: [7:7521670469072497144:2362], Table: `Root/yq/nodes` ([72057594046644480:10:1]), SessionActorId: [7:7521670464777528496:2362]SetImmediateCommit 2025-06-30T09:22:56.730342Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2052: SelfId: [7:7521670469072497137:2362], SessionActorId: [7:7521670464777528496:2362], Flush data 2025-06-30T09:22:56.730374Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1050: SelfId: [7:7521670469072497144:2362], Table: `Root/yq/nodes` ([72057594046644480:10:1]), SessionActorId: [7:7521670464777528496:2362]Send EvWrite to ShardID=72075186224037892, isPrepare=0, isImmediateCommit=1, TxId=0, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715675 DataShard: 72075186224037892 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 10, Size=212, Cookie=1, OperationsCount=1, IsFinal=1, Attempts=0, Mode=3, BufferMemory=212 2025-06-30T09:22:56.733915Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [7:7521670469072497144:2362], Table: `Root/yq/nodes` ([72057594046644480:10:1]), SessionActorId: [7:7521670464777528496:2362]Recv EvWriteResult from ShardID=72075186224037892, Status=STATUS_COMPLETED, TxId=3, Locks= , Cookie=1 2025-06-30T09:22:56.733934Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [7:7521670469072497144:2362], Table: `Root/yq/nodes` ([72057594046644480:10:1]), SessionActorId: [7:7521670464777528496:2362]Got completed result TxId=3, TabletId=72075186224037892, Cookie=1, Mode=3, Locks= 2025-06-30T09:22:56.733966Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [7:7521670469072497137:2362], SessionActorId: [7:7521670464777528496:2362], Committed TxId=0 2025-06-30T09:22:56.736635Z node 7 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (1): Cancelled on the server side
: Error: Grpc error response on endpoint [::]:16849 2025-06-30T09:22:57.172112Z node 7 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:16849: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:16849 2025-06-30T09:22:57.661296Z node 7 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create directory "Root/yq" 2025-06-30T09:22:57.661312Z node 7 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:383: Call create directory "Root/yq" 2025-06-30T09:22:57.664665Z node 7 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16849: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:16849 } ] 2025-06-30T09:22:57.671297Z node 7 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage.cpp:398: DB Error, Status: TRANSPORT_UNAVAILABLE, Issues: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16849: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:16849 } ], Query: --!syntax_v1 -- Query name: GetTask(read stale ro) PRAGMA TablePathPrefix("Root/yq"); DECLARE $tenant as String; DECLARE $from as Timestamp; DECLARE $tasks_limit as Uint64; SELECT `scope`, `query_id`, `owner`, `last_seen_at`, `retry_counter`, `retry_counter_updated_at`, `retry_rate`, `query_type` FROM `pending_small` WHERE `tenant` = $tenant AND `assigned_until` < $from ORDER BY `query_id` DESC LIMIT $tasks_limit; 2025-06-30T09:22:57.672882Z node 7 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: GetTaskRequest - GetTaskResult: {tenant: "TestTenant" owner_id: "a8412538-716aba6c-f3a36683-659666a71" host: "ghrun-x4qzxsyz2q" } ERROR: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16849: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:16849 } ] 2025-06-30T09:22:57.672947Z node 7 :YQL_PRIVATE_PROXY ERROR: task_get.cpp:72: PrivateGetTask - Owner: a8412538-716aba6c-f3a36683-659666a71, Host: ghrun-x4qzxsyz2q, Tenant: TestTenant, Failed with code: GENERIC_ERROR Details:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16849: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:16849
: Error: ControlPlane::GetTaskError 2025-06-30T09:22:58.177632Z node 7 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:16849: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:16849 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::UpsertBrokenLockArbiter [GOOD] Test command err: 2025-06-30T09:22:52.180700Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:52.180777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:52.180791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00405b/r3tmp/tmp9Xstj4/pdisk_1.dat 2025-06-30T09:22:52.285031Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:52.286221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:52.305196Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:52.306688Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275371729523 != 1751275371729527 2025-06-30T09:22:52.353289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:52.353335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:52.364731Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:52.441946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.459383Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:52.459664Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:52.459750Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:22:52.459809Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:22:52.469308Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:52.469585Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:22:52.469620Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:22:52.469836Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:22:52.469852Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:22:52.469860Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:22:52.469944Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:22:52.469980Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:22:52.469998Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:22:52.480358Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:22:52.485474Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:22:52.485594Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:22:52.485628Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:22:52.485634Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:22:52.485640Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:22:52.485647Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:22:52.485752Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:52.485762Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:52.485888Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:22:52.485917Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:22:52.485932Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:22:52.485941Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:22:52.485950Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:22:52.485958Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:22:52.485966Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:22:52.485972Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:22:52.485979Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:22:52.486004Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:52.486011Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:52.486020Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:22:52.486152Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:22:52.486159Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:22:52.486188Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:22:52.486244Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:22:52.486256Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:22:52.486276Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:22:52.486285Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:22:52.486290Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:22:52.486297Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:22:52.486302Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:22:52.486378Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:22:52.486384Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:22:52.486389Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:22:52.486394Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:22:52.486408Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:22:52.486412Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:22:52.486417Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:22:52.486422Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:22:52.486430Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:22:52.486788Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:22:52.486805Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:22:52.497158Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:22:52.497196Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:22:52.497205Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:22:52.497220Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 80:2560]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:59.592456Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:59.592460Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:892:2732], serverId# [7:893:2733], sessionId# [0:0:0] 2025-06-30T09:22:59.592471Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553169, Sender [7:891:2731], Recipient [7:680:2560]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-30T09:22:59.592550Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:896:2736], Recipient [7:680:2560]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:59.592557Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:59.592562Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:895:2735], serverId# [7:896:2736], sessionId# [0:0:0] 2025-06-30T09:22:59.592582Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [7:894:2734], Recipient [7:680:2560]: NKikimrTxDataShard.TEvRead ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-06-30T09:22:59.592595Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 0 2025-06-30T09:22:59.592599Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037890 CompleteEdge# v1004/1000004 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:22:59.592602Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037890 changed HEAD read to non-repeatable v1004/18446744073709551615 2025-06-30T09:22:59.592606Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037890 on unit CheckRead 2025-06-30T09:22:59.592612Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037890 is Executed 2025-06-30T09:22:59.592615Z node 7 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037890 executing on unit CheckRead 2025-06-30T09:22:59.592617Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037890 to execution unit BuildAndWaitDependencies 2025-06-30T09:22:59.592620Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-30T09:22:59.592624Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037890 2025-06-30T09:22:59.592628Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037890 is Executed 2025-06-30T09:22:59.592630Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-30T09:22:59.592633Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037890 to execution unit ExecuteRead 2025-06-30T09:22:59.592636Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037890 on unit ExecuteRead 2025-06-30T09:22:59.592642Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 1, request: { ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-06-30T09:22:59.592656Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037890 Complete read# {[7:894:2734], 1002} after executionsCount# 1 2025-06-30T09:22:59.592660Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037890 read iterator# {[7:894:2734], 1002} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:22:59.592664Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037890 read iterator# {[7:894:2734], 1002} finished in read 2025-06-30T09:22:59.592668Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037890 is Executed 2025-06-30T09:22:59.592671Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037890 executing on unit ExecuteRead 2025-06-30T09:22:59.592673Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037890 to execution unit CompletedOperations 2025-06-30T09:22:59.592675Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037890 on unit CompletedOperations 2025-06-30T09:22:59.592681Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037890 is Executed 2025-06-30T09:22:59.592683Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037890 executing on unit CompletedOperations 2025-06-30T09:22:59.592686Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037890 has finished 2025-06-30T09:22:59.592688Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2025-06-30T09:22:59.592696Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 
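[Annotation, not part of the log: the read-iterator records above return rowCount# 0 because UpsertBrokenLockArbiter expects the upsert whose optimistic lock was broken to leave no rows behind. A minimal YQL sketch of the scenario this test drives — table name, key, and values are hypothetical, not taken from the log:

    -- tx1 reads a key and thereby acquires an optimistic lock on it
    SELECT value FROM `/Root/table-1` WHERE key = 1u;
    -- a concurrent tx2 commits a write to the same key, breaking tx1's lock
    UPSERT INTO `/Root/table-1` (key, value) VALUES (1u, 99u);
    -- tx1's own upsert now fails at commit (ABORTED), so its row is never written
    UPSERT INTO `/Root/table-1` (key, value) VALUES (1u, 11u);
]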
2025-06-30T09:22:59.592752Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:899:2739], Recipient [7:678:2558]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:59.592755Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:59.592758Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:898:2738], serverId# [7:899:2739], sessionId# [0:0:0] 2025-06-30T09:22:59.592769Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553169, Sender [7:897:2737], Recipient [7:678:2558]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-30T09:22:59.592879Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:902:2742], Recipient [7:678:2558]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:59.592887Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:59.592893Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:901:2741], serverId# [7:902:2742], sessionId# [0:0:0] 2025-06-30T09:22:59.592948Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [7:900:2740], Recipient [7:678:2558]: NKikimrTxDataShard.TEvRead ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-06-30T09:22:59.592986Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037891, FollowerId 0 2025-06-30T09:22:59.592993Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037891 CompleteEdge# v1004/1000004 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:22:59.592999Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037891 changed HEAD read to non-repeatable v1004/18446744073709551615 2025-06-30T09:22:59.593008Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037891 on unit CheckRead 2025-06-30T09:22:59.593021Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037891 is Executed 2025-06-30T09:22:59.593025Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037891 executing on unit CheckRead 2025-06-30T09:22:59.593031Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037891 to execution unit BuildAndWaitDependencies 2025-06-30T09:22:59.593035Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037891 on unit BuildAndWaitDependencies 2025-06-30T09:22:59.593044Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037891 2025-06-30T09:22:59.593050Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037891 is Executed 2025-06-30T09:22:59.593057Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037891 executing on unit BuildAndWaitDependencies 2025-06-30T09:22:59.593061Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 
72075186224037891 to execution unit ExecuteRead 2025-06-30T09:22:59.593066Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037891 on unit ExecuteRead 2025-06-30T09:22:59.593079Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037891 Execute read# 1, request: { ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-06-30T09:22:59.593104Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037891 Complete read# {[7:900:2740], 1003} after executionsCount# 1 2025-06-30T09:22:59.593110Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037891 read iterator# {[7:900:2740], 1003} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:22:59.593120Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037891 read iterator# {[7:900:2740], 1003} finished in read 2025-06-30T09:22:59.593128Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037891 is Executed 2025-06-30T09:22:59.593132Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037891 executing on unit ExecuteRead 2025-06-30T09:22:59.593136Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037891 to execution unit CompletedOperations 2025-06-30T09:22:59.593140Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037891 on unit CompletedOperations 2025-06-30T09:22:59.593148Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037891 is Executed 2025-06-30T09:22:59.593152Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037891 executing on unit CompletedOperations 2025-06-30T09:22:59.593156Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037891 has finished 2025-06-30T09:22:59.593161Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037891 2025-06-30T09:22:59.593176Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037891 >> DataShardWrite::WriteCommitVersion [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-false [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-true >> DataShardWrite::DeletePrepared-Volatile [GOOD] >> DataShardWrite::DelayedVolatileTxAndEvWrite >> TSchemeShardTest::DropBlockStoreVolumeWithNonReplicatedPartitions [GOOD] >> TSchemeShardTest::DropBlockStoreVolume2 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::NotIdempotent [GOOD] Test command err: Trying to start YDB, gRPC: 2632, MsgBus: 9125 2025-06-30T09:22:58.159012Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670475361304955:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:58.160254Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044a9/r3tmp/tmpK8QjQ0/pdisk_1.dat 2025-06-30T09:22:58.257246Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2632, node 1 2025-06-30T09:22:58.278309Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:58.278326Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:58.278329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:58.278380Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:58.298276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:58.298308Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:58.299298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9125 TClient is connected to server localhost:9125 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:58.389295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:58.407957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:58.419347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
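[Annotation, not part of the log: the compile failures further down this block (code 1040, "Batch update is only supported for idempotent updates") reject assignments whose re-execution would change the result, since a shard batch may be retried. A minimal sketch, assuming current YQL BATCH UPDATE syntax; table and column names are hypothetical:

    -- accepted: assigning a constant is idempotent, so a retried shard batch is safe
    BATCH UPDATE `/Root/table-1` SET value = 0u WHERE key > 10u;
    -- rejected with code 1040: a self-referencing assignment double-applies on retry
    BATCH UPDATE `/Root/table-1` SET value = value + 1u WHERE key > 10u;
]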
2025-06-30T09:22:58.524684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:58.596489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:58.632162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:58.930639Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670475361306509:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:58.930704Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:59.036740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.067812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.095687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.120507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.153414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.158907Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:59.177428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.253477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.289470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670479656274470:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:59.289500Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:59.289678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670479656274475:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:59.290826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:59.297838Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670479656274477:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:59.352161Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670479656274528:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:59.653830Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670479656274800:2476], status: GENERIC_ERROR, issues:
: Error: Table intent determination, code: 1040
:3:43: Error: Batch update is only supported for idempotent updates. 2025-06-30T09:22:59.654538Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YWE3YTNjMDItZjEzNDQ5YjUtZjk1YTg4NDgtMjkyMjI1OWY=, ActorId: [1:7521670479656274791:2470], ActorState: ExecuteState, TraceId: 01jz02b6xsfffd3jzbf8cn5vqz, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:22:59.659733Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670479656274804:2478], status: GENERIC_ERROR, issues:
: Error: Table intent determination, code: 1040
:3:43: Error: Batch update is only supported for idempotent updates. 2025-06-30T09:22:59.660852Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YWE3YTNjMDItZjEzNDQ5YjUtZjk1YTg4NDgtMjkyMjI1OWY=, ActorId: [1:7521670479656274791:2470], ActorState: ExecuteState, TraceId: 01jz02b6y821f15wqg1bsg9cxr, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:22:59.665630Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670479656274808:2480], status: GENERIC_ERROR, issues:
: Error: Table intent determination, code: 1040
:3:51: Error: Batch update is only supported for idempotent updates. 2025-06-30T09:22:59.666263Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YWE3YTNjMDItZjEzNDQ5YjUtZjk1YTg4NDgtMjkyMjI1OWY=, ActorId: [1:7521670479656274791:2470], ActorState: ExecuteState, TraceId: 01jz02b6ye3sxw0b9zya43yz5w, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-true [GOOD] >> TSchemeShardDecimalTypesInTables::CopyTableShouldNotFailOnDisabledFeatureFlag >> BsControllerConfig::DeleteStoragePool [GOOD] >> TSchemeShardTest::DropBlockStoreVolume2 [GOOD] >> TSchemeShardTest::DropBlockStoreVolumeWithFillGeneration >> TPersQueueTest::BadTopic [GOOD] >> TPersQueueTest::CloseActiveWriteSessionOnClusterDisable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::WriteCommitVersion [GOOD] Test command err: 2025-06-30T09:22:51.717593Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:51.717662Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:51.717674Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004063/r3tmp/tmpmgw30X/pdisk_1.dat 2025-06-30T09:22:51.818063Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:51.819198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:51.839695Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:51.841172Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275371287668 != 1751275371287672 2025-06-30T09:22:51.883647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:51.883693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:51.894455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:51.974192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:51.997740Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:51.998046Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:51.998171Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:22:51.998260Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:22:52.014712Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:52.015041Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:22:52.015083Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:22:52.015324Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:22:52.015337Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:22:52.015346Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:22:52.015427Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:22:52.015456Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:22:52.015475Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:22:52.026192Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:22:52.032943Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:22:52.033072Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:22:52.033110Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:22:52.033117Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:22:52.033124Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:22:52.033131Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:22:52.033231Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:52.033241Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:52.033368Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:22:52.033397Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:22:52.033413Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:22:52.033425Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:22:52.033435Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:22:52.033442Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:22:52.033448Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:22:52.033455Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:22:52.033462Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:22:52.033487Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:52.033494Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:52.033503Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:22:52.033638Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:22:52.033646Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:22:52.033671Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:22:52.033748Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:22:52.033762Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:22:52.033784Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:22:52.033795Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:22:52.033801Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:22:52.033808Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:22:52.033814Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:22:52.033873Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:22:52.033879Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:22:52.033884Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:22:52.033889Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:22:52.033901Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:22:52.033905Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:22:52.033912Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:22:52.033916Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:22:52.033922Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:22:52.034257Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:22:52.034267Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:22:52.044669Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:22:52.044709Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:22:52.044719Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:22:52.044733Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 1474976715666] at 72075186224037890 is Executed 2025-06-30T09:23:00.291752Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1506:281474976715666] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-30T09:23:00.291756Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1506:281474976715666] at 72075186224037890 to execution unit ExecuteWrite 2025-06-30T09:23:00.291761Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1506:281474976715666] at 72075186224037890 on unit ExecuteWrite 2025-06-30T09:23:00.291765Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [1506:281474976715666] at 72075186224037890 2025-06-30T09:23:00.291770Z node 7 :TX_DATASHARD TRACE: datashard_kqp.cpp:694: Send commit decision from 72075186224037890 to 72075186224037889 2025-06-30T09:23:00.291775Z node 7 :TX_DATASHARD TRACE: datashard_kqp.cpp:725: Will wait for volatile decision from 72075186224037889 to 72075186224037890 2025-06-30T09:23:00.291790Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [1506:281474976715666] at 72075186224037890, row count=1 2025-06-30T09:23:00.291815Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:180: Deleted RS at 72075186224037890 source 72075186224037890 dest 72075186224037888 consumer 72075186224037888 seqno 1 txId 281474976715660 2025-06-30T09:23:00.291824Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:180: Deleted RS at 72075186224037890 source 72075186224037890 dest 72075186224037889 consumer 72075186224037889 seqno 2 txId 281474976715660 2025-06-30T09:23:00.291836Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-30T09:23:00.291843Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1506:281474976715666] at 72075186224037890 is DelayCompleteNoMoreRestarts 2025-06-30T09:23:00.291847Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1506:281474976715666] at 72075186224037890 executing on unit ExecuteWrite 2025-06-30T09:23:00.291852Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1506:281474976715666] at 72075186224037890 to execution unit CompleteWrite 2025-06-30T09:23:00.291856Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to 
execute [1506:281474976715666] at 72075186224037890 on unit CompleteWrite 2025-06-30T09:23:00.291871Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1506:281474976715666] at 72075186224037890 is DelayComplete 2025-06-30T09:23:00.291875Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1506:281474976715666] at 72075186224037890 executing on unit CompleteWrite 2025-06-30T09:23:00.291879Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1506:281474976715666] at 72075186224037890 to execution unit CompletedOperations 2025-06-30T09:23:00.291884Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1506:281474976715666] at 72075186224037890 on unit CompletedOperations 2025-06-30T09:23:00.291889Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1506:281474976715666] at 72075186224037890 is Executed 2025-06-30T09:23:00.291893Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1506:281474976715666] at 72075186224037890 executing on unit CompletedOperations 2025-06-30T09:23:00.291897Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1506:281474976715666] at 72075186224037890 has finished 2025-06-30T09:23:00.291901Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:00.291905Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-30T09:23:00.291909Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-30T09:23:00.291913Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-30T09:23:00.292057Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:668:2553], Recipient [7:673:2555]: {TEvReadSet step# 1506 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-30T09:23:00.292066Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:00.292073Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715666 2025-06-30T09:23:00.292084Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1506 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-30T09:23:00.292128Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:673:2555], Recipient [7:668:2553]: {TEvReadSet step# 1506 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-30T09:23:00.292133Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:00.292138Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037890 dest 72075186224037889 producer 
72075186224037890 txId 281474976715666 2025-06-30T09:23:00.292145Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1506 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-30T09:23:00.292175Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1506} 2025-06-30T09:23:00.292241Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 1506} 2025-06-30T09:23:00.292292Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:23:00.292298Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1506:281474976715666] at 72075186224037889 on unit ExecuteWrite 2025-06-30T09:23:00.292306Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 3 at 72075186224037889 from 72075186224037889 to 72075186224037890 txId 281474976715666 2025-06-30T09:23:00.292312Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1506:281474976715666] at 72075186224037889 on unit CompleteWrite 2025-06-30T09:23:00.292328Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:23:00.292345Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-30T09:23:00.292394Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:668:2553], Recipient [7:673:2555]: {TEvReadSet step# 1506 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-30T09:23:00.292399Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:00.292403Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715666 2025-06-30T09:23:00.292410Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1506 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-30T09:23:00.292512Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-30T09:23:00.292519Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1506:281474976715666] at 72075186224037890 on unit ExecuteWrite 2025-06-30T09:23:00.292524Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 3 at 72075186224037890 from 72075186224037890 to 72075186224037889 txId 281474976715666 2025-06-30T09:23:00.292529Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1506:281474976715666] at 72075186224037890 on unit CompleteWrite 2025-06-30T09:23:00.292542Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:755: Complete volatile write [1506 : 281474976715666] from 72075186224037890 at tablet 72075186224037890 send result to client [7:1033:2793] 2025-06-30T09:23:00.292548Z node 7 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:23:00.292556Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-30T09:23:00.292623Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:673:2555], Recipient [7:668:2553]: {TEvReadSet step# 1506 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-30T09:23:00.292631Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:00.292637Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037890 dest 72075186224037889 producer 72075186224037890 txId 281474976715666 2025-06-30T09:23:00.292645Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1506 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-30T09:23:00.292661Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:755: Complete volatile write [1506 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send result to client [7:1033:2793] 2025-06-30T09:23:00.292684Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-30T09:23:00.292778Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:23:00.292796Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:23:00.292819Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [7:673:2555], Recipient [7:668:2553]: {TEvReadSet step# 1506 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletConsumer# 72075186224037890 Flags# 0 Seqno# 3} 2025-06-30T09:23:00.292825Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:00.292832Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715666 >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeDir >> BackupPathTest::ExportWholeDatabase >> TPersQueueTest::DirectReadPreCached [GOOD] >> TPersQueueTest::DirectReadNotCached >> TSchemeShardDecimalTypesInTables::CopyTableShouldNotFailOnDisabledFeatureFlag [GOOD] >> TSchemeShardDecimalTypesInTables::CreateWithWrongParameters >> TSchemeShardTest::DropBlockStoreVolumeWithFillGeneration [GOOD] >> TSchemeShardTest::DocumentApiVersion >> DataShardWrite::UpsertNoLocksArbiterRestart [GOOD] >> DataShardWrite::UpsertLostPrepareArbiterRestart ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::DeleteStoragePool [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:199:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:199:2077] Leader for TabletID 72057594037932033 is [1:235:2079] sender: [1:236:2066] recipient: [1:199:2077] 
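The TX_DATASHARD trace above walks each operation through a chain of execution units (CheckSchemeTx → StoreSchemeTx → FinishPropose → WaitForPlan for the scheme tx; BuildAndWaitDependencies → ExecuteWrite → CompleteWrite → CompletedOperations for the volatile write), advancing while a unit reports Executed, deferring completion work on DelayComplete, and parking the operation when a unit "is not ready". A minimal C++ sketch of that loop, assuming simplified types — TUnit, EStatus and the lambdas below are illustrative stand-ins, not YDB's actual classes; only the unit names and status strings come from the trace:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Outcomes a unit can report, mirroring the statuses printed by
// datashard_pipeline.cpp above: Executed(NoMoreRestarts),
// DelayComplete(NoMoreRestarts), and "is not ready".
enum class EStatus { Executed, DelayComplete, NotReady };

// Illustrative stand-in for one execution unit.
struct TUnit {
    std::string Name;
    std::function<EStatus()> Execute; // runs inside TTxProposeTransactionBase::Execute
    std::function<void()> Complete;   // deferred to TTxProposeTransactionBase::Complete
};

int main() {
    // Execution plan for the scheme tx [0:281474976715657] as traced above.
    std::vector<TUnit> plan = {
        {"CheckSchemeTx", [] { return EStatus::Executed; }, [] {}},
        {"StoreSchemeTx", [] { return EStatus::DelayComplete; },
         [] { std::cout << "Complete execution on unit StoreSchemeTx\n"; }},
        {"FinishPropose", [] { return EStatus::DelayComplete; },
         [] { std::cout << "Complete execution on unit FinishPropose\n"; }},
        {"WaitForPlan", [] { return EStatus::NotReady; }, [] {}},
    };

    std::vector<const TUnit*> delayed;
    for (const auto& unit : plan) {
        std::cout << "Trying to execute on unit " << unit.Name << "\n";
        EStatus status = unit.Execute();
        if (status == EStatus::NotReady) {
            // The operation parks until an external event (the plan step
            // arriving) re-activates the pipeline -- matching "is not ready
            // to execute on unit WaitForPlan" in the log.
            std::cout << unit.Name << ": operation is not ready\n";
            break;
        }
        if (status == EStatus::DelayComplete) {
            delayed.push_back(&unit); // completion runs only after the local tx commits
        }
        std::cout << "Advance execution plan past unit " << unit.Name << "\n";
    }

    // The later "TTxProposeTransactionBase::Complete at 72075186224037888"
    // entries flush exactly these delayed completions, in unit order.
    for (const auto* unit : delayed) {
        unit->Complete();
    }
}
```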
2025-06-30T09:22:38.965189Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:38.966079Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:38.966146Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:38.966454Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:38.966548Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:38.966571Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:38.966577Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:38.966620Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:38.968989Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:38.969025Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:38.969059Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:38.969076Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:38.969089Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:38.969099Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:235:2079] sender: [1:257:2066] recipient: [1:20:2067] 2025-06-30T09:22:38.979498Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:38.979540Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:38.989819Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:38.989864Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:38.989892Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:38.989905Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:38.989931Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:38.989940Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:38.989947Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:38.989956Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:39.000262Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:39.000311Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:39.010641Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:39.010709Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:39.010991Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:39.011002Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:39.011048Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:22:39.011057Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:39.013739Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {} Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] Leader for TabletID 72057594037932033 is [11:233:2079] sender: [11:236:2066] recipient: [11:204:2077] 2025-06-30T09:22:40.764094Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:40.764326Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:40.764383Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:40.764464Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:40.764610Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:40.764630Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:40.764633Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:40.764666Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:40.765326Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:40.765357Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:40.765379Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx 
IncompatibleData# false 2025-06-30T09:22:40.765393Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:40.765404Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:40.765410Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:233:2079] sender: [11:257:2066] recipient: [11:20:2067] 2025-06-30T09:22:40.775747Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:40.775796Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:40.786132Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:40.786182Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:40.786199Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:40.786211Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:40.786236Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:40.786246Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:40.786252Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:40.786261Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:40.796586Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:40.796640Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:40.807003Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:40.807057Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:40.807274Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:40.807283Z node 11 
:BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:40.807328Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:22:40.807337Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:40.807516Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {} Leader for TabletID 72057594037932033 is [0:0:0] sender: [21:3066:2106] recipient: [21:2964:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [21:3066:2106] recipient: [21:2964:2117] Leader for TabletID 72057594037932033 is [21:3113:2119] sender: [21:3115:2106] recipient: [21:2964:2117] 2025-06-30T09:22:42.833269Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:42.833539Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:42.833606Z n ... 1 Path# /dev/disk2 2025-06-30T09:22:51.859578Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 96:1002 Path# /dev/disk3 2025-06-30T09:22:51.859584Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 97:1000 Path# /dev/disk1 2025-06-30T09:22:51.859591Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 97:1001 Path# /dev/disk2 2025-06-30T09:22:51.859597Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 97:1002 Path# /dev/disk3 2025-06-30T09:22:51.859603Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1000 Path# /dev/disk1 2025-06-30T09:22:51.859609Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1001 Path# /dev/disk2 2025-06-30T09:22:51.859615Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1002 Path# /dev/disk3 2025-06-30T09:22:51.859621Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1000 Path# /dev/disk1 2025-06-30T09:22:51.859629Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1001 Path# /dev/disk2 2025-06-30T09:22:51.859635Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1002 Path# /dev/disk3 2025-06-30T09:22:51.859641Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1000 Path# /dev/disk1 2025-06-30T09:22:51.859647Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1001 Path# /dev/disk2 2025-06-30T09:22:51.859653Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1002 Path# /dev/disk3 2025-06-30T09:22:51.859659Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1000 Path# /dev/disk1 2025-06-30T09:22:51.859664Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1001 Path# /dev/disk2 2025-06-30T09:22:51.859672Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1002 Path# /dev/disk3 2025-06-30T09:22:51.859678Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new 
pdisk PDiskId# 102:1000 Path# /dev/disk1 2025-06-30T09:22:51.859684Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 102:1001 Path# /dev/disk2 2025-06-30T09:22:51.859691Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 102:1002 Path# /dev/disk3 2025-06-30T09:22:51.859697Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1000 Path# /dev/disk1 2025-06-30T09:22:51.859704Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1001 Path# /dev/disk2 2025-06-30T09:22:51.859710Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1002 Path# /dev/disk3 2025-06-30T09:22:51.859717Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1000 Path# /dev/disk1 2025-06-30T09:22:51.859722Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1001 Path# /dev/disk2 2025-06-30T09:22:51.859729Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1002 Path# /dev/disk3 2025-06-30T09:22:51.859735Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1000 Path# /dev/disk1 2025-06-30T09:22:51.859742Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1001 Path# /dev/disk2 2025-06-30T09:22:51.859748Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1002 Path# /dev/disk3 2025-06-30T09:22:51.859754Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1000 Path# /dev/disk1 2025-06-30T09:22:51.859760Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1001 Path# /dev/disk2 2025-06-30T09:22:51.859766Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1002 Path# /dev/disk3 2025-06-30T09:22:51.859772Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1000 Path# /dev/disk1 2025-06-30T09:22:51.859778Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1001 Path# /dev/disk2 2025-06-30T09:22:51.859784Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1002 Path# /dev/disk3 2025-06-30T09:22:51.859790Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1000 Path# /dev/disk1 2025-06-30T09:22:51.859798Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1001 Path# /dev/disk2 2025-06-30T09:22:51.859804Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1002 Path# /dev/disk3 2025-06-30T09:22:51.859810Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1000 Path# /dev/disk1 2025-06-30T09:22:51.859816Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1001 Path# /dev/disk2 2025-06-30T09:22:51.859823Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1002 Path# /dev/disk3 2025-06-30T09:22:51.859833Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1000 
Path# /dev/disk1 2025-06-30T09:22:51.859839Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1001 Path# /dev/disk2 2025-06-30T09:22:51.859845Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1002 Path# /dev/disk3 2025-06-30T09:22:51.859852Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1000 Path# /dev/disk1 2025-06-30T09:22:51.859858Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1001 Path# /dev/disk2 2025-06-30T09:22:51.859865Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1002 Path# /dev/disk3 2025-06-30T09:22:51.859870Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1000 Path# /dev/disk1 2025-06-30T09:22:51.859878Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1001 Path# /dev/disk2 2025-06-30T09:22:51.859885Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1002 Path# /dev/disk3 2025-06-30T09:22:51.859891Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1000 Path# /dev/disk1 2025-06-30T09:22:51.859898Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1001 Path# /dev/disk2 2025-06-30T09:22:51.859905Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1002 Path# /dev/disk3 2025-06-30T09:22:51.859911Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1000 Path# /dev/disk1 2025-06-30T09:22:51.859917Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1001 Path# /dev/disk2 2025-06-30T09:22:51.859924Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1002 Path# /dev/disk3 2025-06-30T09:22:51.859930Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1000 Path# /dev/disk1 2025-06-30T09:22:51.859937Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1001 Path# /dev/disk2 2025-06-30T09:22:51.859943Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1002 Path# /dev/disk3 2025-06-30T09:22:51.859949Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1000 Path# /dev/disk1 2025-06-30T09:22:51.859955Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1001 Path# /dev/disk2 2025-06-30T09:22:51.859961Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1002 Path# /dev/disk3 2025-06-30T09:22:51.859968Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1000 Path# /dev/disk1 2025-06-30T09:22:51.859976Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1001 Path# /dev/disk2 2025-06-30T09:22:51.859982Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1002 Path# /dev/disk3 2025-06-30T09:22:51.859988Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1000 Path# /dev/disk1 
2025-06-30T09:22:51.859994Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1001 Path# /dev/disk2 2025-06-30T09:22:51.860001Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1002 Path# /dev/disk3 2025-06-30T09:22:51.860006Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1000 Path# /dev/disk1 2025-06-30T09:22:51.860013Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1001 Path# /dev/disk2 2025-06-30T09:22:51.860019Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1002 Path# /dev/disk3 2025-06-30T09:22:51.860025Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1000 Path# /dev/disk1 2025-06-30T09:22:51.860031Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1001 Path# /dev/disk2 2025-06-30T09:22:51.860038Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1002 Path# /dev/disk3 2025-06-30T09:22:51.873574Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool 1" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 50 PDiskFilter { Property { Type: ROT } } } } } 2025-06-30T09:22:51.915404Z node 71 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 71 Type# 268639257 2025-06-30T09:22:51.917496Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 StoragePoolId: 2 Name: "storage pool 2" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 50 PDiskFilter { Property { Type: SSD } } } } Command { DeleteStoragePool { BoxId: 1 StoragePoolId: 2 ItemConfigGeneration: 1 } } } 2025-06-30T09:22:51.950470Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DeleteStoragePool { BoxId: 1 StoragePoolId: 1 ItemConfigGeneration: 1 } } Command { QueryBaseConfig { } } } 2025-06-30T09:22:51.967891Z node 71 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 71 Type# 268639257 >> TSchemeShardDecimalTypesInTables::CreateWithWrongParameters [GOOD] >> TSchemeShardDecimalTypesInTables::AlterWithWrongParameters >> ReadIteratorExternalBlobs::ExtBlobsEmptyTable [GOOD] >> ReadIteratorExternalBlobs::NotExtBlobs >> BackupRestoreS3::TestAllPrimitiveTypes-PRIMITIVE_TYPE_ID_UNSPECIFIED [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT8 |82.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |82.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk >> TSchemeShardTest::DocumentApiVersion [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Dir >> Yq_1::Basic_EmptyDict [GOOD] >> DataShardWrite::DelayedVolatileTxAndEvWrite [GOOD] >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit >> TSchemeShardDecimalTypesInTables::AlterWithWrongParameters [GOOD] >> TSchemeShardInfoTypesTest::EmptyFamilies 
[GOOD] >> TSchemeShardInfoTypesTest::LostId [GOOD] >> TSchemeShardInfoTypesTest::DeduplicationOrder [GOOD] >> TSchemeShardInfoTypesTest::MultipleDeduplications [GOOD] >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-false >> TSchemeShardTest::DisablePublicationsOfDropping_Dir [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Table >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeDir [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBlockStoreVolume [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExtSubDomain [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeColumnStore [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeColumnTable [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeCdcStream >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedExport >> SystemView::ShowCreateTablePartitionPolicyIndexTable [GOOD] >> SystemView::StoragePoolsFields >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeInvalid [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeDir [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypePersQueueGroup [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeRtmrVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBlockStoreVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeKesus [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExtSubDomain [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeFileStore [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeColumnStore [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeColumnTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeCdcStream >> BackupPathTest::ExportWholeDatabase [GOOD] |82.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |82.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-false [GOOD] >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTable >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/fq/ut_integration/unittest >> Yq_1::Basic_EmptyDict [GOOD] Test command err: 2025-06-30T09:22:48.554707Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670433320732739:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:48.554733Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0630 09:22:48.601558692 357580 dns_resolver_ares.cc:452] no server name supplied in dns URI E0630 09:22:48.601619962 357580 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-30T09:22:49.052397Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670437615700299:2270], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:49.052472Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00438e/r3tmp/tmp8sf4mx/pdisk_1.dat 2025-06-30T09:22:49.127246Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 9845, node 1 TClient is connected to server localhost:24082 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:49.264945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:49.275933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:49.275972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:49.277700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:49.421022Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:49.421492Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:49.421504Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:49.421508Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:49.421589Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:49.556752Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:49.608387Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". 
Create session OK 2025-06-30T09:22:49.608406Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-30T09:22:49.608409Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-30T09:22:49.608432Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-30T09:22:49.608437Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-30T09:22:49.608439Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-30T09:22:49.608872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:22:49.608877Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-30T09:22:49.608881Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-30T09:22:49.608883Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-30T09:22:49.608943Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". Create session OK 2025-06-30T09:22:49.608951Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-30T09:22:49.608952Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-30T09:22:49.609668Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-30T09:22:49.609673Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-30T09:22:49.609678Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-30T09:22:49.609679Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-30T09:22:49.609681Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-30T09:22:49.609681Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-30T09:22:49.609864Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". Create session OK 2025-06-30T09:22:49.609876Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-30T09:22:49.609878Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" 2025-06-30T09:22:49.609960Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-30T09:22:49.609963Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-30T09:22:49.609965Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-30T09:22:49.609965Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". 
Create session OK 2025-06-30T09:22:49.609968Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-30T09:22:49.609969Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-30T09:22:49.610307Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/queries". Create session OK 2025-06-30T09:22:49.610320Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/queries" 2025-06-30T09:22:49.610322Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/queries" 2025-06-30T09:22:49.610344Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "Root/yq" 2025-06-30T09:22:49.610350Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "Root/yq": 2025-06-30T09:22:49.610358Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-30T09:22:49.610361Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-30T09:22:49.610362Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-30T09:22:49.610450Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". Create session OK 2025-06-30T09:22:49.610452Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-30T09:22:49.610453Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-30T09:22:49.610509Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". Create session OK 2025-06-30T09:22:49.610511Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-30T09:22:49.610513Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-30T09:22:49.612243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.612591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.612787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.613228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first 
called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.613476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:49.613707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itsel ... Set execution timeout 299.999371s 2025-06-30T09:23:01.486109Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1452: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Create sink for output 0 { Sink { Type: "KqpTableSink" Settings { type_url: "type.googleapis.com/NKikimrKqp.TKqpTableSinkSettings" value: "\032\036\n\016Root/yq/quotas\020\200\202\224\204\200\200\200\200\001\030\005(\001\"\023\n\014subject_type\020\001 \201 \"\021\n\nsubject_id\020\002 \201 \"\022\n\013metric_name\020\003 \201 *\026\n\020limit_updated_at\020\005 2*\022\n\014metric_limit\020\004 \004*\022\n\013metric_name\020\003 \201 *\022\n\014metric_usage\020\006 \004*\021\n\nsubject_id\020\002 \201 *\023\n\014subject_type\020\001 \201 *\026\n\020usage_updated_at\020\007 20\336\247\200\200\200\200@8\007@\000H\001R\022\tu\0207\205uWbh\0211\t\000\000\007\000\020\000X\000`\000h\004h\003h\002h\005h\001h\000h\006x\000" } } } 2025-06-30T09:23:01.486142Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-30T09:23:01.486149Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: 2025-06-30T09:23:01.486179Z node 7 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:358: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. About to drain async output 0. FreeSpace: 67108864, allowedOvercommit: 4194304, toSend: 71303168, finished: 0 2025-06-30T09:23:01.486196Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3311: TxId: 281474976715744, task: 1. Add data: 78 / 78 2025-06-30T09:23:01.486206Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3280: TxId: 281474976715744, task: 1. 
Send data=78, closed=1, bufferActorId=[7:7521670489919918197:2353] 2025-06-30T09:23:01.486213Z node 7 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:372: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Drain async output 0. Free space decreased: -9223372036787666944, sent data from buffer: 78 2025-06-30T09:23:01.486216Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715744, task: 1. Tasks execution finished 2025-06-30T09:23:01.486218Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-30T09:23:01.486222Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-30T09:23:01.486225Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: 2025-06-30T09:23:01.486227Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715744, task: 1. Tasks execution finished 2025-06-30T09:23:01.486229Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-30T09:23:01.486237Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:23:01.486239Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715744, task: 1. Tasks execution finished 2025-06-30T09:23:01.486241Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. 
Waiting finish of sink[0] 2025-06-30T09:23:01.486257Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1860: SelfId: [7:7521670489919918197:2353], SessionActorId: [7:7521670477035013418:2353], Create new TableWriteActor for table `Root/yq/quotas` ([72057594046644480:5:1]). lockId=281474976715742. ActorId=[7:7521670489919918204:2353] 2025-06-30T09:23:01.486268Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:388: Table: `Root/yq/quotas` ([72057594046644480:5:1]), SessionActorId: [7:7521670477035013418:2353]Open: token=0 2025-06-30T09:23:01.486274Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1987: SelfId: [7:7521670489919918197:2353], SessionActorId: [7:7521670477035013418:2353], ProcessRequestQueue [OwnerId: 72057594046644480, LocalPathId: 5] NOT READY queue=1 2025-06-30T09:23:01.486285Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:394: SelfId: [7:7521670489919918204:2353], Table: `Root/yq/quotas` ([72057594046644480:5:1]), SessionActorId: [7:7521670477035013418:2353]Write: token=0 2025-06-30T09:23:01.486297Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:402: SelfId: [7:7521670489919918204:2353], Table: `Root/yq/quotas` ([72057594046644480:5:1]), SessionActorId: [7:7521670477035013418:2353]Close: token=0 2025-06-30T09:23:01.486306Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3180: SelfId: [7:7521670489919918203:2353], TxId: 281474976715744, task: 1. TKqpForwardWriteActor recieve EvBufferWriteResult from [7:7521670489919918197:2353] 2025-06-30T09:23:01.486310Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3198: SelfId: [7:7521670489919918203:2353], TxId: 281474976715744, task: 1. Finished 2025-06-30T09:23:01.486314Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:23:01.486316Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715744, task: 1. Tasks execution finished 2025-06-30T09:23:01.486318Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [7:7521670489919918201:2353], TxId: 281474976715744, task: 1. Ctx: { TraceId : 01jz02b8qdcjrjbny2f1277reh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=MzNmMDlkYjAtN2JmOTY4OWYtNTQ2MTUxNjEtOTQ3ZjY1ZDI=. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-30T09:23:01.486341Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715744, task: 1. pass away 2025-06-30T09:23:01.486359Z node 7 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715744;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-30T09:23:01.486435Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2110: SelfId: [7:7521670489919918197:2353], SessionActorId: [7:7521670477035013418:2353], Start immediate commit 2025-06-30T09:23:01.486437Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:923: SelfId: [7:7521670489919918204:2353], Table: `Root/yq/quotas` ([72057594046644480:5:1]), SessionActorId: [7:7521670477035013418:2353]SetImmediateCommit 2025-06-30T09:23:01.486440Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2052: SelfId: [7:7521670489919918197:2353], SessionActorId: [7:7521670477035013418:2353], Flush data 2025-06-30T09:23:01.486476Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1050: SelfId: [7:7521670489919918204:2353], Table: `Root/yq/quotas` ([72057594046644480:5:1]), SessionActorId: [7:7521670477035013418:2353]Send EvWrite to ShardID=72075186224037890, isPrepare=0, isImmediateCommit=1, TxId=0, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715742 DataShard: 72075186224037890 Generation: 1 Counter: 11 SchemeShard: 72057594046644480 PathId: 5, Size=136, Cookie=1, OperationsCount=1, IsFinal=1, Attempts=0, Mode=3, BufferMemory=136 2025-06-30T09:23:01.487624Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [7:7521670489919918193:2354], Table: `Root/yq/quotas` ([72057594046644480:5:1]), SessionActorId: [7:7521670477035013419:2354]Recv EvWriteResult from ShardID=72075186224037890, Status=STATUS_COMPLETED, TxId=25, Locks= , Cookie=1 2025-06-30T09:23:01.487633Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [7:7521670489919918193:2354], Table: `Root/yq/quotas` ([72057594046644480:5:1]), SessionActorId: [7:7521670477035013419:2354]Got completed result TxId=25, TabletId=72075186224037890, Cookie=1, Mode=3, Locks= 2025-06-30T09:23:01.487649Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [7:7521670489919918186:2354], SessionActorId: [7:7521670477035013419:2354], Committed TxId=0 2025-06-30T09:23:01.488082Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [7:7521670489919918204:2353], Table: `Root/yq/quotas` ([72057594046644480:5:1]), SessionActorId: [7:7521670477035013418:2353]Recv EvWriteResult from ShardID=72075186224037890, Status=STATUS_COMPLETED, TxId=26, Locks= , Cookie=1 2025-06-30T09:23:01.488087Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [7:7521670489919918204:2353], Table: `Root/yq/quotas` ([72057594046644480:5:1]), SessionActorId: [7:7521670477035013418:2353]Got completed result TxId=26, TabletId=72075186224037890, Cookie=1, Mode=3, Locks= 2025-06-30T09:23:01.488097Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [7:7521670489919918197:2353], SessionActorId: [7:7521670477035013418:2353], Committed TxId=0 >> TSchemeShardTest::DisablePublicationsOfDropping_Table [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_IndexedTable >> BackupPathTest::ExportWholeDatabaseWithEncryption >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedExport [GOOD] >> BackupRestoreS3::RestoreTablePartitioningSettings |82.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |82.5%| [LD] {RESULT} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut >> TSchemeShardTest::DisablePublicationsOfDropping_IndexedTable [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Pq ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:44.759423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:44.759444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.759450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:44.759454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:44.759459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:44.759464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:44.759472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.766291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:44.766419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:44.766500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:44.787946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:44.787970Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:44.791520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:44.791573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:44.791595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:44.793487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:44.793562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:44.793651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 
2025-06-30T09:22:44.793704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:44.801618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.801703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:44.802032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:44.802072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:44.802078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:44.802095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.803730Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:44.824076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:44.824147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.824203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:44.824210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:44.824265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:44.824279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:44.824961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: 
TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.825000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:44.825038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.825048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:44.825053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:44.825058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:44.825488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.825500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:44.825506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:44.825887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.825899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.825906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.825912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:44.832637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:44.833292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:44.833326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:44.833514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:44.833622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:44.833666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:44.834143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.834152Z node 1 :FLAT_TX_SCHEMESHARD ... 4046678944 2025-06-30T09:23:02.799420Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 102 ready parts: 1/1 2025-06-30T09:23:02.799472Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409546 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 5000003 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:23:02.800217Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-30T09:23:02.800269Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000003 2025-06-30T09:23:02.800494Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:02.800523Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 51539609709 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:02.800553Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:357: TAlterTable TPropose operationId# 102:0 HandleReply 
TEvOperationPlan, operationId: 102:0, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-30T09:23:02.800674Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 128 -> 129 2025-06-30T09:23:02.800715Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-30T09:23:02.802451Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:02.802466Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:23:02.802543Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:02.802550Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [12:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-30T09:23:02.802674Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:23:02.802686Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-30T09:23:02.802858Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:23:02.802876Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:23:02.802882Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:23:02.802890Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-30T09:23:02.802899Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:23:02.802919Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-30T09:23:02.803211Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6376: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 368 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-30T09:23:02.803225Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:23:02.803251Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 368 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-30T09:23:02.803271Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 368 } } CommitVersion { Step: 5000003 TxId: 102 } FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:23:02.803723Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 314 RawX2: 51539609851 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-30T09:23:02.803735Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:23:02.803755Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 314 RawX2: 51539609851 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-30T09:23:02.803764Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-30T09:23:02.803776Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 314 RawX2: 51539609851 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-30T09:23:02.803796Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:02.803802Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:23:02.803809Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:23:02.803817Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240 2025-06-30T09:23:02.804550Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, 
cookie: 102 2025-06-30T09:23:02.804609Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:23:02.804641Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:23:02.804709Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:23:02.804719Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:23:02.804735Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:23:02.804741Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:23:02.804748Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:23:02.804752Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:23:02.804758Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:23:02.804774Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [12:342:2319] message: TxId: 102 2025-06-30T09:23:02.804782Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:23:02.804789Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:23:02.804795Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:23:02.804828Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:23:02.805261Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:23:02.805275Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [12:400:2370] TestWaitNotification: OK eventTxId 102 |82.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |82.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk >> DataShardWrite::UpsertLostPrepareArbiterRestart [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePathSpecified >> BackupRestoreS3::TestAllPrimitiveTypes-INT8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT8 >> SelfHeal::TestThreeMaintenanceRequestsMirror3dc [GOOD] >> SelfHeal::TestThreeMaintenanceRequests4Plus2Block >> TSchemeShardTest::DisablePublicationsOfDropping_Pq [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Solomon >> 
BackupPathTest::ExportWholeDatabaseWithEncryption [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::UpsertLostPrepareArbiterRestart [GOOD] Test command err: 2025-06-30T09:22:53.001151Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:53.001250Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:53.001268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00404f/r3tmp/tmpzFSy6P/pdisk_1.dat 2025-06-30T09:22:53.104873Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:53.105894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:53.123843Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:53.124927Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275372529783 != 1751275372529787 2025-06-30T09:22:53.167094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:53.167133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:53.177783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:53.251786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:53.270981Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:53.271270Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:53.271395Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:22:53.271486Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:22:53.280710Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:53.280999Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:22:53.281033Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:22:53.281260Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:22:53.281271Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:22:53.281279Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:22:53.281363Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:22:53.281386Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:22:53.281401Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:22:53.291772Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:22:53.297405Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:22:53.297497Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:22:53.297526Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:22:53.297532Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:22:53.297538Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:22:53.297545Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:22:53.297618Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:53.297627Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:53.297743Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:22:53.297773Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:22:53.297786Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:22:53.297799Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:22:53.297808Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:22:53.297816Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:22:53.297820Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:22:53.297826Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:22:53.297833Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:22:53.297856Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:53.297862Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:53.297870Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:22:53.297984Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:22:53.297991Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:22:53.298017Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:22:53.298078Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:22:53.298090Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:22:53.298113Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:22:53.298122Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:22:53.298128Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:22:53.298134Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:22:53.298139Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:22:53.298211Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:22:53.298216Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:22:53.298221Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:22:53.298226Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:22:53.298240Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:22:53.298244Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:22:53.298251Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:22:53.298274Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:22:53.298280Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:22:53.298598Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:22:53.298609Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:22:53.308972Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:22:53.309005Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:22:53.309024Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:22:53.309039Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... : NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:03.624576Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:03.624582Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:927:2743], serverId# [7:928:2744], sessionId# [0:0:0] 2025-06-30T09:23:03.624601Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553169, Sender [7:926:2742], Recipient [7:680:2560]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-30T09:23:03.624702Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:931:2747], Recipient [7:680:2560]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:03.624708Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:03.624714Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:930:2746], serverId# [7:931:2747], sessionId# [0:0:0] 2025-06-30T09:23:03.624744Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [7:929:2745], Recipient [7:680:2560]: NKikimrTxDataShard.TEvRead ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-06-30T09:23:03.624767Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 0 2025-06-30T09:23:03.624773Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037890 CompleteEdge# v1001/1000001 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:23:03.624779Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037890 changed HEAD read to non-repeatable v4000/18446744073709551615 2025-06-30T09:23:03.624786Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit CheckRead 2025-06-30T09:23:03.624796Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-30T09:23:03.624801Z node 7 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit CheckRead 2025-06-30T09:23:03.624806Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit BuildAndWaitDependencies 2025-06-30T09:23:03.624810Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-30T09:23:03.624819Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037890 2025-06-30T09:23:03.624825Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-30T09:23:03.624829Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-30T09:23:03.624833Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit ExecuteRead 2025-06-30T09:23:03.624838Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit ExecuteRead 2025-06-30T09:23:03.624850Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 1, request: { ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-06-30T09:23:03.624880Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037890 Complete read# {[7:929:2745], 1002} after executionsCount# 1 2025-06-30T09:23:03.624886Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037890 read iterator# {[7:929:2745], 1002} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:23:03.624895Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037890 read iterator# {[7:929:2745], 1002} finished in read 2025-06-30T09:23:03.624903Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-30T09:23:03.624907Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit ExecuteRead 2025-06-30T09:23:03.624911Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit CompletedOperations 2025-06-30T09:23:03.624914Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit CompletedOperations 2025-06-30T09:23:03.624921Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-30T09:23:03.624925Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit CompletedOperations 2025-06-30T09:23:03.624929Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037890 has finished 2025-06-30T09:23:03.624933Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2025-06-30T09:23:03.624946Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 2025-06-30T09:23:03.625068Z 
node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:934:2750], Recipient [7:678:2558]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:03.625076Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:03.625083Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:933:2749], serverId# [7:934:2750], sessionId# [0:0:0] 2025-06-30T09:23:03.625098Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553169, Sender [7:932:2748], Recipient [7:678:2558]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-30T09:23:03.625202Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:937:2753], Recipient [7:678:2558]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:03.625208Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:03.625213Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:936:2752], serverId# [7:937:2753], sessionId# [0:0:0] 2025-06-30T09:23:03.625243Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [7:935:2751], Recipient [7:678:2558]: NKikimrTxDataShard.TEvRead ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-06-30T09:23:03.625266Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037891, FollowerId 0 2025-06-30T09:23:03.625277Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037891 CompleteEdge# v1000/281474976715657 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-30T09:23:03.625282Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037891 changed HEAD read to non-repeatable v4000/18446744073709551615 2025-06-30T09:23:03.625289Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit CheckRead 2025-06-30T09:23:03.625300Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-30T09:23:03.625305Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit CheckRead 2025-06-30T09:23:03.625310Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to execution unit BuildAndWaitDependencies 2025-06-30T09:23:03.625313Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit BuildAndWaitDependencies 2025-06-30T09:23:03.625322Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 72075186224037891 2025-06-30T09:23:03.625328Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-30T09:23:03.625332Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit BuildAndWaitDependencies 2025-06-30T09:23:03.625336Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to 
execution unit ExecuteRead 2025-06-30T09:23:03.625340Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit ExecuteRead 2025-06-30T09:23:03.625394Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037891 Execute read# 1, request: { ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-06-30T09:23:03.625416Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037891 Complete read# {[7:935:2751], 1003} after executionsCount# 1 2025-06-30T09:23:03.625423Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037891 read iterator# {[7:935:2751], 1003} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:23:03.625432Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037891 read iterator# {[7:935:2751], 1003} finished in read 2025-06-30T09:23:03.625439Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-30T09:23:03.625443Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit ExecuteRead 2025-06-30T09:23:03.625447Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to execution unit CompletedOperations 2025-06-30T09:23:03.625451Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit CompletedOperations 2025-06-30T09:23:03.625458Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-30T09:23:03.625462Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit CompletedOperations 2025-06-30T09:23:03.625466Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037891 has finished 2025-06-30T09:23:03.625473Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037891 2025-06-30T09:23:03.625487Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037891 >> KqpBatchUpdate::SimpleOnePartition [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeCdcStream [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBlobDepot [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalDataSource >> TSchemeShardTest::DisablePublicationsOfDropping_Solomon [GOOD] >> BackupPathTest::RecursiveDirectoryPlusExplicitTable >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePathSpecified [GOOD] >> IndexBuildTestReboots::DropIndex [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeCdcStream [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeReplication [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBlobDepot [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExternalTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExternalDataSource [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeResourcePool [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBackupCollection [GOOD] >> 
BackupRestoreS3::TestAllPrimitiveTypes-UTF8 >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSubDomain [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSolomonVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTableIndex >> IndexBuildTestReboots::DropIndexWithDataColumns [GOOD] >> BackupPathTest::ExportWithCommonSourcePath ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::SimpleOnePartition [GOOD] Test command err: Trying to start YDB, gRPC: 16451, MsgBus: 17435 2025-06-30T09:22:55.415576Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670465350572803:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:55.415634Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044e8/r3tmp/tmpuYOi79/pdisk_1.dat 2025-06-30T09:22:55.474608Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670465350572683:2080] 1751275375408656 != 1751275375408659 2025-06-30T09:22:55.475637Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16451, node 1 2025-06-30T09:22:55.498281Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:55.498295Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:55.498298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:55.498360Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17435 2025-06-30T09:22:55.555028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:55.555066Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:55.559512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17435 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:22:55.586060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:55.594589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:55.660172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:55.686847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:55.706264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:55.881988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670465350574305:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:55.882030Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:55.939282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:55.963745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:55.979772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:55.992870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:56.013277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:56.032215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:56.045975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:56.059658Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670469645542255:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:56.059683Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:56.060377Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670469645542260:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:56.061234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:56.068569Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670469645542262:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:56.134884Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670469645542313:3406] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:56.408183Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 2693, MsgBus: 24333 2025-06-30T09:22:57.637121Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670471769614821:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:57.637163Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044e8/r3tmp/tmpaeQKA7/pdisk_1.dat 2025-06-30T09:22:57.658394Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:57.658784Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:752167 ... RVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670490287877836:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:23:01.216730Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670490287877887:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:01.519676Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 61578, MsgBus: 23815 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044e8/r3tmp/tmprZqZCH/pdisk_1.dat 2025-06-30T09:23:02.580969Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:02.583089Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:02.584340Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521670494902067474:2080] 1751275382558659 != 1751275382558662 TServer::EnableGrpc on GrpcPort 61578, node 4 2025-06-30T09:23:02.598103Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:02.598114Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:02.598117Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:02.598162Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23815 TClient is connected to server localhost:23815 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:23:02.667238Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:02.667276Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:02.669011Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:23:02.671806Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:02.679776Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:23:02.690206Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:02.708178Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:02.731659Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:02.748165Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:03.021978Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670499197036369:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.022079Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.025929Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:03.036932Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:03.048366Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:03.063890Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:03.079452Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:03.092265Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:03.105122Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:03.119686Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670499197037023:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.119714Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670499197037028:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.119719Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.120431Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:03.129164Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521670499197037030:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:23:03.179598Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521670499197037081:3406] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:03.567064Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> SystemView::ShowCreateTableColumnUpsertIndex [GOOD] >> SystemView::ShowCreateTableColumnAlterObject >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPrefixSpecified ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::DisablePublicationsOfDropping_Solomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:44.758335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:44.758356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.758361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:44.758364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:44.758368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:44.758370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:44.758376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.766265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:44.766373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:44.766444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:44.788048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:44.788069Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:44.791483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:44.791549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:44.791577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:44.793449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:44.793537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:44.793650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.793732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:44.801606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.801692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:44.802028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:44.802067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:44.802071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:44.802087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.803732Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:44.819415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:44.819511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.819570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 
0 2025-06-30T09:22:44.819578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:44.819637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:44.819652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:44.823107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.823151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:44.823191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.823201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:44.823205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:44.823209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:44.823759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.823771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:44.823778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:44.824128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.824136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.824140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.824147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:44.832509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 
2025-06-30T09:22:44.833217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:44.833260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:44.833453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:44.833549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:44.833597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:44.834076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.834086Z node 1 :FLAT_TX_SCHEMESHARD ... 
30T09:23:04.739124Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:04.739156Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 64424511598 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:04.739167Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_solomon.cpp:47: TDropSolomon TPropose operationId# 104:0 HandleReply TEvOperationPlan, step: 5000005, at schemeshard: 72057594046678944 2025-06-30T09:23:04.739180Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5309: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 3] name: Obj type: EPathTypeSolomonVolume state: EPathStateDrop stepDropped: 0 droppedTxId: 104 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:23:04.739189Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5325: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:23:04.739227Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:23:04.739253Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 104:0 128 -> 130 2025-06-30T09:23:04.739283Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:23:04.739294Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:23:04.739509Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:23:04.739534Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 FAKE_COORDINATOR: Erasing txId 104 2025-06-30T09:23:04.739968Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:04.739980Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:23:04.740018Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:23:04.740045Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:04.740052Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:451:2409], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-30T09:23:04.740059Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: [15:451:2409], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-30T09:23:04.740142Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-30T09:23:04.740153Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:418: [72057594046678944] TDeleteParts opId# 104:0 ProgressState 2025-06-30T09:23:04.740164Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:23:04.740170Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:23:04.740177Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-30T09:23:04.740181Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:23:04.740188Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-30T09:23:04.740194Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-30T09:23:04.740204Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-30T09:23:04.740210Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 104:0 2025-06-30T09:23:04.740244Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:23:04.740252Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-30T09:23:04.740257Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-30T09:23:04.740262Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-30T09:23:04.740356Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:23:04.740374Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:23:04.740381Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:23:04.740387Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-30T09:23:04.740393Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:23:04.740456Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:23:04.740470Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-30T09:23:04.740475Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-30T09:23:04.740481Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-30T09:23:04.740486Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:23:04.740497Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-30T09:23:04.741112Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:23:04.741292Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:23:04.741467Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-30T09:23:04.741500Z node 15 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-30T09:23:04.741560Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:23:04.741634Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409547 2025-06-30T09:23:04.742006Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:23:04.742017Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-30T09:23:04.742034Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:23:04.742814Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 
2025-06-30T09:23:04.742835Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-30T09:23:04.742970Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-30T09:23:04.743053Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-30T09:23:04.743064Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-30T09:23:04.743148Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-30T09:23:04.743171Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-30T09:23:04.743178Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [15:581:2520] TestWaitNotification: OK eventTxId 104 >> BackupRestore::TestAllIndexTypes-EIndexTypeInvalid [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree >> BackupRestoreS3::RestoreTablePartitioningSettings [GOOD] >> BackupRestoreS3::RestoreIndexTablePartitioningSettings >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT16 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::DropIndex [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:31.788755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:31.788784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:31.788790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, 
StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:31.788796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:31.788815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:31.788820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:31.788832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:31.788849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:31.788989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:31.789084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:31.805437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:31.805466Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:31.805601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:31.809320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:31.810554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:31.810604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:31.813114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:31.813196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:31.813367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:31.813428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:31.814493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:31.814548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:31.814942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:31.814958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:31.814969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:31.814980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:31.814986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:31.815042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:31.816783Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:31.842448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:31.842564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.842657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:31.842667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:31.842726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:31.842762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:31.843895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:31.843955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:31.844047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:22:31.844060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:31.844086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:31.844095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:31.844753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.844770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:31.844777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:31.845345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.845359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.845367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:31.845376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:31.846319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:31.846922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:31.846986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:31.847241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:31.847269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
SHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-30T09:23:04.796459Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:23:04.796473Z node 110 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:23:04.796683Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:04.797104Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:04.797136Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:04.797144Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:04.797965Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:04.798402Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5640: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 352 RawX2: 472446404894 } TabletId: 72075186233409546 State: 4 2025-06-30T09:23:04.798427Z node 110 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-30T09:23:04.798850Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:23:04.798944Z node 110 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-30T09:23:04.799721Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:23:04.799793Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-30T09:23:04.799905Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:23:04.799911Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:23:04.799926Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId 
[OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:23:04.799934Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:23:04.799941Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:23:04.800593Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-30T09:23:04.800609Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409546 2025-06-30T09:23:04.800691Z node 110 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:23:04.800743Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:23:04.800747Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:23:04.800790Z node 110 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:23:04.800802Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:23:04.800805Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [110:584:2544] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:23:04.800861Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:23:04.800905Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 53us result status StatusSuccess 2025-06-30T09:23:04.801008Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 
TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:04.801070Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndexByValue0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:23:04.801088Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndexByValue0" took 19us result status StatusPathDoesNotExist 2025-06-30T09:23:04.801102Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/UserDefinedIndexByValue0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 3]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Table/UserDefinedIndexByValue0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:23:04.801132Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false 
BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:23:04.801141Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" took 11us result status StatusPathDoesNotExist 2025-06-30T09:23:04.801152Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 3]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::DropIndexWithDataColumns [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:31.317152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:31.317183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:31.317190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:31.317197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:31.317218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:31.317223Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:31.317232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:31.317248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:31.317374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:31.317466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:31.332838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:31.332872Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:31.333013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:31.337073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:31.338165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:31.338209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:31.341363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:31.341427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:31.341591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:31.341645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:31.343078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:31.343136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:31.343478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:31.343492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:31.343502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:31.343511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:31.343518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:31.343550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:31.345138Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:31.364990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:31.365082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.365142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:31.365149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:31.365192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:31.365205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:31.366064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:31.366104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:31.366158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.366166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:31.366170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 
2025-06-30T09:22:31.366174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:31.366570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.366579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:31.366584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:31.367092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.367109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.367116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:31.367124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:31.367844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:31.368342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:31.368385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:31.368601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:31.368626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
SHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-30T09:23:04.884289Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:23:04.884302Z node 110 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-30T09:23:04.884562Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:04.884946Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:04.884977Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:04.884985Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:04.886045Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:04.887367Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5640: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 352 RawX2: 472446404894 } TabletId: 72075186233409546 State: 4 2025-06-30T09:23:04.887396Z node 110 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-30T09:23:04.887917Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:23:04.888020Z node 110 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-30T09:23:04.888801Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-30T09:23:04.888887Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-30T09:23:04.889012Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:23:04.889020Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:23:04.889035Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId 
[OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:23:04.889044Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:23:04.889052Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:23:04.889829Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-30T09:23:04.889848Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409546 2025-06-30T09:23:04.889929Z node 110 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-30T09:23:04.889998Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:23:04.890007Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:23:04.890075Z node 110 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:23:04.890094Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:23:04.890101Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [110:584:2544] TestWaitNotification: OK eventTxId 1003 2025-06-30T09:23:04.890187Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:23:04.890242Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 68us result status StatusSuccess 2025-06-30T09:23:04.890385Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 
TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:04.890498Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndexByValue0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:23:04.890527Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndexByValue0" took 31us result status StatusPathDoesNotExist 2025-06-30T09:23:04.890552Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/UserDefinedIndexByValue0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 3]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Table/UserDefinedIndexByValue0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-30T09:23:04.890606Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false 
BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:23:04.890622Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" took 19us result status StatusPathDoesNotExist 2025-06-30T09:23:04.890642Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 3]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalDataSource [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBackupCollection [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT8 >> TKeyValueTracingTest::ReadSmall >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPrefixSpecified [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit [GOOD] Test command err: 2025-06-30T09:22:52.413677Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:22:52.413817Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:22:52.413840Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004056/r3tmp/tmpx9FN5X/pdisk_1.dat 2025-06-30T09:22:52.522240Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:22:52.523270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:52.542273Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:52.543596Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275371848980 != 1751275371848984 2025-06-30T09:22:52.586997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:52.587042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:52.597956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:52.677100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:52.697822Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:52.698041Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:52.698120Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:22:52.698182Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:22:52.713375Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:52.713637Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:22:52.713666Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:22:52.713905Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:22:52.713917Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:22:52.713926Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:22:52.714001Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:22:52.714028Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:22:52.714043Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:22:52.724390Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:22:52.729774Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:22:52.729870Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:22:52.729903Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:22:52.729910Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:22:52.729916Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:22:52.729922Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:22:52.730000Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:52.730009Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:22:52.730122Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:22:52.730145Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:22:52.730160Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:22:52.730169Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:22:52.730178Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:22:52.730185Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:22:52.730192Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:22:52.730198Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:22:52.730203Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:22:52.730226Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:52.730232Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:52.730240Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:22:52.730353Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:22:52.730361Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:22:52.730383Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:22:52.730436Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:22:52.730448Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:22:52.730468Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:22:52.730477Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:22:52.730482Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:22:52.730488Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:22:52.730494Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:22:52.730560Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:22:52.730566Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:22:52.730571Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:22:52.730576Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:22:52.730588Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:22:52.730592Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:22:52.730596Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:22:52.730601Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:22:52.730609Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:22:52.730948Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:22:52.730960Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:22:52.741389Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:22:52.741415Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:22:52.741421Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:22:52.741431Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 888 on unit CompletedOperations 2025-06-30T09:23:05.462320Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-30T09:23:05.462324Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:23:05.462328Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-30T09:23:05.462333Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:23:05.462345Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-30T09:23:05.462659Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [9:1571:2403], Recipient [9:1232:2354]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-30T09:23:05.462673Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-30T09:23:05.462699Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [9:1573:2404], Recipient [9:1232:2354]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-30T09:23:05.462705Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-30T09:23:05.464347Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [8:1556:2900], Recipient [9:1501:2398] 2025-06-30T09:23:05.464365Z node 9 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-30T09:23:05.464413Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [9:1232:2354], Recipient [9:1232:2354]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-30T09:23:05.464420Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-30T09:23:05.464440Z node 9 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-30T09:23:05.464480Z node 9 :TX_DATASHARD TRACE: 
datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } SendingShards: 72075186224037888 ReceivingShards: 72075186224037888 Op: Commit } 2025-06-30T09:23:05.464498Z node 9 :TX_DATASHARD TRACE: key_validator.cpp:33: -- AddReadRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-30T09:23:05.464507Z node 9 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-30T09:23:05.464522Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckWrite 2025-06-30T09:23:05.464535Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:23:05.464540Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckWrite 2025-06-30T09:23:05.464545Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:23:05.464550Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:23:05.464559Z node 9 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-30T09:23:05.464574Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-30T09:23:05.464580Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:23:05.464584Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:23:05.464589Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteWrite 2025-06-30T09:23:05.464594Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteWrite 2025-06-30T09:23:05.464600Z node 9 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:7] at 72075186224037888 2025-06-30T09:23:05.464608Z node 9 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-30T09:23:05.464627Z node 9 :TX_DATASHARD TRACE: datashard_kqp.cpp:806: KqpCommitLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true 2025-06-30T09:23:05.464636Z node 9 :TX_DATASHARD TRACE: datashard_user_db.cpp:469: Committing changes lockId# 281474976715661 in localTid# 1001 shard# 72075186224037888 2025-06-30T09:23:05.464654Z node 9 :TX_DATASHARD DEBUG: execute_write_unit.cpp:421: Skip 
empty write operation for [0:7] at 72075186224037888 2025-06-30T09:23:05.464691Z node 9 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-30T09:23:05.464705Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:23:05.464713Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteWrite 2025-06-30T09:23:05.464718Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-30T09:23:05.464723Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit FinishProposeWrite 2025-06-30T09:23:05.464742Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:23:05.464746Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-30T09:23:05.464750Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:23:05.464754Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:23:05.464764Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:23:05.464768Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:23:05.464773Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-30T09:23:05.466763Z node 9 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-30T09:23:05.466783Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:7] at 72075186224037888 on unit FinishProposeWrite 2025-06-30T09:23:05.466793Z node 9 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 7 at tablet 72075186224037888 send to client, propose latency: 1 ms, status: STATUS_COMPLETED 2025-06-30T09:23:05.466808Z node 9 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-30T09:23:05.466835Z node 9 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:23:05.467282Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270977, Sender [9:61:2064], Recipient [9:1232:2354]: {TEvNotifyPlanStep TabletId# 72075186224037888 PlanStep# 1501} 2025-06-30T09:23:05.467296Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3172: StateWork, processing event TEvMediatorTimecast::TEvNotifyPlanStep 2025-06-30T09:23:05.467302Z node 9 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-30T09:23:05.467310Z node 9 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 { items { int64_value: 0 } items { int64_value: 1000 } }, { items { int64_value: 1 } items { int64_value: 1001 } }, { items { int64_value: 2 } items { int64_value: 1002 } 
}, { items { int64_value: 3 } items { int64_value: 1003 } }, { items { int64_value: 4 } items { int64_value: 1004 } }, { items { int64_value: 5 } items { int64_value: 1005 } }, { items { int64_value: 6 } items { int64_value: 5001 } } { items { int64_value: 0 } items { int64_value: 2000 } }, { items { int64_value: 1 } items { int64_value: 2001 } }, { items { int64_value: 2 } items { int64_value: 2002 } }, { items { int64_value: 3 } items { int64_value: 2003 } }, { items { int64_value: 4 } items { int64_value: 2004 } }, { items { int64_value: 5 } items { int64_value: 2005 } }, { items { int64_value: 6 } items { int64_value: 5002 } } result_sets { columns { name: "index" type { optional_type { item { type_id: INT64 } } } } columns { name: "value" type { optional_type { item { type_id: INT64 } } } } rows { items { int64_value: 0 } items { int64_value: 1000 } } rows { items { int64_value: 1 } items { int64_value: 1001 } } rows { items { int64_value: 2 } items { int64_value: 1002 } } rows { items { int64_value: 3 } items { int64_value: 1003 } } rows { items { int64_value: 4 } items { int64_value: 1004 } } rows { items { int64_value: 5 } items { int64_value: 1005 } } rows { items { int64_value: 6 } items { int64_value: 5001 } } } result_sets { columns { name: "index" type { optional_type { item { type_id: INT64 } } } } columns { name: "value" type { optional_type { item { type_id: INT64 } } } } rows { items { int64_value: 0 } items { int64_value: 2000 } } rows { items { int64_value: 1 } items { int64_value: 2001 } } rows { items { int64_value: 2 } items { int64_value: 2002 } } rows { items { int64_value: 3 } items { int64_value: 2003 } } rows { items { int64_value: 4 } items { int64_value: 2004 } } rows { items { int64_value: 5 } items { int64_value: 2005 } } rows { items { int64_value: 6 } items { int64_value: 5002 } } } tx_meta { } >> BackupPathTest::RecursiveDirectoryPlusExplicitTable [GOOD] >> BackupPathTest::ExportWithCommonSourcePath [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTableIndex [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSequence >> TPersQueueTest::FetchRequest [GOOD] >> TPersQueueTest::Init >> BackupRestoreS3::TestAllPrimitiveTypes-UTF8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-YSON >> TSchemeShardViewTest::AsyncCreateSameView >> BackupRestore::RestoreTablePartitioningSettings >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedImport >> TSchemeShardViewTest::AsyncCreateSameView [GOOD] |82.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |82.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 >> KqpBatchUpdate::Large_1 [GOOD] >> BackupPathTest::ExportWithCommonSourcePathAndExplicitTableInside |82.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_view/unittest |82.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |82.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order >> TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1 >> TSchemeShardViewTest::EmptyQueryText >> 
TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1 [GOOD] >> TKeyValueTracingTest::ReadSmall [FAIL] >> BackupRestoreS3::TestAllPrimitiveTypes-INT16 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT16 >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedImport [GOOD] |82.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TSchemeShardTest::CopyTableForBackup [GOOD] >> TSchemeShardTest::ConsistentCopyTablesForBackup >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree [GOOD] >> BackupRestore::TestAllPrimitiveTypes-PRIMITIVE_TYPE_ID_UNSPECIFIED [GOOD] >> BackupRestore::TestAllPrimitiveTypes-BOOL ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncCreateSameView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:23:07.063723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:23:07.063750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:23:07.063755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:23:07.063759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:23:07.063763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:23:07.063770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:23:07.063778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:23:07.063793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:23:07.063900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:23:07.063977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:23:07.079769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:23:07.079803Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:07.083859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 
2025-06-30T09:23:07.083958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:23:07.084003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:23:07.085766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:23:07.085836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:23:07.085966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:07.086031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:23:07.086622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:07.086675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:23:07.087040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:07.087056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:07.087090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:23:07.087104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:23:07.087111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:23:07.087135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.088813Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:23:07.113296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:23:07.113426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.113509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:23:07.113518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 
72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:23:07.113570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:23:07.113586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:07.114574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:07.114622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:23:07.114706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.114717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:23:07.114724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:23:07.114730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:23:07.123717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.123779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:23:07.123793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:23:07.124719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.124742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.124750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:23:07.124760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:23:07.125685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:23:07.126481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 
72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:23:07.126535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:23:07.126802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:07.126842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:07.126851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:23:07.126934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:23:07.126944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:23:07.126991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:23:07.127007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:23:07.127702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:07.127716Z node 1 :FLAT_TX_SCHEMESHARD ... 
_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:23:07.138070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:07.138085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:23:07.138143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:23:07.138170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:07.138176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-30T09:23:07.138183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: Erasing txId 101 2025-06-30T09:23:07.138291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.138302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-30T09:23:07.138318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:23:07.138323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:23:07.138329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:23:07.138333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:23:07.138338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-30T09:23:07.138344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:23:07.138350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-30T09:23:07.138355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 101:0 2025-06-30T09:23:07.138371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:23:07.138379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-30T09:23:07.138384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, 
[OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-30T09:23:07.138387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-30T09:23:07.138541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:23:07.138558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:23:07.138563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-30T09:23:07.138569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-30T09:23:07.138575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:23:07.138693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:23:07.138706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:23:07.138714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-30T09:23:07.138719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-30T09:23:07.138723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:23:07.138765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-30T09:23:07.139714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:23:07.139812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 101 2025-06-30T09:23:07.139892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- 
TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-30T09:23:07.139901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-30T09:23:07.139919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-30T09:23:07.139922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-30T09:23:07.139932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-30T09:23:07.139935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-30T09:23:07.140045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-30T09:23:07.140067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-30T09:23:07.140077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-30T09:23:07.140082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:308:2297] 2025-06-30T09:23:07.140123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-30T09:23:07.140127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:308:2297] 2025-06-30T09:23:07.140142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:23:07.140159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:23:07.140166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:308:2297] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 102 2025-06-30T09:23:07.140252Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:23:07.140305Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 69us result status StatusSuccess 2025-06-30T09:23:07.140439Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyView" PathDescription { Self { Name: "MyView" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "MyView" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> BackupRestore::TestAllPrimitiveTypes-UINT8 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT16 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Large_1 [GOOD] Test command err: Trying to start YDB, gRPC: 7201, MsgBus: 26521 2025-06-30T09:22:56.563227Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670468126045089:2240];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:56.563296Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044dc/r3tmp/tmpKHXO68/pdisk_1.dat 2025-06-30T09:22:56.613696Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7201, node 1 2025-06-30T09:22:56.639016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:56.639037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:56.639040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:56.639100Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26521 2025-06-30T09:22:56.692940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:56.692973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:56.693861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26521 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:56.745838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:56.749495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:56.754698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:56.832729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:56.892283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:56.907861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:57.046627Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670472421013776:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.046661Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.105928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.118068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.127620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.140512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.197277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.210406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.223877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.243565Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670472421014430:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.243612Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.243637Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670472421014435:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.244526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:57.250791Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670472421014437:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:57.351296Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670472421014489:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:57.529491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:57.551036Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:01.552913Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521670468126045089:2240];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:01.553188Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YD ... task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 5985, MsgBus: 62535 2025-06-30T09:23:05.722556Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521670505934186544:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:05.722583Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044dc/r3tmp/tmpLAYxNR/pdisk_1.dat 2025-06-30T09:23:05.756323Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:05.757676Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521670505934186523:2080] 1751275385722308 != 1751275385722311 TServer::EnableGrpc on GrpcPort 5985, node 4 2025-06-30T09:23:05.770943Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:05.770960Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:05.770963Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:05.771031Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62535 2025-06-30T09:23:05.834926Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:05.834968Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:05.836034Z node 4 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62535 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:05.843715Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:05.847263Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:23:05.861114Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:05.878393Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:05.906873Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:05.920458Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:23:06.143246Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670510229155422:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.143276Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.153622Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.163161Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.179744Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.193166Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.206379Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.221423Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.279042Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.308004Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670510229156079:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.308030Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.308197Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670510229156084:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.309197Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:06.313904Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:23:06.314024Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521670510229156086:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:23:06.369745Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521670510229156137:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:06.502027Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:06.726011Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TSchemeShardViewTest::EmptyQueryText [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1 [GOOD] Test command err: RandomSeed# 7294739751091288382 2025-06-30T09:23:07.566465Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [8:269:20] SessionId# [7:28:7] Cookie# 13616329370319804502 2025-06-30T09:23:07.566500Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 8 SessionId# [7:28:7] Inserted# false Subscription# {SessionId# [7:28:7] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-30T09:23:07.568000Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 8 Cookie# 13616329370319804502 SessionId# [7:28:7] Binding# {8.9/13616329370319804502@[7:28:7]} Record# {RootNodeId: 5 } 2025-06-30T09:23:07.568044Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {8.5/13616329370319804502@[7:28:7]} PrevRootNodeId# 9 ConfigUpdate# false 2025-06-30T09:23:07.568090Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [8:269:20] SessionId# [3:100:7] Cookie# 8600792876159162271 2025-06-30T09:23:07.568101Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 8 SessionId# [3:100:7] Inserted# false Subscription# {SessionId# [3:100:7] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-30T09:23:07.568119Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 8 Cookie# 8600792876159162271 SessionId# [3:100:7] Binding# {8.9/8600792876159162271@[3:100:7]} Record# {RootNodeId: 5 } 2025-06-30T09:23:07.568125Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {8.5/8600792876159162271@[3:100:7]} PrevRootNodeId# 9 ConfigUpdate# false 2025-06-30T09:23:07.568181Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [5:248:20] SessionId# [9:64:5] Cookie# 2074847721621126954 2025-06-30T09:23:07.568188Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 5 SessionId# [9:64:5] Inserted# false Subscription# {SessionId# [9:64:5] 
SubscriptionCookie# 0} NextSubscribeCookie# 4 2025-06-30T09:23:07.568197Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 5 Cookie# 2074847721621126954 SessionId# [9:64:5] Binding# {5.5/2074847721621126954@[9:64:5]} Record# {RootNodeId: 5 } 2025-06-30T09:23:07.568210Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [5:248:20] SessionId# [2:114:4] Cookie# 3629366022875748 2025-06-30T09:23:07.568217Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 5 SessionId# [2:114:4] Inserted# false Subscription# {SessionId# [2:114:4] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-30T09:23:07.568225Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 5 Cookie# 3629366022875748 SessionId# [2:114:4] Binding# {5.5/3629366022875748@[2:114:4]} Record# {RootNodeId: 5 } 2025-06-30T09:23:07.568238Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [5:248:20] SessionId# [4:70:4] Cookie# 11582369580826987509 2025-06-30T09:23:07.568245Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 5 SessionId# [4:70:4] Inserted# false Subscription# {SessionId# [4:70:4] SubscriptionCookie# 0} NextSubscribeCookie# 4 2025-06-30T09:23:07.568255Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 5 Cookie# 11582369580826987509 SessionId# [4:70:4] Binding# {5.5/11582369580826987509@[4:70:4]} Record# {RootNodeId: 5 } 2025-06-30T09:23:07.568268Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [5:248:20] SessionId# [6:55:5] Cookie# 9778385088140029597 2025-06-30T09:23:07.568275Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 5 SessionId# [6:55:5] Inserted# false Subscription# {SessionId# [6:55:5] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-30T09:23:07.568287Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 5 Cookie# 9778385088140029597 SessionId# [6:55:5] Binding# {5.5/9778385088140029597@[6:55:5]} Record# {RootNodeId: 5 } 2025-06-30T09:23:07.568605Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [7:262:20] SessionId# [1:145:6] Cookie# 455456844850983190 2025-06-30T09:23:07.568616Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [1:145:6] Inserted# false Subscription# {SessionId# [1:145:6] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-30T09:23:07.568628Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 7 Cookie# 455456844850983190 SessionId# [1:145:6] Binding# {7.9/455456844850983190@[1:145:6]} Record# {RootNodeId: 5 } 2025-06-30T09:23:07.568636Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {7.5/455456844850983190@[1:145:6]} PrevRootNodeId# 9 ConfigUpdate# false 2025-06-30T09:23:07.570317Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:23:07.570344Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:23:07.570363Z 1 00h00m00.000000s :STATESTORAGE DEBUG: 
Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:23:07.570372Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:23:07.570381Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:23:07.572978Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639257 Sender# [1:310:42] SessionId# [0:0:0] Cookie# 0 2025-06-30T09:23:07.573097Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:23:07.573117Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [1:220:36] SessionId# [7:146:1] Cookie# 455456844850983190 2025-06-30T09:23:07.573132Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-30T09:23:07.573165Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 1 Cookie# 455456844850983190 SessionId# [7:146:1] Binding# {8.5/13616329370319804502@[7:28:7]} Record# {CacheUpdate { } } 2025-06-30T09:23:07.573173Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-30T09:23:07.573196Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-30T09:23:07.573206Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-30T09:23:07.573213Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-30T09:23:07.573220Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2025-06-30T09:23:07.573227Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2025-06-30T09:23:07.573253Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.573266Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.573272Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.573277Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.573283Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.573311Z 8 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [7:262:20] SessionId# [8:29:7] 
Cookie# 13616329370319804502 2025-06-30T09:23:07.573319Z 8 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [8:29:7] Inserted# false Subscription# {SessionId# [8:29:7] SubscriptionCookie# 0} NextSubscribeCookie# 5 2025-06-30T09:23:07.573330Z 8 00h00m00.002048s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 7 Cookie# 13616329370319804502 SessionId# [8:29:7] Binding# {9.5/13480359510043938731@[8:18:8]} Record# {CacheUpdate { } } 2025-06-30T09:23:07.573337Z 8 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [8:29:7] Inserted# false Subscription# {SessionId# [8:29:7] SubscriptionCookie# 0} NextSubscribeCookie# 5 2025-06-30T09:23:07.573356Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [7:262:20] SessionId# [1:145:6] Cookie# 455456844850983190 2025-06-30T09:23:07.573362Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [1:145:6] Inserted# false Subscription# {SessionId# [1:145:6] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-30T09:23:07.573382Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 7 Cookie# 455456844850983190 SessionId# [1:145:6] Binding# {7.5/455456844850983190@[1:145:6]} Record# {RootNodeId: 5 CacheUpdate { } } 2025-06-30T09:23:07.573431Z 9 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [8:269:20] SessionId# [9:19:8] Cookie# 13480359510043938731 2025-06-30T09:23:07.573438Z 9 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 8 SessionId# [9:19:8] Inserted# false Subscription# {SessionId# [9:19:8] SubscriptionCookie# 0} NextSubscribeCookie# 4 2025-06-30T09:23:07.573448Z 9 00h00m00.002048s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 8 Cookie# 13480359510043938731 SessionId# [9:19:8] Binding# {5.5/2074847721621126954@[9:64:5]} Record# {CacheUpdate { } } 2025-06-30T09:23:07.573455Z 9 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 8 SessionId# [9:19:8] Inserted# false Subscription# {SessionId# [9:19:8] SubscriptionCookie# 0} NextSubscribeCookie# 4 2025-06-30T09:23:07.573470Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [8:269:20] SessionId# [7:28:7] Cookie# 13616329370319804502 2025-06-30T09:23:07.573478Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 8 SessionId# [7:28:7] Inserted# false Subscription# {Ses ... 
ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:306:40] CurrentLeaderTablet: [1:310:42] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:23:07.583205Z 1 00h00m15.235211s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:23:07.583231Z 1 00h00m15.235211s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-30T09:23:07.583239Z 1 00h00m15.235211s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-30T09:23:07.583245Z 1 00h00m15.235211s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-30T09:23:07.583251Z 1 00h00m15.235211s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2025-06-30T09:23:07.583257Z 1 00h00m15.235211s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2025-06-30T09:23:07.583281Z 1 00h00m15.235211s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.583290Z 1 00h00m15.235211s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.630836Z 1 00h00m15.235211s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.630873Z 1 00h00m15.235211s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.630881Z 1 00h00m15.235211s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.632882Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:23:07.632922Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-30T09:23:07.632936Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:23:07.632943Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-30T09:23:07.632950Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2025-06-30T09:23:07.632957Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2025-06-30T09:23:07.632973Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:306:40] CurrentLeaderTablet: [1:310:42] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:23:07.632986Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 
72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:306:40] CurrentLeaderTablet: [1:310:42] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:23:07.632994Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:306:40] CurrentLeaderTablet: [1:310:42] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:23:07.636865Z 1 00h00m30.169877s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:23:07.636907Z 1 00h00m30.169877s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-30T09:23:07.636920Z 1 00h00m30.169877s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:23:07.636928Z 1 00h00m30.169877s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 msgGeneration=0 Info->ClusterStateGuid=2 msgGuid=0 2025-06-30T09:23:07.636940Z 1 00h00m30.169877s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-30T09:23:07.636947Z 1 00h00m30.169877s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2025-06-30T09:23:07.636954Z 1 00h00m30.169877s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2025-06-30T09:23:07.636969Z 1 00h00m30.169877s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:306:40] CurrentLeaderTablet: [1:310:42] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:23:07.636988Z 1 00h00m30.169877s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 0 2025-06-30T09:23:07.636996Z 1 00h00m30.169877s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 2 CurrentLeader: [1:306:40] CurrentLeaderTablet: [1:310:42] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:23:07.637001Z 1 00h00m30.169877s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=0 Info->ClusterStateGuid=0 clusterStateGuid=2 2025-06-30T09:23:07.637015Z 1 00h00m30.169877s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 2 2025-06-30T09:23:07.637415Z 1 00h00m31.804429s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:23:07.637435Z 1 00h00m31.804429s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-30T09:23:07.637443Z 1 00h00m31.804429s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-30T09:23:07.637449Z 1 00h00m31.804429s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-30T09:23:07.637455Z 1 00h00m31.804429s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2025-06-30T09:23:07.637462Z 1 
00h00m31.804429s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2025-06-30T09:23:07.637471Z 1 00h00m31.804429s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.637478Z 1 00h00m31.804429s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.637484Z 1 00h00m31.804429s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.637490Z 1 00h00m31.804429s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.637495Z 1 00h00m31.804429s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-30T09:23:07.645119Z 1 00h00m40.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:23:07.645162Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-30T09:23:07.645175Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:23:07.645183Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-30T09:23:07.645190Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2025-06-30T09:23:07.645198Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2025-06-30T09:23:07.645215Z 1 00h00m40.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:306:40] CurrentLeaderTablet: [1:310:42] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:23:07.645230Z 1 00h00m40.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:306:40] CurrentLeaderTablet: [1:310:42] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:23:07.645237Z 1 00h00m40.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:306:40] CurrentLeaderTablet: [1:310:42] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:23:07.652845Z 1 00h00m50.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-30T09:23:07.652889Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-30T09:23:07.652902Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-30T09:23:07.652910Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: 
{EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-30T09:23:07.652917Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2025-06-30T09:23:07.652924Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2025-06-30T09:23:07.652940Z 1 00h00m50.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:306:40] CurrentLeaderTablet: [1:310:42] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:23:07.652954Z 1 00h00m50.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 1 ClusterStateGuid: 0 CurrentLeader: [1:306:40] CurrentLeaderTablet: [1:310:42] CurrentGeneration: 2 CurrentStep: 0} 2025-06-30T09:23:07.652961Z 1 00h00m50.300000s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=0 2025-06-30T09:23:07.652980Z 1 00h00m50.300000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 0 >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePrefixSpecified >> TSchemeShardTest::ConsistentCopyTablesForBackup [GOOD] >> TSchemeShardTest::CopyLockedTableForBackup >> SystemView::StoragePoolsFields [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::EmptyQueryText [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:23:07.911391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:23:07.911481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:23:07.911489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:23:07.911495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:23:07.911502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:23:07.911507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:23:07.911518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:23:07.911535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:23:07.911662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:23:07.911739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:23:07.929960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:23:07.929989Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:07.934255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:23:07.934342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:23:07.934381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:23:07.936365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:23:07.936451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:23:07.936600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:07.936681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:23:07.937433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:07.937481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:23:07.937817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:07.937833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:07.937859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:23:07.937873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:23:07.937880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:23:07.937901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.939534Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:23:07.965322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: 
Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:23:07.965427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.965501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:23:07.965512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:23:07.965566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:23:07.965583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:07.966523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:07.966574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:23:07.966651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.966664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:23:07.966669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:23:07.966675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:23:07.967284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.967302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:23:07.967308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:23:07.967731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.967745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.967751Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:23:07.967760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:23:07.968525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:23:07.969057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:23:07.969101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:23:07.969294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:07.969322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:07.969331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:23:07.969384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:23:07.969390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:23:07.969419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:23:07.969428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:23:07.969970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:07.969983Z node 1 :FLAT_TX_SCHEMESHARD ... 
: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-30T09:23:07.974599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "MyView" QueryText: "" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:23:07.974663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/MyView, opId: 101:0 2025-06-30T09:23:07.974673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/MyView, opId: 101:0, viewDescription: Name: "MyView" QueryText: "" 2025-06-30T09:23:07.974700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: MyView, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-30T09:23:07.974718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-30T09:23:07.974724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 101:0 type: TxCreateView target path: [OwnerId: 72057594046678944, LocalPathId: 2] source path: 2025-06-30T09:23:07.974730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 101:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:23:07.975152Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:23:07.978612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 101, response: Status: StatusAccepted TxId: 101 SchemeshardId: 72057594046678944 PathId: 2, at schemeshard: 72057594046678944 2025-06-30T09:23:07.978696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE VIEW, path: /MyRoot/MyView 2025-06-30T09:23:07.978833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.978845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:30: [72057594046678944] TCreateView::TPropose, opId: 101:0 ProgressState 2025-06-30T09:23:07.978860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 101 ready parts: 1/1 2025-06-30T09:23:07.978893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 101 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:23:07.979217Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:23:07.980034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send 
tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-30T09:23:07.980084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-30T09:23:07.980202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:07.980255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:07.980268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002 2025-06-30T09:23:07.980313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 101:0 128 -> 240 2025-06-30T09:23:07.980359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:23:07.980377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:23:07.981050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:07.981068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:23:07.981127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:23:07.981161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:07.981168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-30T09:23:07.981177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: Erasing txId 101 2025-06-30T09:23:07.981315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-30T09:23:07.981326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-30T09:23:07.981344Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:23:07.981350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:23:07.981357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-30T09:23:07.981361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:23:07.981367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-30T09:23:07.981374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-30T09:23:07.981380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-30T09:23:07.981386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 101:0 2025-06-30T09:23:07.981405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:23:07.981413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-30T09:23:07.981419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-30T09:23:07.981424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-30T09:23:07.981552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:23:07.981570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:23:07.981577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-30T09:23:07.981583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-30T09:23:07.981593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:23:07.981826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:23:07.981859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-30T09:23:07.981866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-30T09:23:07.981884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-30T09:23:07.981891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-30T09:23:07.981909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-30T09:23:07.983439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-30T09:23:07.983581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSequence [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeView >> BackupPathTest::ExportWithCommonSourcePathAndExplicitTableInside [GOOD] >> KqpBatchDelete::SimpleOnePartition [GOOD] >> BackupRestore::RestoreTablePartitioningSettings [GOOD] >> BackupRestore::RestoreIndexTablePartitioningSettings >> KqpBatchDelete::Large_2 [GOOD] |82.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |82.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-YSON [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UUID >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePrefixSpecified [GOOD] >> TSchemeShardTest::CopyLockedTableForBackup [GOOD] >> TSchemeShardTest::ConfigColumnFamily |82.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::SimpleOnePartition [GOOD] Test command err: Trying to start YDB, gRPC: 21762, MsgBus: 28073 2025-06-30T09:23:00.179932Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670487332022841:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:00.181888Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044a3/r3tmp/tmpfDuFso/pdisk_1.dat 2025-06-30T09:23:00.280547Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:00.289322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:00.289358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-30T09:23:00.293231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21762, node 1 2025-06-30T09:23:00.314414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:00.314430Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:00.314432Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:00.314493Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28073 TClient is connected to server localhost:28073 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:00.421786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:00.425389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:23:00.429072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:00.456188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:23:00.480749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:00.495823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:00.711407Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670487332024433:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:00.711441Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:00.764455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:00.787472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:00.812344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:00.830256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:00.846225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:00.861887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:00.874453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:00.896104Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670487332025087:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:00.896125Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:00.896248Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670487332025092:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:00.897274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:00.902199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:23:00.902436Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670487332025094:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:23:00.969655Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670487332025145:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:01.184241Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 8077, MsgBus: 21977 2025-06-30T09:23:02.603889Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670494926550428:2178];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:02.603924Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044a3/r3tmp/tmpgG1r4x/pdisk_1.dat 2025-06-30T09:23:02.618910Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Ta ... ction 281474976715669 completed, doublechecking } 2025-06-30T09:23:05.598042Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670505196097622:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:05.822846Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 24943, MsgBus: 31509 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044a3/r3tmp/tmpDixCWM/pdisk_1.dat 2025-06-30T09:23:06.573605Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 24943, node 4 2025-06-30T09:23:06.591805Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:06.592555Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521670511731152223:2080] 1751275386561173 != 1751275386561176 2025-06-30T09:23:06.599570Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:06.599591Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:06.599594Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:06.599651Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31509 TClient is connected to server localhost:31509 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:23:06.669102Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:06.669135Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:06.670715Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:23:06.671286Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:06.683665Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:23:06.703511Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:06.736068Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:06.807997Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
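Editorial note on the recurring sequence above: every fresh test database goes through the same warm-up — ESchemeOpCreateTable proposals that warn "propose itself is undo unsafe", a NOT_FOUND from the pool fetcher for /Root/.metadata/workload_manager/pools/default, a scheduled retry, and finally a TX_PROXY "path exist, request accepts it" message once a concurrent creator has won the race. This is consistent with a lazy, idempotent ensure-exists pattern in which losing the creation race is treated as success. Below is a minimal self-contained sketch of that pattern; the names and types are illustrative only and are not YDB's actual code.

#include <iostream>
#include <set>
#include <string>

enum class EStatus { Success, NotFound, AlreadyExists };

// Toy stand-in for the scheme shard: the set of paths that currently exist.
std::set<std::string> g_paths;

EStatus FetchPool(const std::string& path) {
    return g_paths.count(path) ? EStatus::Success : EStatus::NotFound;
}

EStatus CreatePool(const std::string& path) {
    // insert().second is false when the path already exists.
    return g_paths.insert(path).second ? EStatus::Success : EStatus::AlreadyExists;
}

// Lazily ensure the pool exists; a loser of the creation race accepts
// the existing path instead of reporting an error.
bool EnsureDefaultPool(const std::string& path) {
    if (FetchPool(path) == EStatus::Success) {
        return true;                       // fast path: someone already created it
    }
    switch (CreatePool(path)) {            // may race with another creator
        case EStatus::Success:             // we won the race
        case EStatus::AlreadyExists:       // "path exist, request accepts it"
            return true;
        default:
            return false;
    }
}

int main() {
    const std::string kPool = "/Root/.metadata/workload_manager/pools/default";
    // Two callers; the second one finds the pool already present.
    std::cout << EnsureDefaultPool(kPool) << " " << EnsureDefaultPool(kPool) << "\n"; // prints: 1 1
}

In the real log the same acceptance shows up as a WARN/ERROR pair rather than a silent success, which is why these messages repeat for every test database yet the tests still pass.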
2025-06-30T09:23:06.842781Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:07.195210Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670516026121129:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:07.195237Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:07.204516Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:07.224379Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:07.247763Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:07.266287Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:07.282718Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:07.303974Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:07.325355Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:07.345868Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670516026121782:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:07.345991Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:07.346115Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670516026121787:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:07.347061Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:07.349989Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:23:07.350066Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521670516026121789:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:23:07.437882Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521670516026121840:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:07.571314Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> BackupPathTest::EmptyDirectoryIsOk >> TxUsage::Sinks_Oltp_WriteToTopics_4_Query [GOOD] >> BackupRestoreS3::RestoreIndexTablePartitioningSettings [GOOD] >> BackupRestoreS3::RestoreIndexTableReadReplicasSettings >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerless ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::Large_2 [GOOD] Test command err: Trying to start YDB, gRPC: 8096, MsgBus: 64911 2025-06-30T09:22:57.672955Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670472774940553:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:57.711918Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044b2/r3tmp/tmpxZpZw1/pdisk_1.dat 2025-06-30T09:22:57.739572Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8096, node 1 2025-06-30T09:22:57.770333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:57.770346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:57.770349Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:57.770394Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64911 2025-06-30T09:22:57.823353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:57.823397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:57.827025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64911 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:57.884258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:57.892541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:22:57.908161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:57.991820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:58.080070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:58.109512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:58.251085Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670477069909254:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:58.251135Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:58.311133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:58.319691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:58.333576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:58.349191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:58.361893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:58.393487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:58.418820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:58.454543Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670477069909912:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:58.454608Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:58.458912Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670477069909917:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:58.463978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:58.467960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-30T09:22:58.468065Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670477069909919:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:22:58.536247Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670477069909970:3397] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:58.672058Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:58.921452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 11826, MsgBus: 5656 2025-06-30T09:23:02.499676Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670492465766645:2146];send_to=[0:730719953665814613 ... eration type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 21257, MsgBus: 26755 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044b2/r3tmp/tmpMrAsNo/pdisk_1.dat 2025-06-30T09:23:05.686896Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:05.688932Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:05.692322Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670508287178983:2080] 1751275385656307 != 1751275385656310 TServer::EnableGrpc on GrpcPort 21257, node 3 2025-06-30T09:23:05.702660Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:05.702675Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:05.702678Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:05.702754Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26755 2025-06-30T09:23:05.763815Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:05.763850Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:05.764915Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26755 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:05.810392Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:05.815441Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:23:05.823582Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:05.850689Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:05.922036Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:05.938796Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:06.165732Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670512582147904:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.165759Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.180386Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.192904Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.206512Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.262844Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.277234Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.292496Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.307843Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.380485Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670512582148563:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.380509Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.380613Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670512582148568:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.381445Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:06.383724Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:23:06.384027Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670512582148570:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:23:06.465122Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670512582148621:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:06.645795Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:06.673870Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> BackupRestoreS3::TestAllPrimitiveTypes-UINT16 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT32 >> BackupRestore::TestAllPrimitiveTypes-UINT16 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT32 >> BackupRestore::TestAllPrimitiveTypes-BOOL [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT8 >> TSchemeShardTest::ConfigColumnFamily [GOOD] >> TSchemeShardTest::ConsistentCopyAfterDropIndexes >> GroupWriteTest::Simple >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other >> TPersQueueTest::CloseActiveWriteSessionOnClusterDisable [GOOD] >> TPersQueueTest::Cache >> EncryptedBackupParamsValidationTestFeatureDisabled::SrcPrefixAndSrcPathSpecified >> BackupRestore::RestoreIndexTablePartitioningSettings [GOOD] >> BackupRestore::RestoreIndexTableReadReplicasSettings |82.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |82.6%| [LD] {RESULT} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |82.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |82.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeView [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTransfer [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSysView [GOOD] >> CommonEncryptionRequirementsTest::CommonEncryptionRequirements >> BSCReadOnlyPDisk::ReadOnlySlay >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive >> BSCReadOnlyPDisk::ReadOnlyNotAllowed >> BackupPathTest::EmptyDirectoryIsOk [GOOD] >> TSchemeShardTest::ConsistentCopyAfterDropIndexes [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::SrcPrefixAndSrcPathSpecified [GOOD] |82.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-UUID [GOOD] >> BsControllerConfig::MoveGroups [GOOD] >> BackupPathTest::CommonPrefixButExplicitImportItems ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::ReadSmall [FAIL] Test command err: equal assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:124, void TestOneRead(TString, TString): env.WilsonUploader->Traces.size() == 1 TBackTrace::Capture()+28 
(0xC701EBC) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+137 (0xC8BB669) TestOneRead(TBasicString>, TBasicString>)+2035 (0xC5F9153) NTestSuiteTKeyValueTracingTest::TTestCaseReadSmall::Execute_(NUnitTest::TTestContext&)+157 (0xC5FC12D) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()::'lambda'()::operator()() const+71 (0xC603DF7) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+126 (0xC8BD51E) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+426 (0xC60365A) NUnitTest::TTestFactory::Execute()+803 (0xC8BDC93) NUnitTest::RunMain(int, char**)+3021 (0xC8CBFAD) ??+0 (0x7FE094ECFD90) __libc_start_main+128 (0x7FE094ECFE40) _start+41 (0xB741029) |82.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> EncryptedExportTest::EncryptedExportAndImport >> BackupRestore::TestAllPrimitiveTypes-INT8 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT16 >> BackupRestoreS3::RestoreIndexTableReadReplicasSettings [GOOD] >> BackupRestoreS3::RestoreTableSplitBoundaries >> BackupRestoreS3::TestAllPrimitiveTypes-INT32 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT32 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::ConsistentCopyAfterDropIndexes [GOOD] Test command err: canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 
} ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" 
InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" 
InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@sta ... bType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" 
MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:10.889429Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:23:10.889462Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy1" took 34us result status StatusSuccess 2025-06-30T09:23:10.889559Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy1" PathDescription { Self { Name: "Copy1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000007 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Copy1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Sync" LocalPathId: 6 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 
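Editorial note on the canonic/result ACE pairs earlier in this test's output: they exercise ACL inheritance. Reading the pairs, the rule appears to be that a child keeps a parent ACE only if the matching InheritanceType bit is set (assumed here: 0x1 = inherit to objects, 0x2 = inherit to containers, 0x4 = inherit-only), that the inherit-only bit is cleared on propagation, and that the copy is marked Inherited: true. The following is a hedged sketch of that rule, checked against the dumps above rather than against YDB's aclib sources.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Assumed bit meanings, inferred from the canonic/result pairs in this log.
constexpr uint32_t InheritObject    = 0x1;
constexpr uint32_t InheritContainer = 0x2;
constexpr uint32_t InheritOnly      = 0x4;

struct TAce {
    uint32_t AccessType;       // 0 = deny, 1 = allow
    uint32_t AccessRight;      // rights bitmask, e.g. 59391
    std::string Sid;           // e.g. "6@staff"
    uint32_t InheritanceType;  // bitmask of the constants above
    bool Inherited = false;
};

// Effective ACEs a child path receives from its parent.
std::vector<TAce> InheritAcl(const std::vector<TAce>& parent, bool childIsContainer) {
    const uint32_t need = childIsContainer ? InheritContainer : InheritObject;
    std::vector<TAce> out;
    for (TAce ace : parent) {
        if (!(ace.InheritanceType & need)) {
            continue;                        // not propagated to this kind of child
        }
        ace.InheritanceType &= ~InheritOnly; // inherit-only is consumed on propagation
        ace.Inherited = true;
        out.push_back(ace);
    }
    return out;
}

int main() {
    // Parent ACE from the dump: allow 521 to "6@staff", InheritanceType 6
    // (container + inherit-only). A container child should receive it as type 2.
    std::vector<TAce> parent{{1, 521, "6@staff", InheritContainer | InheritOnly}};
    for (const TAce& ace : InheritAcl(parent, /*childIsContainer=*/true)) {
        std::cout << ace.Sid << " InheritanceType: " << ace.InheritanceType
                  << " Inherited: " << std::boolalpha << ace.Inherited << "\n";
        // prints: 6@staff InheritanceType: 2 Inherited: true
    }
}

This reproduces the pairs shown above: for a container child the parent entries with types 3, 2, 6, 7 come through as 3, 2, 2, 3, while object-only entries (types 1 and 5) are dropped; for an object child the selection inverts and type 5 comes through as 1.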
2025-06-30T09:23:10.889660Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:23:10.889685Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy2" took 27us result status StatusSuccess 2025-06-30T09:23:10.889790Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy2" PathDescription { Self { Name: "Copy2" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Copy2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:10.889889Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:23:10.889916Z node 15 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy3" took 31us result status StatusSuccess 2025-06-30T09:23:10.890011Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy3" PathDescription { Self { Name: "Copy3" PathId: 9 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Copy3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Sync" LocalPathId: 10 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 9 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TPQTestSlow::TestWriteVeryBigMessage [GOOD] >> SystemView::ShowCreateTableColumnAlterObject [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-UUID [GOOD] Test command err: 2025-06-30T09:23:02.681880Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670493955987691:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:02.681900Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0026d0/r3tmp/tmp80lMIW/pdisk_1.dat 2025-06-30T09:23:02.769506Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:02.781703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:02.781753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:02.787913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:02.789002Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 23275, node 1 2025-06-30T09:23:02.799020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:02.799034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:02.799037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:02.799088Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21248 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:02.834520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:03.147810Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670498250955883:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.147841Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.186600Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521670493955987860:2141] Handle TEvProposeTransaction 2025-06-30T09:23:03.186621Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521670493955987860:2141] TxId# 281474976715658 ProcessProposeTransaction 2025-06-30T09:23:03.186643Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7521670493955987860:2141] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7521670498250955904:2603] 2025-06-30T09:23:03.196138Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7521670498250955904:2603] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-06-30T09:23:03.196171Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7521670498250955904:2603] txid# 281474976715658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:23:03.196319Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7521670498250955904:2603] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:23:03.196332Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7521670498250955904:2603] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:23:03.196354Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7521670498250955904:2603] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:23:03.196383Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7521670498250955904:2603] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:23:03.196403Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7521670498250955904:2603] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-30T09:23:03.196462Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7521670498250955904:2603] txid# 281474976715658 HANDLE EvClientConnected 2025-06-30T09:23:03.196878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:03.203635Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7521670498250955904:2603] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-06-30T09:23:03.203663Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7521670498250955904:2603] txid# 281474976715658 SEND to# [1:7521670498250955903:2302] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-06-30T09:23:03.283389Z node 1 
:KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670498250956047:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.283430Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.293464Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521670493955987860:2141] Handle TEvProposeTransaction 2025-06-30T09:23:03.293482Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521670493955987860:2141] TxId# 281474976715659 ProcessProposeTransaction 2025-06-30T09:23:03.293502Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7521670493955987860:2141] Cookie# 0 userReqId# "" txid# 281474976715659 SEND to# [1:7521670498250956059:2722] 2025-06-30T09:23:03.294583Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7521670498250956059:2722] txid# 281474976715659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateCdcStream CreateCdcStream { TableName: "table" StreamDescription { Name: "a" Mode: ECdcStreamModeUpdate Format: ECdcStreamFormatJson VirtualTimestamps: false AwsRegion: "" SchemaChanges: false } } } } UserToken: "" DatabaseName: "" PeerName: "" 2025-06-30T09:23:03.294607Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7521670498250956059:2722] txid# 281474976715659 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:23:03.294626Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7521670498250956059:2722] txid# 281474976715659 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:23:03.294755Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7521670498250956059:2722] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:23:03.294795Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7521670498250956059:2722] HANDLE EvNavigateKeySetResult, txid# 281474976715659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:23:03.294811Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7521670498250956059:2722] txid# 281474976715659 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715659 TabletId# 72057594046644480} 2025-06-30T09:23:03.294858Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7521670498250956059:2722] txid# 281474976715659 HANDLE EvClientConnected 2025-06-30T09:23:03.297031Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7521670498250956059:2722] txid# 281474976715659 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-30T09:23:03.297051Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7521670498250956059:2722] txid# 281474976715659 SEND to# [1:7521670498250956058:2314] Source {TEvProposeTransactionStatus txid# 281474976715659 Status# 53} 2025-06-30T09:23:03.328118Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][1:7521670498250956226:2324] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:4:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-30T09:23:03.341223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670498250956321:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, 
NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.341250Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09 ... 461610Z node 10 :IMPORT DEBUG: schemeshard_import_getters.cpp:346: HandleScheme TEvExternalStorage::TEvHeadObjectResponse: self# [10:7521670526566545787:2198], result# HeadObjectResult { ETag: 625d2681cf599ca2d3b1f18a7a5a3ae6 ContentLength: 356 } REQUEST: GET /test_bucket/UuidTable/scheme.pb HTTP/1.1 HEADERS: Host: localhost:18339 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 1EE4B864-C351-4DD1-A66C-E38C4301613F amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250630/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=b57b9bf0a88c0b1a23bc8750571a4880924414ba6408a64bb9d336433ba29c13 content-type: application/xml range: bytes=0-355 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250630T092310Z S3_MOCK::HttpServeRead: /test_bucket/UuidTable/scheme.pb / 356 2025-06-30T09:23:10.466965Z node 10 :IMPORT DEBUG: schemeshard_import_getters.cpp:475: HandleScheme TEvExternalStorage::TEvGetObjectResponse: self# [10:7521670526566545787:2198], result# 625d2681cf599ca2d3b1f18a7a5a3ae6 2025-06-30T09:23:10.467135Z node 10 :IMPORT INFO: schemeshard_import_getters.cpp:692: Reply: self# [10:7521670526566545787:2198], success# 1, error# 2025-06-30T09:23:10.467182Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:10.467186Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:993: TImport::TTxProgress: OnSchemeResult: id# 281474976715665, itemIdx# 0, success# 1 2025-06-30T09:23:10.467314Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:633: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-30T09:23:10.470916Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:10.470956Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:10.470960Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1222: TImport::TTxProgress: OnAllocateResult: txId# 281474976710760, id# 281474976715665 2025-06-30T09:23:10.470983Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:423: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710760 2025-06-30T09:23:10.471027Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:10.471336Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, 
first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:10.472126Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:10.472138Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1318: TImport::TTxProgress: OnModifyResult: txId# 281474976710760, status# StatusAccepted 2025-06-30T09:23:10.472176Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:647: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976710760 Issue: '' } 2025-06-30T09:23:10.473883Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:10.518173Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:10.518188Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 281474976710760 2025-06-30T09:23:10.518226Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:633: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-30T09:23:10.522678Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:10.522724Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:10.522728Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1222: TImport::TTxProgress: OnAllocateResult: txId# 281474976710761, id# 281474976715665 2025-06-30T09:23:10.522769Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:524: TImport::TTxProgress: Restore propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710761 2025-06-30T09:23:10.522934Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:10.523066Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976710761:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-30T09:23:10.523951Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:10.523960Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1318: TImport::TTxProgress: OnModifyResult: txId# 281474976710761, status# StatusAccepted 2025-06-30T09:23:10.523992Z node 10 :IMPORT INFO: 
schemeshard_import__create.cpp:647: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976710761 Issue: '' } 2025-06-30T09:23:10.524748Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete REQUEST: HEAD /test_bucket/UuidTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:18339 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 4AC1A9AA-6B9E-48A9-8778-2537EEF1E601 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250630/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=abae148d5fcc7a809b32be5e10a5cac2197c80bb91387ee75e3d3f147bc7ac6f content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250630T092310Z S3_MOCK::HttpServeRead: /test_bucket/UuidTable/data_00.csv / 39 REQUEST: GET /test_bucket/UuidTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:18339 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: E74327CD-27DA-4D2B-A53F-C7BD75856AFB amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250630/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=39e368eb05bfb875cbf3055566435f805a40f3c35b4d5f5a4c6c4460aa20c578 content-type: application/xml range: bytes=0-38 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250630T092310Z S3_MOCK::HttpServeRead: /test_bucket/UuidTable/data_00.csv / 39 2025-06-30T09:23:10.590062Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:10.590077Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-30T09:23:10.595943Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:10.646264Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [10:7521670526566545972:2355] [0] Resolve database: name# /Root 2025-06-30T09:23:10.648602Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [10:7521670526566545972:2355] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 
2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:23:10.648620Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [10:7521670526566545972:2355] [0] Send request: schemeShardId# 72057594046644480 2025-06-30T09:23:10.649115Z node 10 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [10:7521670526566545972:2355] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715665 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:18339" scheme: HTTP bucket: "test_bucket" items { source_prefix: "UuidTable" destination_path: "/Root/UuidTable" } } StartTime { seconds: 1751275390 } EndTime { seconds: 1751275390 } } 2025-06-30T09:23:10.678398Z node 10 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [10:7521670522271577071:2140] Handle TEvExecuteKqpTransaction 2025-06-30T09:23:10.678413Z node 10 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [10:7521670522271577071:2140] TxId# 281474976715666 ProcessProposeKqpTransaction 2025-06-30T09:23:10.679041Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jz02bhnv5xzjcdkvdrdmdhhx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MjIxMGRjYWUtZjEwZWZmYzQtNTk0NzllMDQtZWE5MGY4ZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> BackupRestore::TestAllPrimitiveTypes-UINT32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT64 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::StoragePoolsFields [GOOD] Test command err: 2025-06-30T09:22:15.103191Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670294100371167:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:15.103227Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003135/r3tmp/tmpnbADBR/pdisk_1.dat 2025-06-30T09:22:15.163230Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:15.176434Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 16210, node 1 2025-06-30T09:22:15.188274Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:15.188288Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:15.188291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:15.188348Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:15.203657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.203695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.205311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26417 TClient is connected to 
server localhost:26417 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:15.242432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:15.450375Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:276: Subscribed for config changes 2025-06-30T09:22:15.450403Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:331: Updated config 2025-06-30T09:22:15.471837Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670294100372185:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.471865Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670294100372174:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.471890Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:15.473018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:15.479546Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670294100372188:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:22:15.537491Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670294100372261:2727] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:15.537938Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-30T09:22:15.537995Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:413: Perform request, TraceId.SpanIdPtr: 0x000073EFEA27C108 2025-06-30T09:22:15.538013Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:423: Received compile request, sender: [1:7521670294100372170:2298], queryUid: , queryText: "\n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n ", keepInCache: 1, split: 0{ TraceId: 01jz029vse935tgy1pgwwpgcnz, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2E3OTBkYmEtYTZkOTY4Y2QtNTNiMTMxZTQtODg1N2Y2MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default} 2025-06-30T09:22:15.538029Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-30T09:22:15.538045Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:519: 
Added request to queue, sender: [1:7521670294100372170:2298], queueSize: 1 2025-06-30T09:22:15.538225Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:880: Created compile actor, sender: [1:7521670294100372170:2298], compileActor: [1:7521670294100372280:2309] 2025-06-30T09:22:15.588323Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jz029vse935tgy1pgwwpgcnz, SessionId: CompileActor 2025-06-30 09:22:15.588 INFO ydb-core-sys_view-ut(pid=302861, tid=0x00007F8F3F09D640) [core dq] kqp_host.cpp:1377: Good place to weld in 2025-06-30T09:22:15.588598Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jz029vse935tgy1pgwwpgcnz, SessionId: CompileActor 2025-06-30 09:22:15.588 INFO ydb-core-sys_view-ut(pid=302861, tid=0x00007F8F3F09D640) [core dq] kqp_host.cpp:1382: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (Write! $2 (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/tier1")) '('typeId (String '"EXTERNAL_DATA_SOURCE"))) (Void) '('('mode 'createObject) '('features $3)))) ) 2025-06-30T09:22:15.588702Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jz029vse935tgy1pgwwpgcnz, SessionId: CompileActor 2025-06-30 09:22:15.588 INFO ydb-core-sys_view-ut(pid=302861, tid=0x00007F8F3F09D640) [KQP] kqp_host.cpp:1388: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake ... rvice] [TPoolFetcherActor] ActorId: [41:7521670493325876728:2287], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:02.959269Z node 41 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:02.960255Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:02.963219Z node 41 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [41:7521670493325876757:2291], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:23:03.020655Z node 41 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [41:7521670497620844104:2326] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:03.043624Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02b9vb4p2dgr4mg9n7ayhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=ZDdhMTBhNDgtYjRiZGM0N2QtZmUyZDlhNzYtMmUzM2ZjMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:03.044605Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [41:7521670497620844145:2299], owner: [41:7521670497620844142:2297], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:03.058118Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [41:7521670497620844145:2299], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:23:03.058430Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670497620844145:2299], row count: 0, finished: 1 2025-06-30T09:23:03.058485Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7521670497620844145:2299], owner: [41:7521670497620844142:2297], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:03.062643Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275383042, txId: 281474976715660] shutting down 2025-06-30T09:23:03.508514Z node 41 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:04.092083Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz02bb8216qt4f5wwnvf37r8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=OTY0MTc0YjctZjE3YmE4ZTEtOGI4MTgyNTktN2FiOTU4MDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:23:04.092801Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [41:7521670501915811493:2313], owner: [41:7521670501915811489:2311], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:04.097339Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [41:7521670501915811493:2313], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:23:04.097484Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670501915811493:2313], row count: 0, finished: 1 2025-06-30T09:23:04.097532Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7521670501915811493:2313], owner: [41:7521670501915811489:2311], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:04.098616Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275384091, txId: 281474976715662] shutting down 2025-06-30T09:23:05.129811Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz02bc8g8zmdkprrhr509xgj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=NzZmMjQ5N2QtYmQ4MzEyYmEtOGQwYzA1YTUtZGI2ODk0MTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:05.130705Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [41:7521670506210778825:2324], owner: [41:7521670506210778822:2322], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:05.143204Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [41:7521670506210778825:2324], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:23:05.143399Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670506210778825:2324], row count: 0, finished: 1 2025-06-30T09:23:05.143448Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7521670506210778825:2324], owner: [41:7521670506210778822:2322], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:05.144694Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275385125, txId: 281474976715664] shutting down 2025-06-30T09:23:06.167522Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz02bd941wsjaq9fmwe25a5h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=ODViYjRkYzMtNDRmMmFhOTAtYWI2M2ExNmUtOGIzMGU4NTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:23:06.168034Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [41:7521670510505746159:2335], owner: [41:7521670510505746155:2333], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:06.169509Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [41:7521670510505746159:2335], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:23:06.169666Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670510505746159:2335], row count: 0, finished: 1 2025-06-30T09:23:06.169697Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7521670510505746159:2335], owner: [41:7521670510505746155:2333], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:06.170583Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275386166, txId: 281474976715666] shutting down 2025-06-30T09:23:07.236532Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jz02be97b43eqxrcdsgq3njp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=OGI1NzIwYzgtOTYwODFkZGYtYTdhODA3MGMtNjRkYjg1MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:07.237307Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [41:7521670514800713491:2346], owner: [41:7521670514800713488:2344], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:07.243532Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [41:7521670514800713491:2346], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:23:07.243718Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670514800713491:2346], row count: 0, finished: 1 2025-06-30T09:23:07.243764Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7521670514800713491:2346], owner: [41:7521670514800713488:2344], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:07.245050Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275387233, txId: 281474976715668] shutting down 2025-06-30T09:23:07.511045Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[41:7521670493325876169:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:07.512024Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:23:08.283687Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. 
Ctx: { TraceId: 01jz02bfaz8nbxxb44r324tans, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=NzJkZDUzNmItMzQ4NDg5NjEtZjFlYjI5ZGQtZjVjMGIzM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:08.284837Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [41:7521670519095680829:2359], owner: [41:7521670519095680826:2357], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:08.285293Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [41:7521670519095680829:2359], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:23:08.288296Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7521670519095680829:2359], row count: 1, finished: 1 2025-06-30T09:23:08.288579Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7521670519095680829:2359], owner: [41:7521670519095680826:2357], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:08.291132Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275388278, txId: 281474976715670] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::MoveGroups [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: [1:2963:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: [1:2963:2117] Leader for TabletID 72057594037932033 is [1:3112:2119] sender: [1:3115:2106] recipient: [1:2963:2117] 2025-06-30T09:22:38.417801Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:38.418827Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:38.418912Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:38.419220Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:38.419332Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:38.419377Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:38.419383Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:38.419446Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:38.420345Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:38.420381Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:38.420421Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:38.420438Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:38.420449Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:38.420457Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:3112:2119] sender: [1:3138:2106] recipient: [1:60:2107] 2025-06-30T09:22:38.431021Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:38.431081Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:38.441485Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:38.441554Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:38.441570Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:38.441582Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:38.441606Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:38.441615Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:38.441619Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:38.441631Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:38.452051Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:38.452129Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:38.462502Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:38.462560Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:38.462789Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:38.462796Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:38.462831Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection 
service started 2025-06-30T09:22:38.462838Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:38.465053Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: 
"block-4-2" VDiskKind: "Default" NumGroups: 150 PDiskFilter { Property { Type: ROT } } } } } 2025-06-30T09:22:38.465323Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-30T09:22:38.465333Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-30T09:22:38.465341Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-30T09:22:38.465350Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-30T09:22:38.465354Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-30T09:22:38.465361Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2025-06-30T09:22:38.465365Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2025-06-30T09:22:38.465371Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2025-06-30T09:22:38.465376Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2025-06-30T09:22:38.465381Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2025-06-30T09:22:38.465385Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2025-06-30T09:22:38.465390Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2025-06-30T09:22:38.465395Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2025-06-30T09:22:38.465399Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2025-06-30T09:22:38.465404Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2025-06-30T09:22:38.465409Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2025-06-30T09:22:38.465414Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2025-06-30T09:22:38.465419Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2025-06-30T09:22:38.465424Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2025-06-30T09:22:38.465429Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2025-06-30T09:22:38.465434Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:3 ... 
t_pdisks.cpp:340} Create new pdisk PDiskId# 177:1002 Path# /dev/disk3 2025-06-30T09:23:02.555179Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 178:1000 Path# /dev/disk1 2025-06-30T09:23:02.555185Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 178:1001 Path# /dev/disk2 2025-06-30T09:23:02.555191Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 178:1002 Path# /dev/disk3 2025-06-30T09:23:02.555199Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1000 Path# /dev/disk1 2025-06-30T09:23:02.555204Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1001 Path# /dev/disk2 2025-06-30T09:23:02.555210Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1002 Path# /dev/disk3 2025-06-30T09:23:02.555215Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1000 Path# /dev/disk1 2025-06-30T09:23:02.555220Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1001 Path# /dev/disk2 2025-06-30T09:23:02.555224Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1002 Path# /dev/disk3 2025-06-30T09:23:02.555229Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1000 Path# /dev/disk1 2025-06-30T09:23:02.555234Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1001 Path# /dev/disk2 2025-06-30T09:23:02.555240Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1002 Path# /dev/disk3 2025-06-30T09:23:02.555246Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1000 Path# /dev/disk1 2025-06-30T09:23:02.555252Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1001 Path# /dev/disk2 2025-06-30T09:23:02.555258Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1002 Path# /dev/disk3 2025-06-30T09:23:02.555264Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1000 Path# /dev/disk1 2025-06-30T09:23:02.555270Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1001 Path# /dev/disk2 2025-06-30T09:23:02.555275Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1002 Path# /dev/disk3 2025-06-30T09:23:02.555280Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1000 Path# /dev/disk1 2025-06-30T09:23:02.555286Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1001 Path# /dev/disk2 2025-06-30T09:23:02.555291Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1002 Path# /dev/disk3 2025-06-30T09:23:02.555296Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1000 Path# /dev/disk1 2025-06-30T09:23:02.555301Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1001 Path# /dev/disk2 2025-06-30T09:23:02.555306Z node 151 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1002 Path# /dev/disk3 2025-06-30T09:23:02.555313Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 186:1000 Path# /dev/disk1 2025-06-30T09:23:02.555319Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 186:1001 Path# /dev/disk2 2025-06-30T09:23:02.555325Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 186:1002 Path# /dev/disk3 2025-06-30T09:23:02.555330Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1000 Path# /dev/disk1 2025-06-30T09:23:02.555335Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1001 Path# /dev/disk2 2025-06-30T09:23:02.555340Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1002 Path# /dev/disk3 2025-06-30T09:23:02.555347Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1000 Path# /dev/disk1 2025-06-30T09:23:02.555352Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1001 Path# /dev/disk2 2025-06-30T09:23:02.555358Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1002 Path# /dev/disk3 2025-06-30T09:23:02.555362Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1000 Path# /dev/disk1 2025-06-30T09:23:02.555368Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1001 Path# /dev/disk2 2025-06-30T09:23:02.555373Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1002 Path# /dev/disk3 2025-06-30T09:23:02.555377Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1000 Path# /dev/disk1 2025-06-30T09:23:02.555382Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1001 Path# /dev/disk2 2025-06-30T09:23:02.555387Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1002 Path# /dev/disk3 2025-06-30T09:23:02.555392Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1000 Path# /dev/disk1 2025-06-30T09:23:02.555398Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1001 Path# /dev/disk2 2025-06-30T09:23:02.555402Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1002 Path# /dev/disk3 2025-06-30T09:23:02.555407Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1000 Path# /dev/disk1 2025-06-30T09:23:02.555413Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1001 Path# /dev/disk2 2025-06-30T09:23:02.555419Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1002 Path# /dev/disk3 2025-06-30T09:23:02.555424Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1000 Path# /dev/disk1 2025-06-30T09:23:02.555429Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1001 Path# /dev/disk2 2025-06-30T09:23:02.555434Z node 151 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1002 Path# /dev/disk3 2025-06-30T09:23:02.555440Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 194:1000 Path# /dev/disk1 2025-06-30T09:23:02.555445Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 194:1001 Path# /dev/disk2 2025-06-30T09:23:02.555450Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 194:1002 Path# /dev/disk3 2025-06-30T09:23:02.555455Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1000 Path# /dev/disk1 2025-06-30T09:23:02.555461Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1001 Path# /dev/disk2 2025-06-30T09:23:02.555466Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1002 Path# /dev/disk3 2025-06-30T09:23:02.555471Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1000 Path# /dev/disk1 2025-06-30T09:23:02.555477Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1001 Path# /dev/disk2 2025-06-30T09:23:02.555484Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1002 Path# /dev/disk3 2025-06-30T09:23:02.555490Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1000 Path# /dev/disk1 2025-06-30T09:23:02.555496Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1001 Path# /dev/disk2 2025-06-30T09:23:02.555502Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1002 Path# /dev/disk3 2025-06-30T09:23:02.555507Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1000 Path# /dev/disk1 2025-06-30T09:23:02.555512Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1001 Path# /dev/disk2 2025-06-30T09:23:02.555518Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1002 Path# /dev/disk3 2025-06-30T09:23:02.555523Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1000 Path# /dev/disk1 2025-06-30T09:23:02.555527Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1001 Path# /dev/disk2 2025-06-30T09:23:02.555533Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1002 Path# /dev/disk3 2025-06-30T09:23:02.555537Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1000 Path# /dev/disk1 2025-06-30T09:23:02.555543Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1001 Path# /dev/disk2 2025-06-30T09:23:02.555550Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1002 Path# /dev/disk3 2025-06-30T09:23:02.625134Z node 151 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 151 Type# 268639257 2025-06-30T09:23:02.629213Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2025-06-30T09:23:02.655623Z node 151 
:BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 1 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 1 ExplicitGroupId: 2147483748 } } } 2025-06-30T09:23:02.676399Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2025-06-30T09:23:02.699103Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 2 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 2 ExplicitGroupId: 2147483749 } } } 2025-06-30T09:23:02.713795Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2025-06-30T09:23:02.744413Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 3 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 3 } } } 2025-06-30T09:23:02.760360Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } >> BackupRestore::RestoreIndexTableReadReplicasSettings [GOOD] >> BackupRestore::RestoreTableSplitBoundaries ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestWriteVeryBigMessage [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-30T09:22:33.158053Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:33.158088Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: [1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-30T09:22:33.167903Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:33.171301Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 
Important: false } 2025-06-30T09:22:33.171675Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-30T09:22:33.172417Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-30T09:22:33.173001Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-30T09:22:33.173519Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:190:2201] 2025-06-30T09:22:33.176014Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6e691902-c759d464-9694bf36-3876ade7_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:33.244245Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3cafe51e-3982a500-436579a8-4c050309_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:33.351706Z node 1 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:22:33.375260Z node 1 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:22:33.459243Z node 1 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:22:33.551587Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|7645ff3-5ac889ab-5b8db3fe-58059626_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:33.620945Z node 1 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:22:33.636658Z node 1 :PERSQUEUE WARN: cache_eviction.h:141: Cropped PQ response. Tablet: [1:189:2200]cookie 2 partition 0 size 41944795. Cropped 3 blobs of 6 2025-06-30T09:22:33.654268Z node 1 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:22:33.693933Z node 1 :PERSQUEUE ERROR: cache_eviction.h:422: Can't evict. No such blob in L1. Partition 0 offset 0 size 8191635 cause it's been evicted from L2. Actual L1 size: 4 2025-06-30T09:22:33.693971Z node 1 :PERSQUEUE ERROR: cache_eviction.h:422: Can't evict. No such blob in L1. Partition 0 offset 0 size 8191635 cause it's been evicted from L2. Actual L1 size: 4 2025-06-30T09:22:33.693980Z node 1 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. 
Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:426:2057] recipient: [1:103:2136] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:429:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:430:2057] recipient: [1:428:2404] Leader for TabletID 72057594037927937 is [1:431:2405] sender: [1:432:2057] recipient: [1:428:2404] 2025-06-30T09:22:33.764671Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:33.764704Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-30T09:22:33.764808Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:480:2446] 2025-06-30T09:22:33.765541Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:481:2447] 2025-06-30T09:22:33.771089Z node 1 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:22:33.771131Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [1:480:2446] 2025-06-30T09:22:33.771850Z node 1 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:22:33.771872Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [1:481:2447] Leader for TabletID 72057594037927937 is [1:431:2405] sender: [1:511:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:112:2057] recipient: [2:105:2137] 2025-06-30T09:22:34.058020Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:34.058059Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:158:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:181:2057] recipient: [2:14:2061] 2025-06-30T09:22:34.063309Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:34.063604Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 2 actor [2:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: 
"rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-30T09:22:34.063792Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:187:2198] 2025-06-30T09:22:34.064440Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:187:2198] 2025-06-30T09:22:34.064959Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:188:2199] 2025-06-30T09:22:34.065474Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:188:2199] 2025-06-30T09:22:34.067564Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1e533d96-ddcf5299-13293023-7941d4bb_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:34.121421Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|22866ab7-47e0253e-7555166d-93b88635_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:34.242785Z node 2 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:22:34.276572Z node 2 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:22:34.299748Z node 2 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:22:34.375247Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d9f948b7-fde3bfbc-cfe6d31c-f97e849e_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:34.425224Z node 2 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:22:34.436436Z node 2 :PERSQUEUE WARN: cache_eviction.h:141: Cropped PQ response. Tablet: [2:187:2198]cookie 2 partition 0 size 41944795. Cropped 3 blobs of 6 2025-06-30T09:22:34.436507Z node 2 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:22:34.473539Z node 2 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 !Reboot 72057594037927937 (actor [2:111:2141]) on event NKikimr::TEvPersQueue::TEvOffsets ! 
Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:423:2057] recipient: [2:103:2136] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:426:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:427:2057] recipient: [2:425:2401] Leader for TabletID 72057594037927937 is [2:428:2402] sender: [2:429:2057] recipient: [2:425:2401] 2025-06-30T09:22:34.557778Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:34.557809Z node 2 ... is [54:157:2176] sender: [54:158:2057] recipient: [54:151:2172] Leader for TabletID 72057594037927937 is [54:111:2141] sender: [54:181:2057] recipient: [54:14:2061] 2025-06-30T09:23:10.200817Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:23:10.201109Z node 54 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 54 actor [54:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 54 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 54 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 54 Important: false } 2025-06-30T09:23:10.201299Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [54:187:2198] 2025-06-30T09:23:10.202201Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [54:187:2198] 2025-06-30T09:23:10.202767Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [54:188:2199] 2025-06-30T09:23:10.203350Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [54:188:2199] 2025-06-30T09:23:10.205467Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|565a0648-4c03f5c7-902847f0-771e8513_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:10.226821Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|4a32c5d4-82de57e3-457f4512-a2785502_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:10.336186Z node 54 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:23:10.365013Z node 54 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:23:10.414583Z node 54 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. 
Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:23:10.488810Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a8d4e6e9-11d6ca21-7e39a4fe-da5c53dd_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:10.557571Z node 54 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:23:10.581656Z node 54 :PERSQUEUE WARN: cache_eviction.h:141: Cropped PQ response. Tablet: [54:187:2198]cookie 2 partition 0 size 41944795. Cropped 3 blobs of 6 2025-06-30T09:23:10.605442Z node 54 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:23:10.653508Z node 54 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [54:111:2141] sender: [54:415:2057] recipient: [54:103:2136] Leader for TabletID 72057594037927937 is [54:111:2141] sender: [54:418:2057] recipient: [54:417:2393] Leader for TabletID 72057594037927937 is [54:111:2141] sender: [54:419:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [54:420:2394] sender: [54:421:2057] recipient: [54:417:2393] 2025-06-30T09:23:10.725826Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:23:10.725861Z node 54 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-30T09:23:10.726055Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [54:469:2435] 2025-06-30T09:23:10.726856Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [54:470:2436] 2025-06-30T09:23:10.733765Z node 54 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:23:10.733808Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [54:469:2435] 2025-06-30T09:23:10.734535Z node 54 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-30T09:23:10.734558Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [54:470:2436] Leader for TabletID 72057594037927937 is [54:420:2394] sender: [54:500:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:107:2057] recipient: [55:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:107:2057] recipient: [55:105:2137] Leader for TabletID 72057594037927937 is [55:111:2141] sender: [55:112:2057] recipient: [55:105:2137] 2025-06-30T09:23:11.237984Z node 55 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:23:11.238017Z node 55 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [55:153:2057] recipient: [55:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [55:153:2057] recipient: [55:151:2172] Leader for TabletID 72057594037927938 is [55:157:2176] sender: [55:158:2057] recipient: [55:151:2172] Leader for TabletID 72057594037927937 is [55:111:2141] sender: [55:183:2057] recipient: [55:14:2061] 2025-06-30T09:23:11.243179Z node 55 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:23:11.243442Z node 55 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 55 actor [55:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 55 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 55 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 55 Important: false } 2025-06-30T09:23:11.243607Z node 55 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [55:189:2200] 2025-06-30T09:23:11.244333Z node 55 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [55:189:2200] 2025-06-30T09:23:11.244839Z node 55 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [55:190:2201] 2025-06-30T09:23:11.245351Z node 55 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [55:190:2201] 2025-06-30T09:23:11.247351Z node 55 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|7b286aed-af2ec17d-b97d2965-95c9e9a2_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:11.267228Z node 55 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6691dfee-63e94fc8-e1a4570d-b736d2de_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:11.340402Z node 55 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. 
Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:23:11.368606Z node 55 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:23:11.410863Z node 55 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:23:11.499804Z node 55 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a1ad629d-1f9bf3fe-ea81bcfe-2cf67e37_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:11.564797Z node 55 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-30T09:23:11.581454Z node 55 :PERSQUEUE WARN: cache_eviction.h:141: Cropped PQ response. Tablet: [55:189:2200]cookie 2 partition 0 size 41944795. Cropped 3 blobs of 6 2025-06-30T09:23:11.581544Z node 55 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [55:111:2141] sender: [55:426:2057] recipient: [55:103:2136] Leader for TabletID 72057594037927937 is [55:111:2141] sender: [55:429:2057] recipient: [55:14:2061] Leader for TabletID 72057594037927937 is [55:111:2141] sender: [55:430:2057] recipient: [55:428:2404] Leader for TabletID 72057594037927937 is [55:431:2405] sender: [55:432:2057] recipient: [55:428:2404] 2025-06-30T09:23:11.704523Z node 55 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:23:11.704552Z node 55 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-30T09:23:11.704709Z node 55 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [55:480:2446] 2025-06-30T09:23:11.705382Z node 55 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [55:481:2447] 2025-06-30T09:23:11.709938Z node 55 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:23:11.709970Z node 55 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [55:480:2446] 2025-06-30T09:23:11.710731Z node 55 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-30T09:23:11.710772Z node 55 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [55:481:2447] Leader for TabletID 72057594037927937 is [55:431:2405] sender: [55:511:2057] recipient: [55:14:2061] |82.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |82.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |82.6%| [LD] {RESULT} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut >> DataShardOutOfOrder::TestImmediateQueueThenSplit+UseSink >> BackupPathTest::CommonPrefixButExplicitImportItems [GOOD] >> TPersQueueTest::DirectReadNotCached [GOOD] >> TPersQueueTest::DirectReadBudgetOnRestart >> EncryptedExportTest::EncryptedExportAndImport [GOOD] >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] >> DataShardOutOfOrder::TestUnprotectedReadsThenWriteVisibility >> GroupWriteTest::Simple [GOOD] >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock-EvWrite >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed+EvWrite >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] |82.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |82.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |82.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors >> BackupPathTest::ExportDirectoryWithEncryption >> BackupRestore::TestAllPrimitiveTypes-INT16 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT32 >> TExportToS3WithRebootsTests::ShouldSucceedOnMultiShardTable >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive [GOOD] >> DataShardTxOrder::DelayData ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut/unittest >> GroupWriteTest::Simple [GOOD] Test command err: RandomSeed# 5368560667551819097 2025-06-30T09:23:10.213472Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 1 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-30T09:23:10.219578Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-30T09:23:10.219603Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 1 going to send TEvBlock {TabletId# 1 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-30T09:23:10.220317Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-30T09:23:10.235153Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-30T09:23:10.236492Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-30T09:23:12.966944Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-30T09:23:12.966979Z 1 
00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-30T09:23:12.966990Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-30T09:23:12.966995Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-30T09:23:12.981893Z 1 00h01m30.010512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [1:2:33:0:11:3800255:4] barrier# {Soft# {Gen# 2 Step# 28} Hard# {Gen# 2 Step# 4294967295}} 2025-06-30T09:23:12.982567Z 3 00h01m30.010512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [1:2:33:0:11:3800255:6] barrier# {Soft# {Gen# 2 Step# 28} Hard# {Gen# 2 Step# 4294967295}} 2025-06-30T09:23:12.982599Z 8 00h01m30.010512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [1:2:33:0:11:3800255:3] barrier# {Soft# {Gen# 2 Step# 28} Hard# {Gen# 2 Step# 4294967295}} 2025-06-30T09:23:12.982619Z 7 00h01m30.010512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [1:2:33:0:11:3800255:2] barrier# {Soft# {Gen# 2 Step# 28} Hard# {Gen# 2 Step# 4294967295}} 2025-06-30T09:23:12.982636Z 2 00h01m30.010512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [1:2:33:0:11:3800255:5] barrier# {Soft# {Gen# 2 Step# 28} Hard# {Gen# 2 Step# 4294967295}} 2025-06-30T09:23:12.982658Z 6 00h01m30.010512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [1:2:33:0:11:3800255:1] barrier# {Soft# {Gen# 2 Step# 28} Hard# {Gen# 2 Step# 4294967295}} 2025-06-30T09:23:12.983223Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Status# OK} 2025-06-30T09:23:12.983253Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Status# OK} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] Test command err: RandomSeed# 15772323108426458652 2025-06-30T09:23:11.103171Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-30T09:23:11.103616Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2672771713449046764] 2025-06-30T09:23:11.105002Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> 
DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty >> EncryptedExportTest::EncryptionAndCompression >> DataShardOutOfOrder::UncommittedReadSetAck >> BackupRestoreS3::TestAllPrimitiveTypes-UINT32 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT64 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive [GOOD] Test command err: RandomSeed# 15411257973700105081 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] Test command err: RandomSeed# 1974757576772522138 >> DataShardTxOrder::ReadWriteReorder >> BackupRestoreS3::RestoreTableSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreIndexTableSplitBoundaries >> BackupRestore::TestAllPrimitiveTypes-UINT64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_DATE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_DATETIME [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_TIMESTAMP [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP64 |82.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |82.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |82.6%| [LD] {RESULT} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut >> BackupRestore::RestoreTableSplitBoundaries [GOOD] >> BackupRestore::ImportDataShouldHandleErrors >> BackupPathTest::ExportDirectoryWithEncryption [GOOD] >> EncryptedExportTest::EncryptionAndCompression [GOOD] >> BsControllerConfig::ExtendBoxAndStoragePool [GOOD] >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed+EvWrite >> DataShardOutOfOrder::TestImmediateQueueThenSplit+UseSink [GOOD] >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink >> DataShardTxOrder::ReadWriteReorder [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT64 >> BackupPathTest::EncryptedExportWithExplicitDestinationPath >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites+EvWrite >> CommonEncryptionRequirementsTest::CommonEncryptionRequirements [FAIL] >> TOlapReboots::AlterTtlSettings [GOOD] >> EncryptedExportTest::EncryptionAndChecksum ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::ExtendBoxAndStoragePool [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: [1:2963:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: [1:2963:2117] Leader for TabletID 72057594037932033 is [1:3112:2119] sender: [1:3115:2106] recipient: [1:2963:2117] 2025-06-30T09:22:38.806487Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:38.807687Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:38.807789Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:38.808172Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:38.808288Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:38.808337Z node 1 
:BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:38.808343Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:38.808454Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:38.809738Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:22:38.809779Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:38.809827Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:38.809853Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:38.809870Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:38.809881Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:3112:2119] sender: [1:3138:2106] recipient: [1:60:2107] 2025-06-30T09:22:38.820538Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:38.820600Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:38.831045Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:38.831114Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:38.831136Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:38.831153Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:38.831191Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:38.831204Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:38.831212Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:38.831224Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:38.841650Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:38.841731Z node 1 :BS_CONTROLLER 
DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:38.852111Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:38.852165Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:38.852370Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:38.852377Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:38.852421Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:22:38.852428Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:38.854975Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 60 PDiskFilter { Property { Type: ROT } } } } Command { QueryBaseConfig { } } } 2025-06-30T09:22:38.855219Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-30T09:22:38.855233Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-30T09:22:38.855239Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-30T09:22:38.855245Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-30T09:22:38.855250Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-30T09:22:38.855255Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 
2025-06-30T09:22:38.855260Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2025-06-30T09:22:38.855267Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2025-06-30T09:22:38.855272Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2025-06-30T09:22:38.855276Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2025-06-30T09:22:38.855281Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2025-06-30T09:22:38.855286Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2025-06-30T09:22:38.855291Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2025-06-30T09:22:38.855296Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2025-06-30T09:22:38.855309Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2025-06-30T09:22:38.855314Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2025-06-30T09:22:38.855319Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2025-06-30T09:22:38.855326Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2025-06-30T09:22:38.855331Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2025-06-30T09:22:38.855336Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2025-06-30T09:22:38.855341Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1002 Path# /dev/disk3 2025-06-30T09:22:38.855346Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 Path# /dev/disk1 2025-06-30T09:22:38.855351Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1001 Path# /dev/disk2 2025-06-30T09:22:38.855356Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1002 Path# /dev/disk3 2025-06-30T09:22:38.855361Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk1 2025-06-30T09:22:38.855366Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1001 Path# /dev/disk2 2025-06-30T09:22:38.855371Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1002 Path# /dev/disk3 2025-06-30T09:22:38.855376Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk1 2025-06-30T09:22:38.855381Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1001 Path# /dev/disk2 2025-06-30T09:22:38.855386Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1002 Path# /dev/disk3 2025-06-30T09:22:38.855391Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} 
Create new pdisk PDiskId# 11:1000 Path# /dev/disk1 2025-06-30T09:22:38.855396Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1001 Path# /dev/disk2 2025-06-30T09:22:38.855401Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Cr ... 2025-06-30T09:23:05.650768Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 204:1000 Path# /dev/disk1 2025-06-30T09:23:05.650774Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 204:1001 Path# /dev/disk2 2025-06-30T09:23:05.650780Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 204:1002 Path# /dev/disk3 2025-06-30T09:23:05.650786Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1000 Path# /dev/disk1 2025-06-30T09:23:05.650791Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1001 Path# /dev/disk2 2025-06-30T09:23:05.650796Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1002 Path# /dev/disk3 2025-06-30T09:23:05.650804Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1000 Path# /dev/disk1 2025-06-30T09:23:05.650807Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1001 Path# /dev/disk2 2025-06-30T09:23:05.650811Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1002 Path# /dev/disk3 2025-06-30T09:23:05.650815Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1000 Path# /dev/disk1 2025-06-30T09:23:05.650821Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1001 Path# /dev/disk2 2025-06-30T09:23:05.650825Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1002 Path# /dev/disk3 2025-06-30T09:23:05.650829Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1000 Path# /dev/disk1 2025-06-30T09:23:05.650833Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1001 Path# /dev/disk2 2025-06-30T09:23:05.650836Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1002 Path# /dev/disk3 2025-06-30T09:23:05.650840Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 209:1000 Path# /dev/disk1 2025-06-30T09:23:05.650843Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 209:1001 Path# /dev/disk2 2025-06-30T09:23:05.650847Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 209:1002 Path# /dev/disk3 2025-06-30T09:23:05.650850Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1000 Path# /dev/disk1 2025-06-30T09:23:05.650855Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1001 Path# /dev/disk2 2025-06-30T09:23:05.650859Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1002 Path# /dev/disk3 2025-06-30T09:23:05.743632Z node 161 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 161 Type# 268639257 
2025-06-30T09:23:05.750193Z node 161 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 4 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12051 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12052 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12053 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12054 } HostConfigId: 4 } Host { Key { Fqdn: 
"::1" IcPort: 12055 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12056 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12057 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12058 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12059 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12060 } HostConfigId: 4 } ItemConfigGeneration: 1 } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 180 PDiskFilter { Property { Type: ROT } } ItemConfigGeneration: 1 } } Command { QueryBaseConfig { } } } 2025-06-30T09:23:05.752693Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1000 Path# /dev/disk1 2025-06-30T09:23:05.752734Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1001 Path# /dev/disk2 2025-06-30T09:23:05.752742Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1002 Path# /dev/disk3 2025-06-30T09:23:05.752747Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1000 Path# /dev/disk1 2025-06-30T09:23:05.752753Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1001 Path# /dev/disk2 2025-06-30T09:23:05.752759Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1002 Path# /dev/disk3 2025-06-30T09:23:05.752764Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1000 Path# /dev/disk1 2025-06-30T09:23:05.752770Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1001 Path# /dev/disk2 2025-06-30T09:23:05.752776Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1002 Path# /dev/disk3 2025-06-30T09:23:05.752786Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1000 Path# /dev/disk1 2025-06-30T09:23:05.752792Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1001 Path# /dev/disk2 2025-06-30T09:23:05.752798Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1002 Path# /dev/disk3 2025-06-30T09:23:05.752804Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1000 Path# /dev/disk1 2025-06-30T09:23:05.752809Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1001 Path# /dev/disk2 2025-06-30T09:23:05.752815Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1002 Path# /dev/disk3 2025-06-30T09:23:05.752820Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1000 Path# /dev/disk1 2025-06-30T09:23:05.752826Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1001 Path# /dev/disk2 2025-06-30T09:23:05.752832Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1002 Path# /dev/disk3 2025-06-30T09:23:05.752837Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1000 Path# /dev/disk1 2025-06-30T09:23:05.752843Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1001 
Path# /dev/disk2 2025-06-30T09:23:05.752849Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1002 Path# /dev/disk3 2025-06-30T09:23:05.752854Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 218:1000 Path# /dev/disk1 2025-06-30T09:23:05.752860Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 218:1001 Path# /dev/disk2 2025-06-30T09:23:05.752867Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 218:1002 Path# /dev/disk3 2025-06-30T09:23:05.752872Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1000 Path# /dev/disk1 2025-06-30T09:23:05.752878Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1001 Path# /dev/disk2 2025-06-30T09:23:05.752884Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1002 Path# /dev/disk3 2025-06-30T09:23:05.752889Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1000 Path# /dev/disk1 2025-06-30T09:23:05.752896Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1001 Path# /dev/disk2 2025-06-30T09:23:05.752902Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1002 Path# /dev/disk3 2025-06-30T09:23:05.842718Z node 161 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 161 Type# 268639257 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ReadWriteReorder [GOOD] Test command err: 2025-06-30T09:23:13.944906Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:134:2155]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:13.968637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:23:13.968663Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:13.968838Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:134:2155]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:13.968945Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:134:2155] 2025-06-30T09:23:13.969004Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:23:13.981508Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:134:2155]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:13.983546Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:23:13.983622Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:23:13.983845Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-30T09:23:13.983855Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-30T09:23:13.983864Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-30T09:23:13.983940Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 
2025-06-30T09:23:13.984021Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:23:13.984033Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:200:2155] in generation 2 2025-06-30T09:23:14.021438Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:23:14.031673Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-30T09:23:14.031758Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:23:14.031786Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-30T09:23:14.031792Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-30T09:23:14.031798Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-30T09:23:14.031804Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:23:14.031897Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:134:2155], Recipient [1:134:2155]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:14.031906Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:14.031993Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-30T09:23:14.032019Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-30T09:23:14.032031Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:23:14.032039Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:14.032048Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-30T09:23:14.032054Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-30T09:23:14.032061Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-30T09:23:14.032067Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-30T09:23:14.032073Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:23:14.032085Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:134:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:14.032091Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:14.032098Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-30T09:23:14.032727Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:134:2155]: 
NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\n\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-30T09:23:14.032741Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:23:14.032759Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:23:14.032794Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-30T09:23:14.032806Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-30T09:23:14.032817Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-30T09:23:14.032827Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-30T09:23:14.032833Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-30T09:23:14.032839Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-30T09:23:14.032845Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:23:14.032927Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-30T09:23:14.032931Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-30T09:23:14.032936Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-30T09:23:14.032940Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:23:14.032951Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-30T09:23:14.032956Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-30T09:23:14.032960Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-30T09:23:14.032965Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-30T09:23:14.032971Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-30T09:23:14.054324Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:23:14.054357Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:23:14.054367Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:23:14.054381Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 
9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-30T09:23:14.054399Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-30T09:23:14.054544Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:134:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:14.054554Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:14.054563Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-30T09:23:14.054588Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:134:2155]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-30T09:23:14.054594Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-30T09:23:14.054634Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-30T09:23:14.054643Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-30T09:23:14.054648Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-30T09:23:14.054654Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-30T09:23:14.055598Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-30T09:23:14.055617Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:23:14.055679Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:134:2155], Recipient [1:134:2155]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:14.055686Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:14.055698Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:23:14.055707Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:23:14.055712Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:23:14.055722Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-30T09:23:14.055729Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000001: ... 
7185 executing on unit CompletedOperations 2025-06-30T09:23:14.720265Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:12] at 9437185 has finished 2025-06-30T09:23:14.720271Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:14.720275Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-30T09:23:14.720280Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-30T09:23:14.720284Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-30T09:23:14.720329Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:238:2229], Recipient [1:238:2229]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:14.720336Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:14.720344Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:23:14.720349Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:23:14.720354Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:23:14.720360Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000005:12] in PlanQueue unit at 9437184 2025-06-30T09:23:14.720365Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit PlanQueue 2025-06-30T09:23:14.720371Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-30T09:23:14.720375Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit PlanQueue 2025-06-30T09:23:14.720379Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit LoadTxDetails 2025-06-30T09:23:14.720384Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit LoadTxDetails 2025-06-30T09:23:14.720524Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437184 loaded tx from db 1000005:12 keys extracted: 3 2025-06-30T09:23:14.720531Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-30T09:23:14.720537Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit LoadTxDetails 2025-06-30T09:23:14.720542Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit FinalizeDataTxPlan 2025-06-30T09:23:14.720546Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit FinalizeDataTxPlan 2025-06-30T09:23:14.720551Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-30T09:23:14.720555Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit FinalizeDataTxPlan 2025-06-30T09:23:14.720559Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-30T09:23:14.720564Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit BuildAndWaitDependencies 2025-06-30T09:23:14.720574Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000005:12] is the new logically complete end at 9437184 2025-06-30T09:23:14.720578Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000005:12] is the new logically incomplete end at 9437184 2025-06-30T09:23:14.720583Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000005:12] at 9437184 2025-06-30T09:23:14.720589Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-30T09:23:14.720592Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-30T09:23:14.720597Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit BuildDataTxOutRS 2025-06-30T09:23:14.720602Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit BuildDataTxOutRS 2025-06-30T09:23:14.720610Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-30T09:23:14.720614Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit BuildDataTxOutRS 2025-06-30T09:23:14.720619Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit StoreAndSendOutRS 2025-06-30T09:23:14.720623Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit StoreAndSendOutRS 2025-06-30T09:23:14.720628Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-30T09:23:14.720632Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit StoreAndSendOutRS 2025-06-30T09:23:14.720636Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit PrepareDataTxInRS 2025-06-30T09:23:14.720641Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit PrepareDataTxInRS 2025-06-30T09:23:14.720647Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-30T09:23:14.720651Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit PrepareDataTxInRS 2025-06-30T09:23:14.720656Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit LoadAndWaitInRS 2025-06-30T09:23:14.720661Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit LoadAndWaitInRS 2025-06-30T09:23:14.720668Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-30T09:23:14.720673Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit LoadAndWaitInRS 2025-06-30T09:23:14.720677Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit ExecuteDataTx 2025-06-30T09:23:14.720682Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit ExecuteDataTx 2025-06-30T09:23:14.720758Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000005:12] at tablet 9437184 with status COMPLETE 2025-06-30T09:23:14.720768Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000005:12] at 9437184: {NSelectRow: 3, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 3, SelectRowBytes: 24, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-30T09:23:14.720777Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-30T09:23:14.720782Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit ExecuteDataTx 2025-06-30T09:23:14.720786Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit CompleteOperation 2025-06-30T09:23:14.720791Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit CompleteOperation 2025-06-30T09:23:14.720832Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is DelayComplete 2025-06-30T09:23:14.720837Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit CompleteOperation 2025-06-30T09:23:14.720841Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit CompletedOperations 2025-06-30T09:23:14.720845Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit CompletedOperations 2025-06-30T09:23:14.720851Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-30T09:23:14.720855Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit CompletedOperations 2025-06-30T09:23:14.720860Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:12] at 9437184 has finished 2025-06-30T09:23:14.720865Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:14.720869Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:23:14.720874Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-30T09:23:14.720878Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-30T09:23:14.731765Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 1000005 txid# 12} 2025-06-30T09:23:14.731782Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 1000005} 2025-06-30T09:23:14.731795Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-30T09:23:14.731802Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete 
execution for [1000005:12] at 9437185 on unit CompleteOperation 2025-06-30T09:23:14.731822Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 12] from 9437185 at tablet 9437185 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-30T09:23:14.731848Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-30T09:23:14.733176Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000005 txid# 12} 2025-06-30T09:23:14.733191Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000005} 2025-06-30T09:23:14.733203Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:23:14.733210Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:12] at 9437184 on unit CompleteOperation 2025-06-30T09:23:14.733226Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 12] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-30T09:23:14.733234Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed+EvWrite [GOOD] >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed-EvWrite >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UTF8 |82.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest |82.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |82.6%| [LD] {RESULT} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |82.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |82.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock-EvWrite [GOOD] >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLockOutOfOrder >> BackupRestoreS3::TestAllPrimitiveTypes-INT64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT64 >> EncryptedBackupParamsValidationTest::BadSourcePath >> TSchemeShardTest::ManyDirs [GOOD] >> TSchemeShardTest::ListNotCreatedDirCase ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::ShowCreateTableColumnAlterObject [GOOD] Test command err: 2025-06-30T09:22:17.401049Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670300378600608:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:17.401079Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003094/r3tmp/tmp2DjVWf/pdisk_1.dat 2025-06-30T09:22:17.486450Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:17.501839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:17.501870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:17.503762Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7169, node 1 2025-06-30T09:22:17.522191Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:17.522221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:17.522223Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:17.522287Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4009 TClient is connected to server localhost:4009 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:17.578028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:17.825090Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:276: Subscribed for config changes 2025-06-30T09:22:17.825113Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:331: Updated config 2025-06-30T09:22:17.828741Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670300378601610:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:17.828758Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670300378601621:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:17.828771Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:17.829934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:17.836368Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670300378601624:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:22:17.913013Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670300378601693:2717] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:17.913367Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-30T09:22:17.913405Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:413: Perform request, TraceId.SpanIdPtr: 0x000012F53C8B67E8 2025-06-30T09:22:17.913413Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:423: Received compile request, sender: [1:7521670300378601606:2298], queryUid: , queryText: "\n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n ", keepInCache: 1, split: 0{ TraceId: 01jz029y34a85j7w84kz1wbzc4, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDUwZjdmNGItNWQ5YTBiMTctZjgxNjQ2ZTEtZTVjZWQxNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default} 2025-06-30T09:22:17.913422Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-30T09:22:17.913433Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:519: 
Added request to queue, sender: [1:7521670300378601606:2298], queueSize: 1 2025-06-30T09:22:17.913559Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:880: Created compile actor, sender: [1:7521670300378601606:2298], compileActor: [1:7521670300378601712:2309] 2025-06-30T09:22:17.955481Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jz029y34a85j7w84kz1wbzc4, SessionId: CompileActor 2025-06-30 09:22:17.955 INFO ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C86EB640) [core dq] kqp_host.cpp:1377: Good place to weld in 2025-06-30T09:22:17.955688Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jz029y34a85j7w84kz1wbzc4, SessionId: CompileActor 2025-06-30 09:22:17.955 INFO ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C86EB640) [core dq] kqp_host.cpp:1382: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (Write! $2 (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/tier1")) '('typeId (String '"EXTERNAL_DATA_SOURCE"))) (Void) '('('mode 'createObject) '('features $3)))) ) 2025-06-30T09:22:17.955761Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jz029y34a85j7w84kz1wbzc4, SessionId: CompileActor 2025-06-30 09:22:17.955 INFO ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C86EB640) [KQP] kqp_host.cpp:1388: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (Write! $2 (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/tier1") ... 
'('"secretId" $5) '('"value" $5))) (let $7 '('('"_logical_id" '335) '('"_id" '"d68fc778-fcb31d11-f1c9fc7c-835e470d") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"ownerUserId") (Member $14 '"secretId") (Member $14 '"value"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (Take (NarrowMap (ToFlow $15) (lambda '($16 $17 $18) (AsStruct '('"ownerUserId" $16) '('"secretId" $17) '('"value" $18)))) $3))) '('('"_logical_id" '348) '('"_id" '"6632d872-55b8caa3-4ee16fe1-b0fccd32")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) '((KqpTxResultBinding (ListType $6) '"0" '"0")) '('('"type" '"data_query")))) ) 2025-06-30T09:23:11.012868Z node 41 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.012 TRACE ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [core] kqp_transform.cpp:47: Saving explain plan 2025-06-30T09:23:11.012966Z node 41 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.012 TRACE ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [KQP] kqp_transform.cpp:33: PhysicalPeepholeTransformer: ( (let $1 (KqpTable '"//Root/.metadata/secrets/values" '"72057594046644480:9" '"" '3)) (let $2 '('"ownerUserId" '"secretId" '"value")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '"1")) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"ownerUserId" $5) '('"secretId" $5) '('"value" $5))) (let $7 '('('"_logical_id" '335) '('"_id" '"d68fc778-fcb31d11-f1c9fc7c-835e470d") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"ownerUserId") (Member $14 '"secretId") (Member $14 '"value"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (Take (NarrowMap (ToFlow $15) (lambda '($16 $17 $18) (AsStruct '('"ownerUserId" $16) '('"secretId" $17) '('"value" $18)))) $3))) '('('"_logical_id" '348) '('"_id" '"6632d872-55b8caa3-4ee16fe1-b0fccd32")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) '((KqpTxResultBinding (ListType $6) '"0" '"0")) '('('"type" '"data_query")))) ) 2025-06-30T09:23:11.013056Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 74us 2025-06-30T09:23:11.013108Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] yql_expr_constraint.cpp:3279: Execution of [ConstraintTransformer::DoTransform] took 39us 2025-06-30T09:23:11.013146Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) 
[perf] yql_expr_csee.cpp:620: Execution of [UpdateCompletness] took 26us 2025-06-30T09:23:11.013247Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] yql_expr_csee.cpp:633: Execution of [EliminateCommonSubExpressions] took 88us 2025-06-30T09:23:11.013372Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [KQP] kqp_opt_peephole.cpp:548: >>> TKqpTxsPeepholeTransformer: ( (let $1 (KqpTable '"//Root/.metadata/secrets/values" '"72057594046644480:9" '"" '3)) (let $2 '('"ownerUserId" '"secretId" '"value")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '"1")) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"ownerUserId" $5) '('"secretId" $5) '('"value" $5))) (let $7 '('('"_logical_id" '335) '('"_id" '"d68fc778-fcb31d11-f1c9fc7c-835e470d") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"ownerUserId") (Member $14 '"secretId") (Member $14 '"value"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (Take (NarrowMap (ToFlow $15) (lambda '($16 $17 $18) (AsStruct '('"ownerUserId" $16) '('"secretId" $17) '('"value" $18)))) $3))) '('('"_logical_id" '348) '('"_id" '"6632d872-55b8caa3-4ee16fe1-b0fccd32")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) '((KqpTxResultBinding (ListType $6) '"0" '"0")) '('('"type" '"data_query")))) ) 2025-06-30T09:23:11.013488Z node 41 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 TRACE ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [KQP] kqp_transform.cpp:33: TxsPeephole: ( (let $1 (KqpTable '"//Root/.metadata/secrets/values" '"72057594046644480:9" '"" '3)) (let $2 '('"ownerUserId" '"secretId" '"value")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '"1")) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"ownerUserId" $5) '('"secretId" $5) '('"value" $5))) (let $7 '('('"_logical_id" '335) '('"_id" '"d68fc778-fcb31d11-f1c9fc7c-835e470d") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"ownerUserId") (Member $14 '"secretId") (Member $14 '"value"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (Take (NarrowMap (ToFlow $15) (lambda '($16 $17 $18) (AsStruct '('"ownerUserId" $16) '('"secretId" $17) '('"value" $18)))) $3))) '('('"_logical_id" '348) '('"_id" '"6632d872-55b8caa3-4ee16fe1-b0fccd32")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) ) 2025-06-30T09:23:11.013504Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] 
type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 3us 2025-06-30T09:23:11.013517Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] yql_expr_constraint.cpp:3279: Execution of [ConstraintTransformer::DoTransform] took 3us 2025-06-30T09:23:11.013547Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] yql_expr_csee.cpp:620: Execution of [UpdateCompletness] took 20us 2025-06-30T09:23:11.013616Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] yql_expr_csee.cpp:633: Execution of [EliminateCommonSubExpressionsForSubGraph] took 59us 2025-06-30T09:23:11.013729Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [KQP] kqp_opt_peephole.cpp:494: >>> TKqpTxPeepholeTransformer: ( (let $1 (KqpTable '"//Root/.metadata/secrets/values" '"72057594046644480:9" '"" '3)) (let $2 '('"ownerUserId" '"secretId" '"value")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '"1")) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"ownerUserId" $5) '('"secretId" $5) '('"value" $5))) (let $7 '('('"_logical_id" '335) '('"_id" '"d68fc778-fcb31d11-f1c9fc7c-835e470d") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"ownerUserId") (Member $14 '"secretId") (Member $14 '"value"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (Take (NarrowMap (ToFlow $15) (lambda '($16 $17 $18) (AsStruct '('"ownerUserId" $16) '('"secretId" $17) '('"value" $18)))) $3))) '('('"_logical_id" '348) '('"_id" '"6632d872-55b8caa3-4ee16fe1-b0fccd32")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) ) 2025-06-30T09:23:11.014006Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.013 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 63us 2025-06-30T09:23:11.014035Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.014 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] yql_expr_constraint.cpp:3279: Execution of [ConstraintTransformer::DoTransform] took 12us 2025-06-30T09:23:11.014047Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.014 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 1us 2025-06-30T09:23:11.014080Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.014 DEBUG ydb-core-sys_view-ut(pid=306178, 
tid=0x00007FB0C9EEE640) [perf] yql_expr_constraint.cpp:3279: Execution of [ConstraintTransformer::DoTransform] took 28us 2025-06-30T09:23:11.014100Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.014 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] yql_expr_csee.cpp:620: Execution of [UpdateCompletness] took 10us 2025-06-30T09:23:11.014140Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.014 DEBUG ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [perf] yql_expr_csee.cpp:633: Execution of [EliminateCommonSubExpressionsForSubGraph] took 29us 2025-06-30T09:23:11.014192Z node 41 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jz02bj0ba1d2s2p0rz9ygrd3, SessionId: CompileActor 2025-06-30 09:23:11.014 TRACE ydb-core-sys_view-ut(pid=306178, tid=0x00007FB0C9EEE640) [core peephole] yql_out_transformers.cpp:62: PeepHoleOpt: ( (let $1 (OptionalType (DataType 'Utf8))) (return (KqpProgram (lambda '($2) (block '( (let $3 (lambda '($4) (Member $4 '"ownerUserId") (Member $4 '"secretId") (Member $4 '"value"))) (return (FromFlow (ExpandMap (Take (ToFlow $2) (Uint64 '"1001")) $3))) ))) (TupleType (StreamType (StructType '('"ownerUserId" $1) '('"secretId" $1) '('"value" $1)))))) )
>> BackupRestoreS3::RestoreIndexTableSplitBoundaries [GOOD]
>> BackupRestoreS3::RestoreViewQueryText
>> BackupRestore::ImportDataShouldHandleErrors [GOOD]
>> BackupRestore::BackupUuid
>> TSchemeShardTest::ListNotCreatedDirCase [GOOD]
>> TSchemeShardTest::ListNotCreatedIndexCase
>> BackupPathTest::EncryptedExportWithExplicitDestinationPath [GOOD]
>> TSchemeShardTest::ListNotCreatedIndexCase [GOOD]
>> TSchemeShardTest::FindSubDomainPathId
>> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-INT64 [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-FLOAT
>> DataShardOutOfOrder::TestUnprotectedReadsThenWriteVisibility [GOOD]
>> EncryptedBackupParamsValidationTest::BadSourcePath [GOOD]
>> DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty [GOOD]
>> BackupPathTest::EncryptedExportWithExplicitObjectList
>> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed+EvWrite [GOOD]
>> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite
>> TSchemeShardTest::FindSubDomainPathId [GOOD]
>> TSchemeShardTest::FindSubDomainPathIdActor
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::AlterTtlSettings [GOOD] Test command err:
==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:45.974033Z
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:45.974057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:45.974064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:45.974070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:45.974085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:45.974089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:45.974100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:45.974117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:45.974242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:45.974330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:45.990654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:45.990683Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:45.990843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:45.994683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:45.996047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:45.996106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:45.998024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:45.998085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:45.998232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:45.998299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 
1], at schemeshard: 72057594046678944 2025-06-30T09:21:45.999415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:45.999468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:45.999815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:45.999826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:45.999835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:45.999843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:45.999849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:45.999877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:46.001817Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:46.033571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:46.033657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.033720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:46.033730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:46.033795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:46.033810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:46.035946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.036020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:46.036108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.036134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:46.036141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:46.036148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:46.038641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.038665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:46.038675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:46.039259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.039272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:46.039277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:46.039283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:46.039870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:46.040271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:46.040315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for 
txId: 1 at step: 5000001 2025-06-30T09:21:46.040552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:46.040582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... >FrontStep: 5000006 2025-06-30T09:23:15.015357Z node 188 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:15.015386Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:23:15.015473Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:23:15.015524Z node 188 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:15.015530Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [188:212:2212], at schemeshard: 72057594046678944, txId: 1005, path id: 3 2025-06-30T09:23:15.015538Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [188:212:2212], at schemeshard: 72057594046678944, txId: 1005, path id: 4 2025-06-30T09:23:15.015634Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:23:15.015644Z node 188 :FLAT_TX_SCHEMESHARD INFO: alter_table.cpp:199: TAlterColumnTable TProposedWaitParts operationId# 1005:0 ProgressState at tablet: 72057594046678944 2025-06-30T09:23:15.015656Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: alter_table.cpp:222: TAlterColumnTable TProposedWaitParts operationId# 1005:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-30T09:23:15.015900Z node 188 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:23:15.015916Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:23:15.015921Z node 188 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:23:15.015927Z node 188 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-30T09:23:15.015934Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 
2025-06-30T09:23:15.016222Z node 188 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 11 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:23:15.016236Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 11 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:23:15.016246Z node 188 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:23:15.016250Z node 188 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-30T09:23:15.016254Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:23:15.016266Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 0/1, is published: true 2025-06-30T09:23:15.017120Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1005:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-30T09:23:15.017600Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:23:15.017684Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:23:15.028987Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6237: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1005 2025-06-30T09:23:15.029027Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 1005, tablet: 72075186233409546, partId: 0 2025-06-30T09:23:15.029066Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 1005:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1005 FAKE_COORDINATOR: Erasing txId 1005 2025-06-30T09:23:15.029965Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:23:15.030030Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:23:15.030040Z node 188 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1005:0 ProgressState 2025-06-30T09:23:15.030066Z node 188 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:23:15.030072Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:23:15.030078Z node 188 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:23:15.030081Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:23:15.030087Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: true 2025-06-30T09:23:15.030109Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [188:370:2346] message: TxId: 1005 2025-06-30T09:23:15.030118Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:23:15.030124Z node 188 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1005:0 2025-06-30T09:23:15.030129Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1005:0 2025-06-30T09:23:15.030176Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:23:15.031723Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-30T09:23:15.031745Z node 188 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [188:517:2485] TestWaitNotification: OK eventTxId 1005 2025-06-30T09:23:15.031930Z node 188 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore/ColumnTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:23:15.032029Z node 188 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore/ColumnTable" took 111us result status StatusSuccess 2025-06-30T09:23:15.032219Z node 188 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/OlapStore/ColumnTable" PathDescription { Self { Name: "ColumnTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 ColumnTableVersion: 3 ColumnTableSchemaVersion: 1 ColumnTableTtlSettingsVersion: 3 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 
ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "ColumnTable" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } NextColumnFamilyId: 1 } TtlSettings { Disabled { } Version: 3 } SchemaPresetId: 1 SchemaPresetName: "default" ColumnStorePathId { OwnerId: 72057594046678944 LocalId: 3 } ColumnShardCount: 1 Sharding { ColumnShards: 72075186233409546 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "timestamp" } } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
|82.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest
>> SlowTopicAutopartitioning::CDC_Write [GOOD]
>> AnalyzeColumnshard::AnalyzeRebootSaInAggregate
>> TPersQueueTest::Init [GOOD]
>> TPersQueueTest::EventBatching
>> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites+EvWrite [GOOD]
>> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink [GOOD] Test command err:
2025-06-30T09:23:13.357176Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:13.357289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:23:13.357306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045dc/r3tmp/tmp5VTLj2/pdisk_1.dat 2025-06-30T09:23:13.483342Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:23:13.484352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:13.504646Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:13.506024Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275392691937 != 1751275392691941 2025-06-30T09:23:13.550416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:13.550463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:13.561228Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:13.639697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:13.662838Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:13.663173Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:13.663300Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:23:13.663391Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:23:13.675756Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:13.676017Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:23:13.676046Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:23:13.676291Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:23:13.676303Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:23:13.676311Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:23:13.676380Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:23:13.676407Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:23:13.676423Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:23:13.686791Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:23:13.693557Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:23:13.693648Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:23:13.693680Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:23:13.693689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:23:13.693696Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:23:13.693723Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:23:13.693806Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:13.693816Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:13.693936Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:23:13.693967Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:23:13.693982Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:23:13.693991Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:13.693999Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:23:13.694006Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:23:13.694014Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:23:13.694021Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:23:13.694027Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:23:13.694053Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:13.694060Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:13.694068Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:23:13.694197Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:23:13.694204Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:23:13.694231Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:23:13.694293Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:23:13.694306Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:23:13.694328Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:23:13.694337Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:23:13.694344Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:23:13.694351Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:23:13.694378Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:23:13.694456Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:23:13.694465Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:23:13.694471Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:23:13.694477Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:23:13.694491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:23:13.694496Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:23:13.694502Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:23:13.694507Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:23:13.694515Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:23:13.694959Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:23:13.694980Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:23:13.706852Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:23:13.706884Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:23:13.706892Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:23:13.706907Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... : 0 ru: 1 rate limiter was not found force flag: 1 2025-06-30T09:23:16.226572Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715677. Resolved key sets: 0 2025-06-30T09:23:16.226645Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=2&id=YjNjM2IwNDEtOWJlYjIyNWQtMTg5OTYyNDYtZjVlNmI0OGM=, ActorId: [2:853:2688], ActorState: ExecuteState, TraceId: 01jz02bpx543wfq5nnz96xc1rn, Create QueryResponse for error on request, msg: 2025-06-30T09:23:16.226698Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=2&id=Y2YyYTc0YzAtMjJiNTViODItZDZkMDUyYTQtODNjYjc2YmI=, ActorId: [2:855:2690], ActorState: ExecuteState, TraceId: 01jz02bpx5925jcrr7pn5ed0rj, Create QueryResponse for error on request, msg: 2025-06-30T09:23:16.226849Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jz02bpx4f1v5bm39k4ra01df, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDVjNjkyYjktYzk5ZDM4MzctZjMxMmM4YWYtNjBhZjRiNDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:16.226861Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715677. Ctx: { TraceId: 01jz02bpx4f1v5bm39k4ra01df, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDVjNjkyYjktYzk5ZDM4MzctZjMxMmM4YWYtNjBhZjRiNDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-30T09:23:16.226870Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1101:2675] TxId: 281474976715677. Ctx: { TraceId: 01jz02bpx4f1v5bm39k4ra01df, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDVjNjkyYjktYzk5ZDM4MzctZjMxMmM4YWYtNjBhZjRiNDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-30T09:23:16.226884Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [2:1101:2675] TxId: 281474976715677. 
Ctx: { TraceId: 01jz02bpx4f1v5bm39k4ra01df, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDVjNjkyYjktYzk5ZDM4MzctZjMxMmM4YWYtNjBhZjRiNDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:23:16.226891Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1101:2675] TxId: 281474976715677. Ctx: { TraceId: 01jz02bpx4f1v5bm39k4ra01df, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDVjNjkyYjktYzk5ZDM4MzctZjMxMmM4YWYtNjBhZjRiNDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-30T09:23:16.227302Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715678. Resolved key sets: 0 2025-06-30T09:23:16.227459Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. Ctx: { TraceId: 01jz02bpx43hxwv23138ech23f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Mjk5NDdlZTctYTY5MWUxMGItNTI3MzQ1ZjMtNjExMTA0YWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:16.227469Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715678. Ctx: { TraceId: 01jz02bpx43hxwv23138ech23f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Mjk5NDdlZTctYTY5MWUxMGItNTI3MzQ1ZjMtNjExMTA0YWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-30T09:23:16.227480Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1110:2677] TxId: 281474976715678. Ctx: { TraceId: 01jz02bpx43hxwv23138ech23f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Mjk5NDdlZTctYTY5MWUxMGItNTI3MzQ1ZjMtNjExMTA0YWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-30T09:23:16.227495Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [2:1110:2677] TxId: 281474976715678. Ctx: { TraceId: 01jz02bpx43hxwv23138ech23f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Mjk5NDdlZTctYTY5MWUxMGItNTI3MzQ1ZjMtNjExMTA0YWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:23:16.227503Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1110:2677] TxId: 281474976715678. Ctx: { TraceId: 01jz02bpx43hxwv23138ech23f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Mjk5NDdlZTctYTY5MWUxMGItNTI3MzQ1ZjMtNjExMTA0YWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-30T09:23:16.227512Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715679. Resolved key sets: 0 2025-06-30T09:23:16.227684Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jz02bpx45fgbqy2ns8qhqwfd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmRlMWY5NjUtODUxMWU0NjctN2FjNjZiNjMtZDliNGU3Y2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:23:16.227693Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715679. Ctx: { TraceId: 01jz02bpx45fgbqy2ns8qhqwfd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmRlMWY5NjUtODUxMWU0NjctN2FjNjZiNjMtZDliNGU3Y2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-30T09:23:16.227699Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1111:2680] TxId: 281474976715679. Ctx: { TraceId: 01jz02bpx45fgbqy2ns8qhqwfd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmRlMWY5NjUtODUxMWU0NjctN2FjNjZiNjMtZDliNGU3Y2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-30T09:23:16.227711Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [2:1111:2680] TxId: 281474976715679. Ctx: { TraceId: 01jz02bpx45fgbqy2ns8qhqwfd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmRlMWY5NjUtODUxMWU0NjctN2FjNjZiNjMtZDliNGU3Y2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:23:16.227718Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1111:2680] TxId: 281474976715679. Ctx: { TraceId: 01jz02bpx45fgbqy2ns8qhqwfd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmRlMWY5NjUtODUxMWU0NjctN2FjNjZiNjMtZDliNGU3Y2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-30T09:23:16.227784Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715680. Resolved key sets: 0 2025-06-30T09:23:16.227795Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715681. Resolved key sets: 0 2025-06-30T09:23:16.227811Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jz02bpx543wfq5nnz96xc1rn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjNjM2IwNDEtOWJlYjIyNWQtMTg5OTYyNDYtZjVlNmI0OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:16.227817Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715680. Ctx: { TraceId: 01jz02bpx543wfq5nnz96xc1rn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjNjM2IwNDEtOWJlYjIyNWQtMTg5OTYyNDYtZjVlNmI0OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-30T09:23:16.227824Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1118:2688] TxId: 281474976715680. Ctx: { TraceId: 01jz02bpx543wfq5nnz96xc1rn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjNjM2IwNDEtOWJlYjIyNWQtMTg5OTYyNDYtZjVlNmI0OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-30T09:23:16.227833Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [2:1118:2688] TxId: 281474976715680. 
Ctx: { TraceId: 01jz02bpx543wfq5nnz96xc1rn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjNjM2IwNDEtOWJlYjIyNWQtMTg5OTYyNDYtZjVlNmI0OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:23:16.227840Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1118:2688] TxId: 281474976715680. Ctx: { TraceId: 01jz02bpx543wfq5nnz96xc1rn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjNjM2IwNDEtOWJlYjIyNWQtMTg5OTYyNDYtZjVlNmI0OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-30T09:23:16.227849Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715681. Ctx: { TraceId: 01jz02bpx5925jcrr7pn5ed0rj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Y2YyYTc0YzAtMjJiNTViODItZDZkMDUyYTQtODNjYjc2YmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:16.227854Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715681. Ctx: { TraceId: 01jz02bpx5925jcrr7pn5ed0rj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Y2YyYTc0YzAtMjJiNTViODItZDZkMDUyYTQtODNjYjc2YmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-30T09:23:16.227859Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1119:2690] TxId: 281474976715681. Ctx: { TraceId: 01jz02bpx5925jcrr7pn5ed0rj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Y2YyYTc0YzAtMjJiNTViODItZDZkMDUyYTQtODNjYjc2YmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-30T09:23:16.227866Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [2:1119:2690] TxId: 281474976715681. Ctx: { TraceId: 01jz02bpx5925jcrr7pn5ed0rj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Y2YyYTc0YzAtMjJiNTViODItZDZkMDUyYTQtODNjYjc2YmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:23:16.227872Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1119:2690] TxId: 281474976715681. Ctx: { TraceId: 01jz02bpx5925jcrr7pn5ed0rj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Y2YyYTc0YzAtMjJiNTViODItZDZkMDUyYTQtODNjYjc2YmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1
>> TSchemeShardTest::FindSubDomainPathIdActor [GOOD]
>> TSchemeShardTest::FindSubDomainPathIdActorAsync
>> AnalyzeColumnshard::AnalyzeServerless
|82.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest
>> EncryptedBackupParamsValidationTest::NoDestination
>> BackupRestore::TestAllPrimitiveTypes-UTF8 [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-YSON
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestUnprotectedReadsThenWriteVisibility [GOOD] Test command err:
2025-06-30T09:23:14.080557Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:14.080656Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:14.080723Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:14.080774Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:14.080788Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:23:14.080816Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045c9/r3tmp/tmprDnHjh/pdisk_1.dat 2025-06-30T09:23:14.218481Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:14.310053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:14.401291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:14.401338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:14.403156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:14.403185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:14.418961Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:23:14.419127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:14.419256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:14.681528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:14.730941Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [2:1185:2342], Recipient [2:1211:2354]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:14.733208Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [2:1185:2342], Recipient [2:1211:2354]: NKikimr::TEvTablet::TEvRestored 
2025-06-30T09:23:14.733320Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1211:2354] 2025-06-30T09:23:14.733389Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:23:14.743133Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [2:1185:2342], Recipient [2:1211:2354]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:14.747935Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:23:14.748406Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:23:14.748638Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:23:14.748650Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:23:14.748655Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:23:14.748712Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:23:14.748783Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:23:14.748838Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:1235:2354] in generation 1 2025-06-30T09:23:14.749646Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:23:14.754899Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:23:14.754989Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:23:14.755019Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:1239:2371] 2025-06-30T09:23:14.755025Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:23:14.755030Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:23:14.755035Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:23:14.755127Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:1211:2354], Recipient [2:1211:2354]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:14.755134Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:14.755235Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:23:14.755261Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:23:14.755269Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:23:14.755279Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:14.755287Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:23:14.755293Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:23:14.755298Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:23:14.755303Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:23:14.755309Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:23:14.798326Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1243:2372], Recipient [2:1211:2354]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:14.798349Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:14.798358Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1195:2739], serverId# [2:1243:2372], sessionId# [0:0:0] 2025-06-30T09:23:14.798442Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:764:2428], Recipient [2:1243:2372] 2025-06-30T09:23:14.798447Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:23:14.798479Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:23:14.798530Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:23:14.798543Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:23:14.798577Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:23:14.798586Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:23:14.798591Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:23:14.798597Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:23:14.798602Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:23:14.798657Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:23:14.798663Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:23:14.798667Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:23:14.798671Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 
2025-06-30T09:23:14.798685Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:23:14.798689Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:23:14.798693Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:23:14.798698Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:23:14.798703Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-30T09:23:14.801408Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152 ... } } 2025-06-30T09:23:16.230326Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz02bq3g8k42t73q056c2m03, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDk1YTZlNGMtYzY3YjAwYjEtZTg2NDg0M2YtYzMxNTg2MmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:16.231456Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [2:1652:2413], Recipient [2:1211:2354]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2000 TxId: 18446744073709551615 } LockTxId: 281474976715662 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-30T09:23:16.231584Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-30T09:23:16.231610Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-30T09:23:16.231635Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:23:16.231642Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:23:16.231648Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:23:16.231654Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:23:16.231668Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-30T09:23:16.231676Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:23:16.231680Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:23:16.231686Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:23:16.231692Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:23:16.231719Z node 2 
:TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2000 TxId: 18446744073709551615 } LockTxId: 281474976715662 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-30T09:23:16.231783Z node 2 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715662, counter# 18446744073709551612 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-30T09:23:16.231794Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-30T09:23:16.231802Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[2:1652:2413], 0} after executionsCount# 1 2025-06-30T09:23:16.231813Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[2:1652:2413], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:23:16.231834Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[2:1652:2413], 0} finished in read 2025-06-30T09:23:16.231847Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:23:16.231856Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:23:16.231861Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:23:16.231867Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:23:16.231877Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-30T09:23:16.231881Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:23:16.231887Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-30T09:23:16.231893Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:23:16.231917Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-30T09:23:16.232203Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:1652:2413], Recipient [2:1211:2354]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-30T09:23:16.232214Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } } 2025-06-30T09:23:16.270276Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. 
Ctx: { TraceId: 01jz02bq4c9rwmma6qhwjcc4a6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFlZDIyYjctY2NiZThiNmMtZDljNTkzYzUtYTVlZWYxMTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:16.271187Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [2:1676:2414], Recipient [2:1211:2354]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } LockTxId: 281474976715666 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-30T09:23:16.271277Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-30T09:23:16.271302Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CheckRead 2025-06-30T09:23:16.271328Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-30T09:23:16.271334Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CheckRead 2025-06-30T09:23:16.271340Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-30T09:23:16.271345Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-30T09:23:16.271358Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:8] at 72075186224037888 2025-06-30T09:23:16.271366Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-30T09:23:16.271370Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-30T09:23:16.271376Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit ExecuteRead 2025-06-30T09:23:16.271385Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit ExecuteRead 2025-06-30T09:23:16.271410Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } LockTxId: 281474976715666 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-30T09:23:16.271533Z node 2 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715666, counter# 1 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-30T09:23:16.271543Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v2500/18446744073709551615 2025-06-30T09:23:16.271551Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[2:1676:2414], 0} after executionsCount# 1 2025-06-30T09:23:16.271561Z node 2 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[2:1676:2414], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-30T09:23:16.271582Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[2:1676:2414], 0} finished in read 2025-06-30T09:23:16.271600Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-30T09:23:16.271605Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit ExecuteRead 2025-06-30T09:23:16.271610Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit CompletedOperations 2025-06-30T09:23:16.271615Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CompletedOperations 2025-06-30T09:23:16.271626Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-30T09:23:16.271630Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CompletedOperations 2025-06-30T09:23:16.271635Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 72075186224037888 has finished 2025-06-30T09:23:16.271642Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-30T09:23:16.271668Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-30T09:23:16.271958Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:1676:2414], Recipient [2:1211:2354]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-30T09:23:16.271969Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-30T09:23:16.272156Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [2:249:2134], Recipient [2:1211:2354]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715666 LockNode: 1 Status: STATUS_SUBSCRIBED { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty [GOOD] Test command err: 2025-06-30T09:23:13.613939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:23:13.613969Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:13.614560Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:13.618343Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:13.618497Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:136:2157] 2025-06-30T09:23:13.618550Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 
2025-06-30T09:23:13.629352Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:13.632761Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:23:13.632799Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:23:13.632994Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-30T09:23:13.633005Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-30T09:23:13.633014Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-30T09:23:13.633079Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:23:13.633092Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:23:13.633108Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:205:2157] in generation 2 2025-06-30T09:23:13.667052Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:23:13.675564Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-30T09:23:13.675671Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:23:13.675707Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:220:2216] 2025-06-30T09:23:13.675714Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-30T09:23:13.675721Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-30T09:23:13.675729Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:23:13.675784Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:13.675795Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:13.675889Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-30T09:23:13.675924Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-30T09:23:13.675964Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:23:13.675974Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:13.675985Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-30T09:23:13.675992Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-30T09:23:13.675999Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-30T09:23:13.676006Z node 1 :TX_DATASHARD INFO: 
datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-30T09:23:13.676014Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:23:13.676030Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:216:2213], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:13.676037Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:13.676046Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:214:2212], serverId# [1:216:2213], sessionId# [0:0:0] 2025-06-30T09:23:13.676716Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:136:2157]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\001J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-30T09:23:13.676737Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:23:13.676758Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:23:13.676808Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-30T09:23:13.676824Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-30T09:23:13.676840Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-30T09:23:13.676851Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-30T09:23:13.676857Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-30T09:23:13.676864Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-30T09:23:13.676870Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:23:13.676955Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-30T09:23:13.676960Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-30T09:23:13.676965Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-30T09:23:13.676969Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:23:13.676982Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-30T09:23:13.676988Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-30T09:23:13.676994Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 
9437184 to execution unit WaitForPlan 2025-06-30T09:23:13.676999Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-30T09:23:13.677005Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-30T09:23:13.688259Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:23:13.688298Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:23:13.688307Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:23:13.688323Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-30T09:23:13.688342Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-30T09:23:13.688522Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:226:2222], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:13.688534Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:13.688546Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:225:2221], serverId# [1:226:2222], sessionId# [0:0:0] 2025-06-30T09:23:13.688576Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:136:2157]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-30T09:23:13.688582Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-30T09:23:13.688640Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-30T09:23:13.688651Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-30T09:23:13.688662Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-30T09:23:13.688669Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-30T09:23:13.689618Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-30T09:23:13.689646Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:23:13.689759Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:13.689769Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:13.689783Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:23:13.689796Z node 1 
:TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:23:13.689803Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:23:13.689814Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-30T09:23:13.689822Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 3:2402], Recipient [1:240:2231]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-30T09:23:16.561457Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.561461Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-30T09:23:16.561476Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:240:2231]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-30T09:23:16.561481Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.561485Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-30T09:23:16.561499Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:240:2231]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-30T09:23:16.561503Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.561508Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-30T09:23:16.561523Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:240:2231]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-30T09:23:16.561527Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.561532Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-30T09:23:16.561545Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:240:2231]: {TEvReadSet step# 1000005 txid# 122 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 40} 2025-06-30T09:23:16.561550Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.561554Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 122 2025-06-30T09:23:16.561569Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:240:2231]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 
9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-30T09:23:16.561573Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.561578Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 2025-06-30T09:23:16.561594Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:240:2231]: {TEvReadSet step# 1000005 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-06-30T09:23:16.561598Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.561604Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-06-30T09:23:16.561617Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:240:2231]: {TEvReadSet step# 1000005 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-06-30T09:23:16.561622Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.561626Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-06-30T09:23:16.561638Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:240:2231]: {TEvReadSet step# 1000005 txid# 131 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-06-30T09:23:16.561645Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.561650Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-06-30T09:23:16.561673Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:240:2231]: {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-06-30T09:23:16.561679Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.561684Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2025-06-30T09:23:16.561725Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:23:16.561733Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:149] at 9437184 on unit CompleteOperation 2025-06-30T09:23:16.561744Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 149] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 2 ms 2025-06-30T09:23:16.561753Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-30T09:23:16.561760Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:23:16.561795Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-30T09:23:16.561802Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-30T09:23:16.561807Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-30T09:23:16.561812Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:23:16.561817Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:151] at 9437184 on unit CompleteOperation 2025-06-30T09:23:16.561826Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 151] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 2 ms 2025-06-30T09:23:16.561833Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-30T09:23:16.561838Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:23:16.561867Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:23:16.561872Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437184 on unit CompleteOperation 2025-06-30T09:23:16.561880Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 2 ms 2025-06-30T09:23:16.561887Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-30T09:23:16.561892Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:23:16.561916Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:23:16.561921Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:154] at 9437184 on unit CompleteOperation 2025-06-30T09:23:16.561929Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 154] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 2 ms 2025-06-30T09:23:16.561936Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-30T09:23:16.561941Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:23:16.561981Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:240:2231], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-30T09:23:16.561985Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.561991Z node 1 :TX_DATASHARD DEBUG: 
datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 149 2025-06-30T09:23:16.562007Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:240:2231], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-30T09:23:16.562011Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.562016Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 151 2025-06-30T09:23:16.562032Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:240:2231], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-30T09:23:16.562037Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.562041Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152 2025-06-30T09:23:16.562056Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:240:2231], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-30T09:23:16.562061Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:16.562066Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed-EvWrite [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-FLOAT |82.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> BackupRestoreS3::RestoreViewQueryText [GOOD] >> BackupRestoreS3::RestoreViewReferenceTable >> EncryptedExportTest::EncryptionAndChecksum [GOOD] >> BackupPathTest::EncryptedExportWithExplicitObjectList [GOOD] >> BackupRestore::BackupUuid [GOOD] >> BackupRestore::RestoreViewQueryText >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLockOutOfOrder [GOOD] >> EncryptedBackupParamsValidationTest::NoDestination [GOOD] >> BackupRestore::TestAllPrimitiveTypes-FLOAT [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DOUBLE ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed-EvWrite [GOOD] Test command err: 2025-06-30T09:23:13.699976Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:13.700074Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:23:13.700094Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045c1/r3tmp/tmpYDKxbg/pdisk_1.dat 2025-06-30T09:23:13.812629Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:23:13.813732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:13.832756Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:13.833942Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275393176628 != 1751275393176632 2025-06-30T09:23:13.878260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:13.878299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:13.890010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:13.968341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:14.225104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:14.326905Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ===== UPSERT initial rows 2025-06-30T09:23:14.515343Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:785:2637], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:14.515426Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:795:2642], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:14.515477Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:14.517084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:14.677364Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:799:2645], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:23:14.716909Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:855:2682] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:14.784356Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02bne7fymdfe5rvn18a7kh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGJjMzdkNzEtNDAzOGUyOTEtMmRhNzY0YzYtZWRhMGYwZTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:14.811621Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02bnqf5pc3q4dtpp7wywrj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDkxNmM0YWMtNjMxY2NmYjgtZmU5OTZiYjctNDIwYWM2NjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ===== Begin SELECT 2025-06-30T09:23:14.902042Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz02bnrf5h7ccqh1cktdhcj6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzdhODlhYzktNTI5OTFmNzctZmZlZDMyZmUtMzBmN2Q0ZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 1 } } ===== UPSERT and commit ... waiting for commit read sets 2025-06-30T09:23:14.932798Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jz02bntwbq80ksw1yqtvr4g6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzdhODlhYzktNTI5OTFmNzctZmZlZDMyZmUtMzBmN2Q0ZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... captured readset ... captured readset ===== restarting tablet 2025-06-30T09:23:15.103721Z node 1 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1103: SelfId: [1:982:2723], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [1:919:2723]TEvDeliveryProblem was received from tablet: 72075186224037888 ===== Waiting for commit response ===== Last SELECT 2025-06-30T09:23:15.241440Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz02bp459bbj2b5g9nnfddm0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGI0ODU2ZTgtMzEwN2Q3Mi1iZmU4MTk4Yy04ZjA4MTQ3Mg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 3 } items { uint32_value: 2 } } 2025-06-30T09:23:15.999658Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:272:2315], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:15.999756Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:15.999769Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045c1/r3tmp/tmpWEm0zC/pdisk_1.dat 2025-06-30T09:23:16.097934Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-30T09:23:16.098447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:16.114230Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:16.120183Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1751275395573651 != 1751275395573654 2025-06-30T09:23:16.165981Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:16.166035Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:16.176890Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:16.254261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:16.464926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:16.563342Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ===== UPSERT initial rows 2025-06-30T09:23:16.723296Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:784:2636], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:16.723323Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:794:2641], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:16.723335Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:16.733476Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:16.888108Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:798:2644], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:23:16.923939Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:854:2681] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:16.976669Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02bqkjbt723wa0pfm1h8qa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjQzZGYzYTQtZjI2YzkxMzItZjc3YmVmNzUtMTZmYTZlYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:17.011413Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02bqw0aj5n33y46aqp5f4a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWY1ODhlYmMtYjJhNDg0M2EtMTU0MjBkMTItYTFkY2NmOTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ===== Begin SELECT 2025-06-30T09:23:17.131272Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz02bqx2a6nxjq0gxy834aa5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTUzYzhmNzMtOTNmMjY3ZTEtMzBiNjNmMmUtZGQ2Mzg2NWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 1 } } ===== UPSERT and commit ... waiting for commit read sets 2025-06-30T09:23:17.151426Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jz02br0e9h5sx2d16enahvmr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTUzYzhmNzMtOTNmMjY3ZTEtMzBiNjNmMmUtZGQ2Mzg2NWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... captured readset ... captured readset ===== restarting tablet ===== Waiting for commit response ===== Last SELECT 2025-06-30T09:23:17.442679Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz02br9b42adgrp64390mjpv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzM2OWNhMGUtMTM3MzcwN2MtNzk1ZjlmMzQtMjZmODU4ZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 3 } items { uint32_value: 2 } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLockOutOfOrder [GOOD] Test command err: 2025-06-30T09:23:13.588513Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:13.588637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:23:13.588660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045bd/r3tmp/tmpli3ht4/pdisk_1.dat 2025-06-30T09:23:13.702722Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:23:13.703697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:13.722288Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:13.723508Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275393133593 != 1751275393133597 2025-06-30T09:23:13.771460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:13.771498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:13.783412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:13.865151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:14.098631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:14.213511Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:14.404746Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:785:2637], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:14.404792Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:795:2642], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:14.404813Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:14.406369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:14.572927Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:799:2645], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:23:14.628228Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:855:2682] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:14.749305Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02bnb4fekfv541jppkkm5k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTRiMzRiYWUtMzZmNGYwYWEtNTBmOTYzNjctZDg4MmRiMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:14.783463Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02bnpc9pac604dfrj1tm42, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTFjNTI0NDgtZjg3NGQyNDMtN2ZlZjg0OWItNzQ4NTA4ZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:15.068036Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz02bnxj7pndj8tqez2x5xxj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2E4ODMzZjItYjcwY2QwOGYtY2FhYWM5YjctNjBmYWU3OGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } 2025-06-30T09:23:15.350497Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jz02bp88czyrakpet2yhrxqp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTQ2NWZiMGEtNmYwMTBmMDAtOTAzYThkY2ItZTgzMmVmODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:15.392304Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz02bp9c8f9zpz3n3y7ejk7r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2E4ODMzZjItYjcwY2QwOGYtY2FhYWM5YjctNjBmYWU3OGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:15.413604Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jz02bpa4461fy6xsswf83zr1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2E4ODMzZjItYjcwY2QwOGYtY2FhYWM5YjctNjBmYWU3OGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:15.429553Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=M2E4ODMzZjItYjcwY2QwOGYtY2FhYWM5YjctNjBmYWU3OGY=, ActorId: [1:919:2733], ActorState: ExecuteState, TraceId: 01jz02bpapdtc87k09ee1wfby4, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken 2025-06-30T09:23:15.440363Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz02bpapdtc87k09ee1wfby4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2E4ODMzZjItYjcwY2QwOGYtY2FhYWM5YjctNjBmYWU3OGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:23:16.153206Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:272:2315], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:16.153292Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:16.153302Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045bd/r3tmp/tmphqMv8j/pdisk_1.dat 2025-06-30T09:23:16.264731Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-30T09:23:16.265125Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:16.280892Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:16.281439Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1751275395681823 != 1751275395681826 2025-06-30T09:23:16.324925Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:16.324971Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:16.335891Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:16.412882Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:16.629935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:16.731100Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:16.913323Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:784:2636], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:16.913353Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:794:2641], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:16.913366Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:16.914484Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:17.081214Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:798:2644], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:23:17.119884Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:854:2681] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:17.131535Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02bqshfjvrn9xyg3zdh3d6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzhmYjNiODYtYmVkNDU0ODYtNThiMjZjYzQtMWM4Njg4Y2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:17.153870Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02br0rbpnqfas7efhqgtjn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWQ3YTBjMWItYTE0NjVlNS1jNTU3ZjZjZS0xNWNkZTA2ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the first select 2025-06-30T09:23:17.408418Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz02br6sfn07220bq8chj4k6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTA2NTZjOWItYThkODNkNGEtMjdhNWZkYjUtOGY3ZDZmN2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } 2025-06-30T09:23:17.469983Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jz02br9487h039mzc5e9qsd5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjRkMzZkY2EtZWM0OTIxZmEtNjdhMzNjNy1hNTc4YjJlZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } ... waiting for commit read sets 2025-06-30T09:23:17.488595Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz02brb18td9nq5pq6kyxq5w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjRkMzZkY2EtZWM0OTIxZmEtNjdhMzNjNy1hNTc4YjJlZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... captured readset ... captured readset ... performing an upsert 2025-06-30T09:23:17.829782Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jz02brnk3sjc9vb4d77mep1y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTNkYWI2ZC0xZGZhZDAwLWIyYzkzNmI2LWI1ZjQ2NmYw, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the second select 2025-06-30T09:23:17.856137Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz02brph3ve5xw27b0h82d1v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTA2NTZjOWItYThkODNkNGEtMjdhNWZkYjUtOGY3ZDZmN2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the third select 2025-06-30T09:23:17.872734Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jz02brq1cvm88e8jbvf19thk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTA2NTZjOWItYThkODNkNGEtMjdhNWZkYjUtOGY3ZDZmN2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the last upsert and commit 2025-06-30T09:23:17.882808Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=2&id=MTA2NTZjOWItYThkODNkNGEtMjdhNWZkYjUtOGY3ZDZmN2M=, ActorId: [2:925:2729], ActorState: ExecuteState, TraceId: 01jz02brqh3sced761gs2kcnqb, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken >> TSchemeShardTest::FindSubDomainPathIdActorAsync [GOOD] >> EncryptedExportTest::EncryptionChecksumAndCompression >> AnalyzeColumnshard::AnalyzeStatus >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave >> ReadIteratorExternalBlobs::NotExtBlobs [GOOD] >> BackupPathTest::ExportCommonSourcePathImportExplicitly >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite [GOOD] >> KqpErrors::ProposeError >> DataShardOutOfOrder::UncommittedReadSetAck [GOOD] >> EncryptedBackupParamsValidationTest::NoItemDestination >> KqpErrors::ProposeResultLost_RwTx+UseSink >> KqpProxy::NoLocalSessionExecution |82.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/partition_stats/ut/unittest |82.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut >> BackupRestore::TestAllPrimitiveTypes-YSON [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UUID >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite [GOOD] |82.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |82.7%| [LD] {RESULT} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite [GOOD] Test command err: 2025-06-30T09:23:15.326197Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:15.326286Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:23:15.326303Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004577/r3tmp/tmp2TUfrl/pdisk_1.dat 2025-06-30T09:23:15.432343Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:23:15.433229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:15.450385Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:15.451595Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275394906063 != 1751275394906067 2025-06-30T09:23:15.494144Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:15.494184Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:15.504830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:15.579208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:15.609731Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:15.610112Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:15.610253Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:23:15.610361Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:23:15.629021Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:15.629325Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:23:15.629360Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:23:15.629607Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:23:15.629620Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:23:15.629629Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:23:15.629732Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:23:15.629769Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:23:15.629790Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:23:15.643370Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:23:15.649072Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:23:15.649190Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:23:15.649222Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:23:15.649229Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:23:15.649235Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:23:15.649243Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:23:15.649347Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:15.649357Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:15.649492Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:23:15.649525Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:23:15.649539Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:23:15.649548Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:15.649558Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:23:15.649564Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:23:15.649570Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:23:15.649576Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:23:15.649582Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:23:15.649608Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:15.649617Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:15.649625Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:23:15.649789Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:23:15.649796Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:23:15.649822Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:23:15.649883Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:23:15.649897Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:23:15.649920Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:23:15.649929Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:23:15.649934Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:23:15.649942Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:23:15.649946Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:23:15.650021Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:23:15.650026Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:23:15.650031Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:23:15.650035Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:23:15.650050Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:23:15.650054Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:23:15.650059Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:23:15.650076Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:23:15.650091Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:23:15.650421Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:23:15.650433Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:23:15.663035Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:23:15.663069Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:23:15.663079Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:23:15.663094Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... " NodeId: 2 StartTimeMs: 1751275398440 CreateTimeMs: 1751275398439 UpdateTimeMs: 1751275398441 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.441728Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:996:2792] 2025-06-30T09:23:18.441738Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:999:2795], CA [2:1000:2796], CA [2:997:2793], CA [2:1001:2797], CA [2:998:2794], 2025-06-30T09:23:18.441745Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 5 compute actor(s) and 0 datashard(s): CA [2:999:2795], CA [2:1000:2796], CA [2:997:2793], CA [2:1001:2797], CA [2:998:2794], 2025-06-30T09:23:18.441855Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, got execution state from compute actor: [2:997:2793], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 264 DurationUs: 1000 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 212 FinishTimeMs: 1751275398441 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 182 BuildCpuTimeUs: 30 HostName: "ghrun-x4qzxsyz2q" NodeId: 2 StartTimeMs: 1751275398440 CreateTimeMs: 1751275398439 UpdateTimeMs: 1751275398441 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.441866Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:997:2793] 2025-06-30T09:23:18.441873Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:999:2795], CA [2:1000:2796], CA [2:1001:2797], CA [2:998:2794], 2025-06-30T09:23:18.441878Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 4 compute actor(s) and 0 datashard(s): CA [2:999:2795], CA [2:1000:2796], CA [2:1001:2797], CA [2:998:2794], 2025-06-30T09:23:18.441927Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:998:2794], task: 4, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 121 Tasks { TaskId: 4 StageId: 3 CpuTimeUs: 76 FinishTimeMs: 1751275398441 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 67 BuildCpuTimeUs: 9 HostName: "ghrun-x4qzxsyz2q" NodeId: 2 StartTimeMs: 1751275398441 CreateTimeMs: 1751275398439 UpdateTimeMs: 1751275398441 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.441935Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:998:2794] 2025-06-30T09:23:18.441941Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:999:2795], CA [2:1000:2796], CA [2:1001:2797], 2025-06-30T09:23:18.441947Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:989:2773] TxId: 281474976715665. 
Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 3 compute actor(s) and 0 datashard(s): CA [2:999:2795], CA [2:1000:2796], CA [2:1001:2797], 2025-06-30T09:23:18.441963Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:999:2795], task: 5, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 175 Tasks { TaskId: 5 StageId: 4 CpuTimeUs: 113 FinishTimeMs: 1751275398441 InputRows: 2 InputBytes: 10 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 89 BuildCpuTimeUs: 24 HostName: "ghrun-x4qzxsyz2q" NodeId: 2 StartTimeMs: 1751275398441 CreateTimeMs: 1751275398439 UpdateTimeMs: 1751275398441 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.441971Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:999:2795] 2025-06-30T09:23:18.441976Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1000:2796], CA [2:1001:2797], 2025-06-30T09:23:18.441983Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [2:1000:2796], CA [2:1001:2797], 2025-06-30T09:23:18.442053Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1000:2796], task: 6, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 89 Tasks { TaskId: 6 StageId: 5 CpuTimeUs: 45 FinishTimeMs: 1751275398441 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 27 BuildCpuTimeUs: 18 HostName: "ghrun-x4qzxsyz2q" NodeId: 2 CreateTimeMs: 1751275398439 UpdateTimeMs: 1751275398441 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.442062Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Compute actor has finished execution: [2:1000:2796] 2025-06-30T09:23:18.442067Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1001:2797], 2025-06-30T09:23:18.442072Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1001:2797], 2025-06-30T09:23:18.442125Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1001:2797], task: 7, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 86 DurationUs: 1000 Tasks { TaskId: 7 StageId: 6 CpuTimeUs: 37 FinishTimeMs: 1751275398442 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ResultRows: 2 ResultBytes: 7 ComputeCpuTimeUs: 24 BuildCpuTimeUs: 13 HostName: "ghrun-x4qzxsyz2q" NodeId: 2 StartTimeMs: 1751275398441 CreateTimeMs: 1751275398439 UpdateTimeMs: 1751275398442 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.442133Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1001:2797] 2025-06-30T09:23:18.442186Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:23:18.442196Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:989:2773] TxId: 281474976715665. Ctx: { TraceId: 01jz02bs79e0q23b0msspqjyqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzQ2MDkzODYtMjEwNGM2ZjItOTYwOTRlMzAtMmU0MjgyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.001230s ReadRows: 2 ReadBytes: 16 ru: 2 rate limiter was not found force flag: 1 { items { uint32_value: 3 } items { uint32_value: 2 } }, { items { uint32_value: 4 } items { uint32_value: 2 } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::UncommittedReadSetAck [GOOD] Test command err: 2025-06-30T09:23:14.863633Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:14.863710Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:14.863768Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:14.863808Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:14.863818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:23:14.863842Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004596/r3tmp/tmpPQ5UYq/pdisk_1.dat 2025-06-30T09:23:14.963943Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:15.062890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:15.157944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:15.157998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:15.164261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:15.164307Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:15.187530Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:23:15.187682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:15.187844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:15.493787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:15.556080Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [2:1186:2342], Recipient [2:1211:2354]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:15.559048Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [2:1186:2342], Recipient [2:1211:2354]: NKikimr::TEvTablet::TEvRestored 
2025-06-30T09:23:15.559240Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1211:2354] 2025-06-30T09:23:15.559328Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:23:15.568520Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [2:1186:2342], Recipient [2:1211:2354]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:15.570537Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:23:15.570593Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:23:15.570862Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:23:15.570875Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:23:15.570884Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:23:15.570954Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:23:15.570994Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:23:15.571007Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:1235:2354] in generation 1 2025-06-30T09:23:15.572694Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:23:15.578454Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:23:15.578550Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:23:15.578581Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:1239:2371] 2025-06-30T09:23:15.578587Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:23:15.578593Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:23:15.578599Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:23:15.578698Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:1211:2354], Recipient [2:1211:2354]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:15.578704Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:15.578826Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:23:15.578855Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:23:15.578868Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:23:15.578876Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:15.578884Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:23:15.578890Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:23:15.578897Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:23:15.578901Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:23:15.578905Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:23:15.623146Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1243:2372], Recipient [2:1211:2354]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:15.623170Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:15.623178Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1195:2739], serverId# [2:1243:2372], sessionId# [0:0:0] 2025-06-30T09:23:15.623258Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:764:2428], Recipient [2:1243:2372] 2025-06-30T09:23:15.623263Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:23:15.623292Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:23:15.623344Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:23:15.623356Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:23:15.623385Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:23:15.623392Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:23:15.623395Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:23:15.623399Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:23:15.623403Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:23:15.623450Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:23:15.623453Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:23:15.623456Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:23:15.623459Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 
2025-06-30T09:23:15.623469Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:23:15.623473Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:23:15.623478Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:23:15.623483Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:23:15.623493Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-30T09:23:15.627324Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152 ... 224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-30T09:23:18.222117Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:18.222124Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715667 2025-06-30T09:23:18.222156Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2539 txid# 281474976715667 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-30T09:23:18.222367Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:23:18.222448Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:1992:2468], Recipient [2:2120:2498]: {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-30T09:23:18.222455Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:18.222459Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715669 2025-06-30T09:23:18.222467Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-30T09:23:18.222514Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:1992:2468], Recipient [2:2120:2498]: {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-30T09:23:18.222519Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:18.222523Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715669 
2025-06-30T09:23:18.222529Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-30T09:23:18.222564Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [1:2052:3243], Recipient [2:2248:2539] 2025-06-30T09:23:18.222568Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:18.222955Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715668 2025-06-30T09:23:18.222974Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2540 txid# 281474976715668 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-30T09:23:18.223140Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:23:18.223361Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [1:2052:3243], Recipient [2:2248:2539] 2025-06-30T09:23:18.223369Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:18.223376Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715669 2025-06-30T09:23:18.223387Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-30T09:23:18.223420Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [1:2052:3243], Recipient [2:2248:2539] 2025-06-30T09:23:18.223426Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:18.223432Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715669 2025-06-30T09:23:18.223439Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-30T09:23:18.223464Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:755: Complete volatile write [2690 : 281474976715669] from 72075186224037890 at tablet 72075186224037890 send result to client [1:2204:3308] 2025-06-30T09:23:18.223517Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:23:18.225734Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-30T09:23:18.226077Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender 
[2:2120:2498], Recipient [2:1992:2468]: {TEvReadSet step# 2539 txid# 281474976715667 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletConsumer# 72075186224037890 Flags# 0 Seqno# 1} 2025-06-30T09:23:18.226089Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:18.226098Z node 2 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715667 2025-06-30T09:23:18.226655Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-30T09:23:18.226695Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Reply at 72075186224037890 {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-30T09:23:18.226708Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-30T09:23:18.227107Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:2120:2498], Recipient [2:1992:2468]: {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037890 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-30T09:23:18.227120Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:18.227127Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037890 dest 72075186224037888 producer 72075186224037890 txId 281474976715669 2025-06-30T09:23:18.227152Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037890 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-30T09:23:18.227162Z node 2 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 2690:281474976715669 at 72075186224037888 2025-06-30T09:23:18.227184Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-30T09:23:18.227213Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-30T09:23:18.227339Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:2120:2498], Recipient [1:2052:3243] 2025-06-30T09:23:18.227349Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:18.227360Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715668 2025-06-30T09:23:18.227652Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-30T09:23:18.227664Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Reply at 72075186224037890 {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-30T09:23:18.227697Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: 
TTxReadSet::Complete at 72075186224037890 2025-06-30T09:23:18.227728Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:2120:2498], Recipient [2:1992:2468]: {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletConsumer# 72075186224037890 Flags# 0 Seqno# 3} 2025-06-30T09:23:18.227734Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:18.227740Z node 2 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715669 2025-06-30T09:23:18.227789Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:2120:2498], Recipient [1:2052:3243] 2025-06-30T09:23:18.227795Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-30T09:23:18.227805Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037890 dest 72075186224037889 producer 72075186224037890 txId 281474976715669 2025-06-30T09:23:18.227820Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 2690 txid# 281474976715669 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-30T09:23:18.227826Z node 1 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 2690:281474976715669 at 72075186224037889 2025-06-30T09:23:18.227837Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-30T09:23:18.227872Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:2120:2498], Recipient [1:2052:3243] 2025-06-30T09:23:18.227877Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-30T09:23:18.227884Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715669
>> KqpErrors::ResolveTableError
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::FindSubDomainPathIdActorAsync [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:44.759578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:44.759597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.759601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-30T09:22:44.759604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:44.759608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:44.759610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:44.759616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.766283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:44.766419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:44.766496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:44.785474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:44.785500Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:44.788592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:44.788687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:44.788727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:44.791581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:44.791689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:44.791793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.791854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:44.801589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.801695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:44.802083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:44.802140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain 
is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:44.802147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:44.802168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.803879Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:44.829623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:44.829735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.829803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:44.829814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:44.829882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:44.829902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:44.830728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.830793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:44.830842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.830853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:44.830859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:44.830866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:44.831468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:22:44.831485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:44.831496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:44.831994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.832009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.832015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.832023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:44.832847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:44.833384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:44.833419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:44.833613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:44.833764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:44.833820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 
72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:44.834375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.834386Z node 1 :FLAT_TX_SCHEMESHARD ... Id: 3], 2 2025-06-30T09:23:17.652426Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:23:17.652443Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:23:17.652449Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:23:17.652456Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-30T09:23:17.652462Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:23:17.652711Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:23:17.652727Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:23:17.652733Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:23:17.652737Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:23:17.652742Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-30T09:23:17.652755Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-30T09:23:17.653962Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:23:17.654070Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-30T09:23:17.656910Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 
2025-06-30T09:23:17.656929Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-30T09:23:17.657025Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-30T09:23:17.657047Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:23:17.657052Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [15:514:2465] TestWaitNotification: OK eventTxId 102 2025-06-30T09:23:17.657131Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomenA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:23:17.657196Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomenA" took 85us result status StatusSuccess 2025-06-30T09:23:17.657288Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomenA" PathDescription { Self { Name: "SubDomenA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ChildrenExist: false BalancerTabletID: 72075186233409547 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 247 AccountSize: 247 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:17.657337Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomenA/Topic1" Options { 
ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:23:17.657354Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomenA/Topic1" took 19us result status StatusSuccess 2025-06-30T09:23:17.657418Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomenA/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 247 AccountSize: 247 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:18.209117Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:18.209233Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 3 took 134us result status StatusSuccess 2025-06-30T09:23:18.209432Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomenA/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } 
ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 247 AccountSize: 247 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:18.292421Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__find_subdomain_path_id.cpp:20: FindTabletSubDomainPathId for tablet 72075186233409546
>> BackupRestore::RestoreViewQueryText [GOOD]
>> BackupRestore::RestoreViewReferenceTable
>> TableCreation::ConcurrentTableCreationWithDifferentVersions
>> BackupRestoreS3::TestAllPrimitiveTypes-FLOAT [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite [GOOD]
Test command err:
2025-06-30T09:23:15.469795Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:15.469906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:23:15.469927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004586/r3tmp/tmpkFGu26/pdisk_1.dat 2025-06-30T09:23:15.583070Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:23:15.583909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:15.600563Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:15.601408Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275395040250 != 1751275395040254 2025-06-30T09:23:15.643317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:15.643350Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:15.653865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:15.731490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:15.748267Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:15.748446Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:15.748522Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:23:15.748591Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:23:15.756464Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:15.756636Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:23:15.756661Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:23:15.756838Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:23:15.756848Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:23:15.756855Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:23:15.756910Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:23:15.756927Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:23:15.756936Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:23:15.767217Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:23:15.772990Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:23:15.773085Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:23:15.773110Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:23:15.773115Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:23:15.773119Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:23:15.773123Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:23:15.773211Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:15.773221Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:15.773331Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:23:15.773356Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:23:15.773366Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:23:15.773373Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:15.773379Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:23:15.773385Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:23:15.773393Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:23:15.773401Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:23:15.773407Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:23:15.773435Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:15.773440Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:15.773448Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:23:15.773569Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:23:15.773575Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:23:15.773596Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:23:15.773640Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:23:15.773651Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:23:15.773668Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:23:15.773675Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:23:15.773678Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:23:15.773682Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:23:15.773685Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:23:15.773761Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:23:15.773764Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:23:15.773767Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:23:15.773770Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:23:15.773782Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:23:15.773784Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:23:15.773787Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:23:15.773789Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:23:15.773795Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:23:15.774032Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:23:15.774042Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:23:15.784430Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:23:15.784456Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:23:15.784463Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:23:15.784477Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 751275398867 UpdateTimeMs: 1751275398870 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.870546Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1033:2825] 2025-06-30T09:23:18.870558Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1036:2828], CA [2:1037:2829], CA [2:1034:2826], CA [2:1038:2830], CA [2:1035:2827], 2025-06-30T09:23:18.870566Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 5 compute actor(s) and 0 datashard(s): CA [2:1036:2828], CA [2:1037:2829], CA [2:1034:2826], CA [2:1038:2830], CA [2:1035:2827], 2025-06-30T09:23:18.870675Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1034:2826], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 250 DurationUs: 1000 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 193 FinishTimeMs: 1751275398870 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 181 BuildCpuTimeUs: 12 HostName: "ghrun-x4qzxsyz2q" NodeId: 2 StartTimeMs: 1751275398869 CreateTimeMs: 1751275398868 UpdateTimeMs: 1751275398870 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.870687Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. 
Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1034:2826] 2025-06-30T09:23:18.870695Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1036:2828], CA [2:1037:2829], CA [2:1038:2830], CA [2:1035:2827], 2025-06-30T09:23:18.870702Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 4 compute actor(s) and 0 datashard(s): CA [2:1036:2828], CA [2:1037:2829], CA [2:1038:2830], CA [2:1035:2827], 2025-06-30T09:23:18.870911Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1035:2827], task: 4, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 122 DurationUs: 1000 Tasks { TaskId: 4 StageId: 3 CpuTimeUs: 81 FinishTimeMs: 1751275398870 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 72 BuildCpuTimeUs: 9 HostName: "ghrun-x4qzxsyz2q" NodeId: 2 StartTimeMs: 1751275398869 CreateTimeMs: 1751275398868 UpdateTimeMs: 1751275398870 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.870929Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1035:2827] 2025-06-30T09:23:18.870938Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1036:2828], CA [2:1037:2829], CA [2:1038:2830], 2025-06-30T09:23:18.870945Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 3 compute actor(s) and 0 datashard(s): CA [2:1036:2828], CA [2:1037:2829], CA [2:1038:2830], 2025-06-30T09:23:18.870969Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1026:2807] TxId: 281474976715667. 
Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1036:2828], task: 5, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 180 Tasks { TaskId: 5 StageId: 4 CpuTimeUs: 125 FinishTimeMs: 1751275398870 InputRows: 2 InputBytes: 10 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 103 BuildCpuTimeUs: 22 HostName: "ghrun-x4qzxsyz2q" NodeId: 2 StartTimeMs: 1751275398870 CreateTimeMs: 1751275398868 UpdateTimeMs: 1751275398870 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.870980Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1036:2828] 2025-06-30T09:23:18.870986Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1037:2829], CA [2:1038:2830], 2025-06-30T09:23:18.870994Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [2:1037:2829], CA [2:1038:2830], 2025-06-30T09:23:18.871042Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1037:2829], task: 6, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 98 Tasks { TaskId: 6 StageId: 5 CpuTimeUs: 47 FinishTimeMs: 1751275398870 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 30 BuildCpuTimeUs: 17 HostName: "ghrun-x4qzxsyz2q" NodeId: 2 CreateTimeMs: 1751275398868 UpdateTimeMs: 1751275398870 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.871052Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1037:2829] 2025-06-30T09:23:18.871059Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Waiting for: CA [2:1038:2830], 2025-06-30T09:23:18.871064Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1038:2830], 2025-06-30T09:23:18.871144Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1038:2830], task: 7, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 90 DurationUs: 1000 Tasks { TaskId: 7 StageId: 6 CpuTimeUs: 40 FinishTimeMs: 1751275398871 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ResultRows: 2 ResultBytes: 7 ComputeCpuTimeUs: 27 BuildCpuTimeUs: 13 HostName: "ghrun-x4qzxsyz2q" NodeId: 2 StartTimeMs: 1751275398870 CreateTimeMs: 1751275398868 UpdateTimeMs: 1751275398871 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:18.871154Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1038:2830] 2025-06-30T09:23:18.871214Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:23:18.871228Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1026:2807] TxId: 281474976715667. Ctx: { TraceId: 01jz02bsm93kqc1zy49jjd8t21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU3YWIzNjctYzMzYTVkMzQtNTVjM2U2ZTgtOTc5ZDYwMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.001157s ReadRows: 2 ReadBytes: 16 ru: 2 rate limiter was not found force flag: 1 { items { uint32_value: 3 } items { uint32_value: 2 } }, { items { uint32_value: 4 } items { uint32_value: 2 } }
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> ReadIteratorExternalBlobs::NotExtBlobs [GOOD]
Test command err:
2025-06-30T09:19:33.541420Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:19:33.541516Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:19:33.541534Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004338/r3tmp/tmptI1pnM/pdisk_1.dat 2025-06-30T09:19:33.656865Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:19:33.657881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:19:33.675991Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:33.677176Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275173053815 != 1751275173053819 2025-06-30T09:19:33.719677Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:33.719726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:33.730366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:33.804336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:33.824291Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:19:33.824577Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:19:33.824693Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:19:33.824773Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:19:33.835446Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:621:2528], Recipient [1:630:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:19:33.835712Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:19:33.835743Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:19:33.835930Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-30T09:19:33.835941Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:19:33.835950Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:19:33.836027Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:19:33.836049Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:19:33.836064Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:19:33.846364Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:19:33.849968Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:19:33.850057Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:19:33.850083Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:19:33.850087Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:19:33.850090Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:19:33.850096Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:19:33.850164Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:630:2534], Recipient [1:630:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:33.850170Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:19:33.850261Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:19:33.850281Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:19:33.850290Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:19:33.850298Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:19:33.850304Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-30T09:19:33.850309Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-30T09:19:33.850312Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-30T09:19:33.850317Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:19:33.850321Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:19:33.850337Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:637:2538], Recipient [1:630:2534]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:33.850341Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:19:33.850346Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:19:33.850423Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:375:2369], Recipient [1:637:2538] 2025-06-30T09:19:33.850427Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:19:33.850442Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:19:33.850500Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-30T09:19:33.850514Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:19:33.850533Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:19:33.850542Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-30T09:19:33.850547Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-30T09:19:33.850553Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-30T09:19:33.850557Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:33.850607Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-30T09:19:33.850612Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-30T09:19:33.850616Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-30T09:19:33.850619Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:33.850630Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-30T09:19:33.850634Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-30T09:19:33.850641Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-30T09:19:33.850645Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-30T09:19:33.850650Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-30T09:19:33.850966Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:649:2545], Recipient [1:630:2534]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-30T09:19:33.850976Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:19:33.861298Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:19:33.861330Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-30T09:19:33.861354Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-30T09:19:33.861368Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... thTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:01.398841Z node 18 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz02b8c829vhcp56ccjt55vn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=18&id=Zjk2ODI3ZTEtYTk2N2YxMmQtZWQxYmIyOWUtZTA3MTk0Mjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:02.246777Z node 19 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [19:287:2329], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:02.246858Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:02.246890Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004338/r3tmp/tmpkaCMHs/pdisk_1.dat 2025-06-30T09:23:02.341428Z node 19 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 19 Type# 268639257 2025-06-30T09:23:02.341986Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:02.357631Z node 19 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:02.357940Z node 19 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [19:33:2080] 1751275381654341 != 1751275381654345 2025-06-30T09:23:02.402518Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:02.402577Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:02.413326Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:02.487603Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:02.706438Z node 19 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [19:699:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:02.706477Z node 19 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [19:709:2586], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:02.706490Z node 19 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:02.707762Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:02.752305Z node 19 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:02.862017Z node 19 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [19:713:2589], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:23:02.893959Z node 19 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [19:783:2628] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:03.622724Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz02b9xj2q8v9drpxnryn573, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=Y2UxOTZlZjctZjlmOTdjODAtMzQ5NjZiZjktNzg4YmUzMmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:04.447295Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02batwa9kypae8fd273zr8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=NTNlODM2NjEtZjdjZjg4MWItZDU4YWU3MmMtNjZkNGQ3MjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:05.416073Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02bbmh43p9m2t48pqy60x1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=NDUyMTcxYS0zYWIyZDQ2MC02ZWI0MzU2OS01MTllMjMyNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:06.256740Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz02bcjv9zf67sghc66553v2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=YTkzNzgzOWMtZjM2ODBjMGUtZjA4NWJhNTQtMWJmZjJlMTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:07.062767Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jz02bdd30s66qr3dkhqss369, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=YjVkOTk2MDMtOGYzOGM3N2QtOTdiOTJhOC1mMTkyMWZmZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:08.176834Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz02be6cb30hbt7r77v04c3v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=NTVhOTc0ZDQtNDUyNjA1MmItZDgyYTA0NzctMzc3NDY5NDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:08.939207Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jz02bf97dxqhggngk2y8pmdg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=ZjE4NmQwMzItMjg0YWRhNWQtNzRmYTYwNjUtNjdkY2ZiYmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:09.712694Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz02bg0w9bncad43nke8mqh6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=MzBmNGJlMDItZDdkY2U2YWMtZDFiNzNkODMtMjg5NWI4ZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:10.681148Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jz02bgs7bbgsnxt1p999e6x4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=NTkyZjEzY2ItMWJkMzYyZWQtMjQ4OTNhMWUtNDZmNTMyMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:11.593511Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jz02bhqh067ger2y6y5b63aq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=MzBmNzg5ZGMtYTIyMWQzYzMtMWQ4NmEwNy1hMTNlNzY1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... waiting for stats after upsert 2025-06-30T09:23:13.751662Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs 2025-06-30T09:23:13.751704Z node 19 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Captured TEvDataShard::TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 1 Round: 0 TableStats { DataSize: 10487312 RowCount: 10 IndexSize: 0 InMemSize: 10487312 LastAccessTime: 1505 LastUpdateTime: 1505 ImmediateTxCompleted: 10 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 925 Memory: 17425320 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 19 StartTime: 450 TableOwnerId: 72057594046644480 FollowerId: 0 ... waiting for stats after compaction Captured TEvDataShard::TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 1 Round: 1 TableStats { DataSize: 10487312 RowCount: 10 IndexSize: 0 InMemSize: 10487312 LastAccessTime: 1505 LastUpdateTime: 1505 ImmediateTxCompleted: 10 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 20 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 461 Memory: 124804 Storage: 10486554 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 19 StartTime: 450 TableOwnerId: 72057594046644480 FollowerId: 0 Captured TEvDataShard::TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 1 Round: 2 TableStats { DataSize: 10486220 RowCount: 10 IndexSize: 0 InMemSize: 0 LastAccessTime: 1505 LastUpdateTime: 1505 ImmediateTxCompleted: 10 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 20 HasLoanedParts: false Channels { Channel: 1 DataSize: 10486220 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 461 Memory: 124804 Storage: 10486554 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 19 StartTime: 450 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-30T09:23:18.228348Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. 
Ctx: { TraceId: 01jz02bs0k2hkpx014d9qes380, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=OTIxY2NkZjctNzE0Yjc2OTktZmVhNWYyN2UtMjhlOGU5Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> EncryptedBackupParamsValidationTest::NoItemDestination [GOOD] >> TableCreation::SimpleTableCreation >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DOUBLE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATE >> BackupPathTest::ExportCommonSourcePathImportExplicitly [GOOD] >> BackupRestoreS3::RestoreViewReferenceTable [GOOD] >> BackupRestoreS3::RestoreViewDependentOnAnotherView >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerless [GOOD] >> TSchemeshardBackgroundCompactionTest::ShouldCompactServerless >> EncryptedBackupParamsValidationTest::NoCommonDestination >> KqpProxy::NoLocalSessionExecution [GOOD] >> KqpProxy::NoUserAccessToScriptExecutionsTable >> BackupRestore::TestAllPrimitiveTypes-UUID [GOOD] >> TMaintenanceApiTest::SingleCompositeActionGroup >> BackupPathTest::ImportFilterByPrefix >> TCmsTest::TestSetResetMarkers >> TCmsTest::ActionIssuePartialPermissions >> TableCreation::ConcurrentTableCreationWithDifferentVersions [GOOD] >> TableCreation::ConcurrentUpdateTable >> TableCreation::SimpleTableCreation [GOOD] >> TableCreation::SimpleUpdateTable >> EncryptedExportTest::EncryptionChecksumAndCompression [GOOD] >> TCmsTest::ManageRequestsWrong >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunning [GOOD] >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunningExtSubdomain >> TCmsTest::StateStorageNodesFromOneRing >> EncryptedBackupParamsValidationTest::NoCommonDestination [GOOD] >> TCmsTest::WalleTasks ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other [GOOD] Test command err: 2025-06-30T09:19:16.231604Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521669521922007714:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:16.231735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:19:16.265642Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a1f/r3tmp/tmpqBU8Of/pdisk_1.dat 2025-06-30T09:19:16.292327Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:19:16.292767Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521669521922007613:2080] 1751275156225196 != 1751275156225199 TServer::EnableGrpc on GrpcPort 63744, node 1 2025-06-30T09:19:16.309748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004a1f/r3tmp/yandexA4NHGH.tmp 2025-06-30T09:19:16.309768Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004a1f/r3tmp/yandexA4NHGH.tmp 2025-06-30T09:19:16.309878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: 
/home/runner/.ya/build/build_root/cl3w/004a1f/r3tmp/yandexA4NHGH.tmp 2025-06-30T09:19:16.309974Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:19:16.316016Z INFO: TTestServer started on Port 2087 GrpcPort 63744 TClient is connected to server localhost:2087 PQClient connected to localhost:63744 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:19:16.354953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:19:16.368227Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:19:16.368270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:19:16.369370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:19:16.369931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:19:16.438074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:19:16.682187Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669521922008383:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:16.682197Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669521922008396:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:16.682242Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:16.683340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:19:16.683745Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521669521922008428:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:16.683789Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:19:16.686295Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521669521922008398:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-30T09:19:16.739103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:16.748331Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521669521922008519:2464] txid# 281474976710664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:19:16.752272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:19:16.763637Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521669521922008536:2315], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:19:16.764327Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZTdkNDA2MDAtZTNkNmM2ODQtOTZjNjI3MzUtZjMzMDNjOGI=, ActorId: [1:7521669521922008381:2296], ActorState: ExecuteState, TraceId: 01jz024d65byz1w3nrvjkax0md, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:19:16.764837Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:19:16.773729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521669521922008745:2611] 2025-06-30T09:19:17.224599Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:19:21.225934Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521669521922007714:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:19:21.225990Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-30T09:19:22.108268Z :WriteToTopic_Demo_11_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:19:22.135636Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:19:22.155685Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:19:22.155851Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521669547691812731:2698] connected; active server actors: 1 2025-06-30T09:19:22.155916Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-30T09:19:22.156210Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:19:22.156243Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1 ... Q: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateIdle] Init complete for topic 'topic_A' Partition: {0, {1, 281474976715674}, 100000} SourceId: test-message_group_id_3 SeqNo: 1 offset: 1 MaxOffset: 2 2025-06-30T09:23:16.262918Z node 11 :PERSQUEUE DEBUG: partition.cpp:606: [PQ: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateIdle] Init complete for topic 'topic_A' Partition: {0, {1, 281474976715674}, 100000} SourceId: test-message_group_id_1 SeqNo: 1 offset: 0 MaxOffset: 2 2025-06-30T09:23:16.262972Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateIdle] no data for compaction 2025-06-30T09:23:16.262984Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:0:Initializer] Start initializing step TInitDataStep 2025-06-30T09:23:16.262987Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-30T09:23:16.262991Z node 11 :PERSQUEUE INFO: partition_init.cpp:895: [topic_A:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:23:16.262992Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic_A:0:Initializer] Initializing completed. 2025-06-30T09:23:16.262997Z node 11 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037895, Partition: 0, State: StateInit] init complete for topic 'topic_A' partition 0 generation 2 [11:7521670555446908021:2468] 2025-06-30T09:23:16.263001Z node 11 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72075186224037895, Partition: 0, State: StateInit] SYNC INIT topic topic_A partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:23:16.263008Z node 11 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72075186224037895, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-30T09:23:16.263022Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037895, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 2 2025-06-30T09:23:16.263025Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037895, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:23:16.263063Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037895, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:23:16.263856Z node 11 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037896][topic_A] TEvClientConnected TabletId 72075186224037895, NodeId 11, Generation 2 2025-06-30T09:23:16.263888Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037895] server connected, pipe [11:7521670555446907995:2444], now have 1 active actors on pipe 2025-06-30T09:23:18.246089Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037894] server connected, pipe [11:7521670564036842696:2902], now have 1 active actors on pipe 2025-06-30T09:23:18.247171Z node 11 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:465: [72075186224037896][topic_A] TEvClientDestroyed 72075186224037894 2025-06-30T09:23:18.253740Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037894] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:23:18.253965Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037894] Registered with mediator time cast 2025-06-30T09:23:18.254087Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037894] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:23:18.254122Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:742: [PQ: 72075186224037894] has a tx info 2025-06-30T09:23:18.254141Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037894] PlanStep 1742296505901, PlanTxId 281474976715673, ExecStep 1742296505901, ExecTxId 281474976715673 2025-06-30T09:23:18.254205Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037894] Txs.size=0, PlannedTxs.size=0 2025-06-30T09:23:18.254334Z node 11 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:23:18.254338Z node 11 :PERSQUEUE INFO: pq_impl.cpp:788: [PQ: 72075186224037894] has a tx writes info 2025-06-30T09:23:18.254467Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3630: [PQ: 72075186224037894] send TEvSubscribeLock for WriteId {1, 281474976715674} 2025-06-30T09:23:18.254486Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:23:18.254493Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitConfigStep 2025-06-30T09:23:18.254584Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:23:18.254657Z node 11 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateInit] bootstrapping {1, {1, 281474976715674}, 100000} [11:7521670564036842723:2498] 2025-06-30T09:23:18.254936Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitDiskStatusStep 2025-06-30T09:23:18.255266Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-30T09:23:18.255325Z node 11 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037894, Partition: 1, State: StateInit] bootstrapping 1 [11:7521670564036842722:2497] 2025-06-30T09:23:18.255526Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitDiskStatusStep 2025-06-30T09:23:18.255542Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitMetaStep 2025-06-30T09:23:18.255838Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitMetaStep 2025-06-30T09:23:18.255878Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitInfoRangeStep 2025-06-30T09:23:18.256069Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitInfoRangeStep 2025-06-30T09:23:18.256107Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitDataRangeStep 2025-06-30T09:23:18.256169Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: D0000100000_00000000000000000000_00000_0000000001_00000| 2025-06-30T09:23:18.256207Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:624: [topic_A:{1, {1, 281474976715674}, 100000}:TInitDataRangeStep] Got data offset 0 count 1 size 250 so 0 eo 1 D0000100000_00000000000000000000_00000_0000000001_00000| 2025-06-30T09:23:18.256228Z node 11 :PERSQUEUE 
DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitDataStep 2025-06-30T09:23:18.256382Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:845: [topic_A:{1, {1, 281474976715674}, 100000}:TInitDataStep] read res partition offset 0 endOffset 1 key 0,1 valuesize 250 expected 250 2025-06-30T09:23:18.256397Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-30T09:23:18.256401Z node 11 :PERSQUEUE INFO: partition_init.cpp:895: [topic_A:{1, {1, 281474976715674}, 100000}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:23:18.256405Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Initializing completed. 2025-06-30T09:23:18.256412Z node 11 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateInit] init complete for topic 'topic_A' partition {1, {1, 281474976715674}, 100000} generation 2 [11:7521670564036842723:2498] 2025-06-30T09:23:18.256425Z node 11 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateInit] SYNC INIT topic topic_A partitition {1, {1, 281474976715674}, 100000} so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 SYNC INIT sourceId test-message_group_id_2 seqNo 1 offset 0 2025-06-30T09:23:18.256434Z node 11 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateIdle] Process pending events. Count 0 2025-06-30T09:23:18.256452Z node 11 :PERSQUEUE DEBUG: partition.cpp:606: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateIdle] Init complete for topic 'topic_A' Partition: {1, {1, 281474976715674}, 100000} SourceId: test-message_group_id_2 SeqNo: 1 offset: 0 MaxOffset: 1 2025-06-30T09:23:18.256462Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitDataRangeStep 2025-06-30T09:23:18.256490Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateIdle] no data for compaction 2025-06-30T09:23:18.256563Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitDataStep 2025-06-30T09:23:18.256569Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-30T09:23:18.256572Z node 11 :PERSQUEUE INFO: partition_init.cpp:895: [topic_A:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:23:18.256574Z node 11 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic_A:1:Initializer] Initializing completed. 
2025-06-30T09:23:18.256579Z node 11 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037894, Partition: 1, State: StateInit] init complete for topic 'topic_A' partition 1 generation 2 [11:7521670564036842722:2497] 2025-06-30T09:23:18.256583Z node 11 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72075186224037894, Partition: 1, State: StateInit] SYNC INIT topic topic_A partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-30T09:23:18.256592Z node 11 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72075186224037894, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-30T09:23:18.256616Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 1, State: StateIdle] Topic 'topic_A' partition 1 user consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 2 2025-06-30T09:23:18.256620Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 1, State: StateIdle] Topic 'topic_A' partition 1 user test-consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:23:18.256661Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 1, State: StateIdle] no data for compaction 2025-06-30T09:23:18.256824Z node 11 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037896][topic_A] TEvClientConnected TabletId 72075186224037894, NodeId 11, Generation 2 2025-06-30T09:23:18.256845Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037894] server connected, pipe [11:7521670564036842698:2444], now have 1 active actors on pipe ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> BackupRestore::TestAllPrimitiveTypes-UUID [GOOD] Test command err: 2025-06-30T09:23:01.215043Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670491345286531:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:01.215062Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpObiZMo/pdisk_1.dat 2025-06-30T09:23:01.408241Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:01.434559Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 8800, node 1 2025-06-30T09:23:01.454033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:01.454048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:01.454052Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:01.454103Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62090 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:01.524713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:01.539652Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:01.539695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:01.543576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Backup "/Root" to "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpyeTLLM/"Create temporary directory "/Root/~backup_20250630T092301" in databaseProcess "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpyeTLLM/dir"Create directory "/Root/~backup_20250630T092301/dir" in databaseWrite ACL into "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpyeTLLM/dir/permissions.pb"Remove directory "/Root/~backup_20250630T092301/dir"2025-06-30T09:23:01.666513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Remove temporary directory "/Root/~backup_20250630T092301" in database2025-06-30T09:23:01.675649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Backup completed successfully2025-06-30T09:23:01.684229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Restore 
"/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpyeTLLM/" to "/Root"Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpyeTLLM/"},{"type":"Directory","path":"/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpyeTLLM/dir"}]Process "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpyeTLLM/dir"Restore empty directory "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpyeTLLM/dir" to "/Root/dir"Restore ACL "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpyeTLLM/dir" to "/Root/dir"Read ACL from "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpyeTLLM/dir/permissions.pb"2025-06-30T09:23:01.706329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully 2025-06-30T09:23:02.574125Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521670493814236772:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:02.574350Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpITTmpe/pdisk_1.dat 2025-06-30T09:23:02.623112Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:02.627520Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 15057, node 4 2025-06-30T09:23:02.635176Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:02.635192Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:02.635195Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:02.635233Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7651 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:23:02.674524Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:02.674560Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:02.679451Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:02.681088Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:02.687040Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:23:03.068492Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670498109205006:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.068522Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.115569Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:03.214867Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670498109205168:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.214918Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.290102Z node 4 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][4:7521670498109205345:2318] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:4:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-30 ... ode 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:23:18.556528Z node 28 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:18.574545Z node 28 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz02bscqcm9htmstph6ak4dn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTA0M2VhZTEtNTYyMGI4MGUtZGVmYjg4ZDAtYmVlODA2MjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmp0aAPAc/pdisk_1.dat 2025-06-30T09:23:19.314648Z node 31 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[31:7521670568277594395:2242];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:19.314839Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:23:19.347279Z node 31 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24728, node 31 2025-06-30T09:23:19.363890Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:19.363905Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:19.363908Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:19.363958Z node 31 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6292 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:23:19.419381Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:19.419415Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:19.421508Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:19.421613Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:19.884823Z node 31 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [31:7521670568277595161:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:19.884852Z node 31 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:19.885010Z node 31 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [31:7521670568277595173:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:19.885974Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:19.900158Z node 31 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [31:7521670568277595175:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:23:19.996064Z node 31 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [31:7521670568277595248:2648] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:20.001332Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:20.044855Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02btv13mk2d6j8frj27sxm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=OTBiYWJmMTMtNjMyY2E0NzktYzkxYTkwNTYtODY3NTg2YWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:20.070646Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02btvj9y9d268nzfbrfzs5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=OTBiYWJmMTMtNjMyY2E0NzktYzkxYTkwNTYtODY3NTg2YWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/"Create temporary directory "/Root/~backup_20250630T092320" in databaseProcess "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable"Copy tables: { src: "/Root/UuidTable", dst: "/Root/~backup_20250630T092320/UuidTable" }Describe table "/Root/UuidTable"Describe table "/Root/~backup_20250630T092320/UuidTable"Backup table "/Root/~backup_20250630T092320/UuidTable" to "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable"Write scheme into "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable/permissions.pb"Read table "/Root/~backup_20250630T092320/UuidTable"Write data into "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable/data_00.csv"Drop table "/Root/~backup_20250630T092320/UuidTable"Remove temporary directory "/Root/~backup_20250630T092320" in database2025-06-30T09:23:20.306662Z node 31 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 31, TabletId: 72075186224037889 not found 2025-06-30T09:23:20.307093Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) 2025-06-30T09:23:20.315516Z node 31 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Backup completed successfullyRestore "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/" to 
"/Root"2025-06-30T09:23:20.348297Z node 31 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 31, TabletId: 72075186224037888 not found Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable"}]Process "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable"Read scheme from "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable/scheme.pb"Restore table "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable" to "/Root/UuidTable"2025-06-30T09:23:20.369279Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Created "/Root/UuidTable"Read data from "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable/data_00.csv"2025-06-30T09:23:20.423721Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jz02bv6mav3ccctxf7ekjawx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=YTlhZDhmNjktNTA1MGFlOWYtZjJhMzlmNzgtOTA1YTkyMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable" to "/Root/UuidTable"Read ACL from "/home/runner/.ya/build/build_root/cl3w/0027b8/r3tmp/tmpWGbHQk/UuidTable/permissions.pb"2025-06-30T09:23:20.453013Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-30T09:23:20.483300Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jz02bv8b5fy5kpth1vkfm0dr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=OTBiYWJmMTMtNjMyY2E0NzktYzkxYTkwNTYtODY3NTg2YWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> BackupRestore::RestoreViewReferenceTable [GOOD] >> BackupRestore::RestoreViewToDifferentDatabase >> EncryptedExportTest::ChangefeedEncryption >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL >> EncryptedBackupParamsValidationTest::IncorrectKeyLengthExport >> BackupRestore::TestAllPrimitiveTypes-DATE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATETIME |82.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut >> BackupRestoreS3::RestoreViewDependentOnAnotherView [GOOD] |82.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut >> KqpProxy::NoUserAccessToScriptExecutionsTable [GOOD] >> BackupPathTest::ImportFilterByPrefix [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeInvalid [GOOD] >> TableCreation::SimpleUpdateTable [GOOD] >> TableCreation::ConcurrentUpdateTable [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobal >> KqpErrors::ResolveTableError [GOOD] >> KqpErrors::ProposeResultLost_RwTx+UseSink [GOOD] >> EncryptedExportTest::ChangefeedEncryption [GOOD] >> DataShardTxOrder::DelayData [GOOD] >> TDowntimeTest::HasUpcomingDowntime [GOOD] >> TCmsTenatsTest::TestNoneTenantPolicy |82.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity >> TCmsTest::StateStorageNodesFromOneRing [GOOD] >> EncryptedBackupParamsValidationTest::IncorrectKeyLengthExport [GOOD] >> BackupPathTest::ImportFilterByYdbObjectPath >> BackupRestore::TestAllPrimitiveTypes-DATETIME [GOOD] >> TCmsTest::ActionIssuePartialPermissions [GOOD] >> KqpErrors::ProposeError [GOOD] >> TCmsTest::ManageRequestsWrong [GOOD] >> TCmsTest::TestSetResetMarkers [GOOD] >> KqpErrors::ProposeResultLost_RwTx-UseSink >> BackupRestore::RestoreViewToDifferentDatabase [GOOD] |82.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |82.7%| [LD] {RESULT} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut >> EncryptedBackupParamsValidationTest::NoSourcePrefix >> TDowntimeTest::SetIgnoredDowntimeGap [GOOD] >> EncryptedExportTest::TopicEncryption >> TDowntimeTest::CleanupOldSegments [GOOD] |82.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity >> TCmsTest::StateStorageAvailabilityMode >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL [GOOD] >> BackupPathTest::ImportFilterByYdbObjectPath [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobal [GOOD] >> TMaintenanceApiTest::SingleCompositeActionGroup [GOOD] >> BsControllerConfig::ManyPDisksRestarts [GOOD] >> TCmsTenatsTest::TestNoneTenantPolicy [GOOD] >> TSequenceReboots::CreateMultipleSequencesHaveInitialSequenceShard [GOOD] >> TPersQueueTest::EventBatching [GOOD] >> TPersQueueTest::NoDecompressionMemoryLeaks >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP >> TCmsTest::ActionWithZeroDuration >> TPersQueueTest::Cache [GOOD] >> BackupRestore::RestoreViewDependentOnAnotherView >> EncryptedBackupParamsValidationTest::NoSourcePrefix [GOOD] >> KqpErrors::ProposeErrorEvWrite >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestBlock42 >> TCmsTest::WalleTasks [GOOD] >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD] >> TPersQueueTest::CacheHead >> TCmsTest::ManageRequestsDry >> TPQTestSlow::TestOnDiskStoredSourceIds [GOOD] >> EncryptedExportTest::TopicEncryption [GOOD] >> SelfHeal::TestThreeMaintenanceRequests4Plus2Block [GOOD] >> IndexBuildTest::CancellationNotEnoughRetries [GOOD] >> SelfHeal::TestTwoMaintenanceRequestsOneFaultyMirror3dc >> TCmsTest::StateStorageAvailabilityMode [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TZ_DATE [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TZ_DATETIME [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TZ_TIMESTAMP [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP64 >> BackupPathTest::EncryptedImportWithoutCommonPrefix >> 
BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalAsync >> TCmsTenatsTest::TestDefaultTenantPolicyWithSingleTenantHost >> TMaintenanceApiTest::SimplifiedMirror3DC ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/slow/unittest >> SlowTopicAutopartitioning::CDC_Write [GOOD] >> BsControllerConfig::MergeBoxes Test command err: 2025-06-30T09:22:33.439019Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670367559315114:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:33.439065Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:33.475440Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00344f/r3tmp/tmpgDpdBJ/pdisk_1.dat TServer::EnableGrpc on GrpcPort 28037, node 1 2025-06-30T09:22:33.514054Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:33.515105Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670367559315094:2080] 1751275353438879 != 1751275353438882 2025-06-30T09:22:33.517217Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/00344f/r3tmp/yandex4ogGlA.tmp 2025-06-30T09:22:33.517227Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/00344f/r3tmp/yandex4ogGlA.tmp 2025-06-30T09:22:33.517277Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/00344f/r3tmp/yandex4ogGlA.tmp 2025-06-30T09:22:33.517315Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:33.522693Z INFO: TTestServer started on Port 23760 GrpcPort 28037 TClient is connected to server localhost:23760 2025-06-30T09:22:33.541542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:33.541593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:33.542661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected PQClient connected to localhost:28037 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:33.581397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-30T09:22:33.592417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:33.836068Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670367559315870:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:33.836124Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670367559315883:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:33.836137Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:33.837285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:33.839347Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670367559315885:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:22:33.885322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:33.893033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:33.914929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-30T09:22:33.932111Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670367559316192:2580] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === CheckClustersList. Subcribe to ClusterTracker from [1:7521670367559316252:2613] 2025-06-30T09:22:34.441453Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:38.439200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521670367559315114:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:38.439256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-30T09:22:39.229493Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-30T09:22:39.252354Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521670393329120249:2689], Recipient [1:7521670367559315427:2148]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:39.252382Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:22:39.252385Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:22:39.252394Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521670393329120245:2686], Recipient [1:7521670367559315427:2148]: {TEvModifySchemeTransaction txid# 281474976715674 TabletId# 72057594046644480} 2025-06-30T09:22:39.252396Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:22:39.261157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "origin" Columns { Name: "id" Type: "Uint64" NotNull: false } Columns { Name: "order" Type: "Uint64" NotNull: false } Columns { Name: "value" Type: "Utf8" NotNull: false } KeyColumnNames: "id" KeyColumnNames: "order" UniformPartitionsCount: 1 PartitionConfig { PartitioningPolicy { SizeToSplit: 5242880 MinPartitionsCount: 1 MaxPartitionsCount: 64 SplitByLoadSettings { Enabled: true } } } Temporary: false } } TxId: 281474976715674 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:22:39.261259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /Root/origin, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-30T09:22:39.261292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /Root/origin, opId: 281474976715674:0, schema: Name: "origin" Columns { Name: "id" Type: "Uint64" NotNull: false } Columns { Name: "order" Type: "Uint64" NotNull: false } Columns { Name: "value" Type: "Utf8" NotNull: false } KeyColumnNames: "id" KeyColumnNames: "order" UniformPartitionsCount: 1 PartitionConfig { PartitioningPolicy { SizeToSplit: 5242880 MinPartitionsCount: 1 MaxPartitionsCount: 64 SplitByLoadSettings { Enabled: true } } } Temporary: false, at schemeshard: 72057594046644480 2025-06-30T09:22:39.261396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: origin, child id: [OwnerId: 72057594046644480, LocalPathId: 13], at schemeshard: 72057594046644480 2025-06-30T09:22:39.261415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 0 2025-06-30T09:22:39.261423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976715674:0 type: ... 
0 PackedSize 3841673 count 15929 nextOffset 87631 batches 9 2025-06-30T09:22:47.651723Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87632 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651725Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87632 partNo 0 2025-06-30T09:22:47.651729Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87632 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3841946 count 15930 nextOffset 87632 batches 9 2025-06-30T09:22:47.651732Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87633 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651735Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87633 partNo 0 2025-06-30T09:22:47.651739Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87633 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3842220 count 15931 nextOffset 87633 batches 9 2025-06-30T09:22:47.651742Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87634 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651744Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87634 partNo 0 2025-06-30T09:22:47.651748Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87634 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3842494 count 15932 nextOffset 87634 batches 9 2025-06-30T09:22:47.651751Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87635 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651754Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87635 partNo 0 2025-06-30T09:22:47.651757Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87635 partNo 0 
FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3842768 count 15933 nextOffset 87635 batches 9 2025-06-30T09:22:47.651760Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87636 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651763Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87636 partNo 0 2025-06-30T09:22:47.651766Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87636 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3843042 count 15934 nextOffset 87636 batches 9 2025-06-30T09:22:47.651774Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87637 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651776Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87637 partNo 0 2025-06-30T09:22:47.651786Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87637 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3843315 count 15935 nextOffset 87637 batches 9 2025-06-30T09:22:47.651789Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87638 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651792Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87638 partNo 0 2025-06-30T09:22:47.651795Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87638 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3843589 count 15936 nextOffset 87638 batches 9 2025-06-30T09:22:47.651798Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87639 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651801Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87639 partNo 0 2025-06-30T09:22:47.651804Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId 
'\00072075186224037892' seqNo 87639 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3843862 count 15937 nextOffset 87639 batches 9 2025-06-30T09:22:47.651807Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87640 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651809Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87640 partNo 0 2025-06-30T09:22:47.651813Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87640 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3844136 count 15938 nextOffset 87640 batches 9 2025-06-30T09:22:47.651816Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87641 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651820Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87641 partNo 0 2025-06-30T09:22:47.651823Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87641 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3844409 count 15939 nextOffset 87641 batches 9 2025-06-30T09:22:47.651826Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87642 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651829Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87642 partNo 0 2025-06-30T09:22:47.651832Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87642 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3844683 count 15940 nextOffset 87642 batches 9 2025-06-30T09:22:47.651835Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87643 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651838Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87643 partNo 0 2025-06-30T09:22:47.651842Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' 
partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87643 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3844956 count 15941 nextOffset 87643 batches 9 2025-06-30T09:22:47.651845Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87644 InitialSeqNo=(NULL) 2025-06-30T09:22:47.651847Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob processing sourceId '\00072075186224037892' seqNo 87644 partNo 0 2025-06-30T09:22:47.651858Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 part blob complete sourceId '\00072075186224037892' seqNo 87644 partNo 0 FormedBlobsCount 0 NewHead: Offset 71702 PartNo 0 PackedSize 3845230 count 15942 nextOffset 87644 batches 9 2025-06-30T09:22:47.651861Z node 1 :PERSQUEUE TRACE: partition_compaction.cpp:21: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'origin/feed/streamImpl' partition 0 process write for '\00072075186224037892' DisableDeduplication=0 SeqNo=87645 InitialSeqNo=(NULL) >> TSchemeshardBackgroundCompactionTest::ShouldCompactServerless [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP [GOOD] >> TCmsTest::ActionWithZeroDuration [GOOD] >> BackupRestore::RestoreViewDependentOnAnotherView [GOOD] >> BackupRestore::RestoreKesusResources >> EncryptedBackupParamsValidationTest::EmptyImportItem >> KqpErrors::ProposeErrorEvWrite [GOOD] >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestBlock42 [GOOD] >> TCmsTest::WalleRebootDownNode >> KqpBatchDelete::SimplePartitions [GOOD] >> TCmsTest::ManageRequestsDry [GOOD] >> TSchemeShardTest::AlterIndexTableDirectly [GOOD] >> TPersQueueTest::CacheHead [FAIL] >> NodeDisconnected::BsQueueRetries [GOOD] >> TPersQueueTest::DirectReadBudgetOnRestart [GOOD] >> LabeledDbCounters::OneTablet [GOOD] >> KqpOlapScheme::TenThousandColumns [GOOD] >> KqpBatchUpdate::ManyPartitions_1 [GOOD] >> SystemView::PartitionStatsTtlFields [GOOD] >> EncryptedExportTest::ViewEncryption >> TPersQueueTest::DirectReadCorrectOffsetsOnRestart >> LabeledDbCounters::OneTabletRemoveCounters >> IndexBuildTest::CheckLimitWithDroppedIndex ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::DelayData [GOOD] Test command err: 2025-06-30T09:23:13.566565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:23:13.566591Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:13.567091Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:13.570764Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:13.570905Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:136:2157] 2025-06-30T09:23:13.570955Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:23:13.582172Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: 
StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:13.584672Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:23:13.584708Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:23:13.584917Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-30T09:23:13.584928Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-30T09:23:13.584937Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-30T09:23:13.585044Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:23:13.585063Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:23:13.585078Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:205:2157] in generation 2 2025-06-30T09:23:13.614927Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:23:13.620139Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-30T09:23:13.620206Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:23:13.620227Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:220:2216] 2025-06-30T09:23:13.620232Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-30T09:23:13.620236Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-30T09:23:13.620240Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:23:13.620277Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:13.620283Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:13.620345Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-30T09:23:13.620366Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-30T09:23:13.620393Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:23:13.620401Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:13.620408Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-30T09:23:13.620414Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-30T09:23:13.620419Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-30T09:23:13.620425Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-30T09:23:13.620431Z 
node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-30T09:23:13.620442Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:216:2213], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:13.620446Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:13.620451Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:214:2212], serverId# [1:216:2213], sessionId# [0:0:0] 2025-06-30T09:23:13.620829Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:136:2157]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\002\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-30T09:23:13.620837Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-30T09:23:13.620846Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-30T09:23:13.620876Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-30T09:23:13.620887Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-30T09:23:13.620916Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-30T09:23:13.620923Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-30T09:23:13.620926Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-30T09:23:13.620930Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-30T09:23:13.620933Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:23:13.620987Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-30T09:23:13.620990Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-30T09:23:13.620993Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-30T09:23:13.620996Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:23:13.621005Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-30T09:23:13.621007Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-30T09:23:13.621010Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-30T09:23:13.621012Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-30T09:23:13.621016Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-30T09:23:13.631693Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-30T09:23:13.631716Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-30T09:23:13.631722Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-30T09:23:13.631732Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-30T09:23:13.631745Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-30T09:23:13.631840Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:226:2222], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:13.631849Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:13.631857Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:225:2221], serverId# [1:226:2222], sessionId# [0:0:0] 2025-06-30T09:23:13.631879Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:136:2157]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-30T09:23:13.631884Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-30T09:23:13.631918Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-30T09:23:13.631924Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-30T09:23:13.631930Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-30T09:23:13.631933Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-30T09:23:13.632477Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-30T09:23:13.632487Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-30T09:23:13.632528Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:13.632533Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:13.632539Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:23:13.632544Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active 
planned 0 immediate 0 planned 1 2025-06-30T09:23:13.632547Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:23:13.632553Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-30T09:23:13.632557Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100 ... 184 to execution unit CompletedOperations 2025-06-30T09:23:22.315518Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:506] at 9437184 on unit CompletedOperations 2025-06-30T09:23:22.315526Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:506] at 9437184 is Executed 2025-06-30T09:23:22.315531Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:506] at 9437184 executing on unit CompletedOperations 2025-06-30T09:23:22.315536Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:506] at 9437184 has finished 2025-06-30T09:23:22.315542Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:23:22.315568Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:23:22.315575Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000005:507] in PlanQueue unit at 9437184 2025-06-30T09:23:22.315682Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:240:2231], Recipient [1:240:2231]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:22.315690Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-30T09:23:22.315698Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-30T09:23:22.315704Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:23:22.315711Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:282: Return cached ready operation [1000005:507] at 9437184 2025-06-30T09:23:22.315716Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit PlanQueue 2025-06-30T09:23:22.315721Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-30T09:23:22.315725Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit PlanQueue 2025-06-30T09:23:22.315730Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit LoadTxDetails 2025-06-30T09:23:22.315734Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit LoadTxDetails 2025-06-30T09:23:22.315885Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437184 loaded tx from db 1000005:507 keys extracted: 1 2025-06-30T09:23:22.315895Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-30T09:23:22.315900Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit LoadTxDetails 
2025-06-30T09:23:22.315906Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit FinalizeDataTxPlan 2025-06-30T09:23:22.315911Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit FinalizeDataTxPlan 2025-06-30T09:23:22.315918Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-30T09:23:22.315928Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit FinalizeDataTxPlan 2025-06-30T09:23:22.315933Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-30T09:23:22.315938Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit BuildAndWaitDependencies 2025-06-30T09:23:22.315953Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000005:507] is the new logically complete end at 9437184 2025-06-30T09:23:22.315959Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000005:507] is the new logically incomplete end at 9437184 2025-06-30T09:23:22.315963Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000005:507] at 9437184 2025-06-30T09:23:22.315970Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-30T09:23:22.315975Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-30T09:23:22.315980Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit BuildDataTxOutRS 2025-06-30T09:23:22.315986Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit BuildDataTxOutRS 2025-06-30T09:23:22.315997Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-30T09:23:22.316002Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit BuildDataTxOutRS 2025-06-30T09:23:22.316007Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit StoreAndSendOutRS 2025-06-30T09:23:22.316012Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit StoreAndSendOutRS 2025-06-30T09:23:22.316017Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-30T09:23:22.316022Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit StoreAndSendOutRS 2025-06-30T09:23:22.316028Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit PrepareDataTxInRS 2025-06-30T09:23:22.316032Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit PrepareDataTxInRS 2025-06-30T09:23:22.316037Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-30T09:23:22.316041Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit 
PrepareDataTxInRS 2025-06-30T09:23:22.316045Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit LoadAndWaitInRS 2025-06-30T09:23:22.316050Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit LoadAndWaitInRS 2025-06-30T09:23:22.316055Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-30T09:23:22.316060Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit LoadAndWaitInRS 2025-06-30T09:23:22.316064Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit ExecuteDataTx 2025-06-30T09:23:22.316070Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit ExecuteDataTx 2025-06-30T09:23:22.316179Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000005:507] at tablet 9437184 with status COMPLETE 2025-06-30T09:23:22.316190Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000005:507] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 11, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-30T09:23:22.316201Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is ExecutedNoMoreRestarts 2025-06-30T09:23:22.316206Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit ExecuteDataTx 2025-06-30T09:23:22.316211Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit CompleteOperation 2025-06-30T09:23:22.316215Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit CompleteOperation 2025-06-30T09:23:22.316257Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is DelayComplete 2025-06-30T09:23:22.316264Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit CompleteOperation 2025-06-30T09:23:22.316269Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit CompletedOperations 2025-06-30T09:23:22.316275Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit CompletedOperations 2025-06-30T09:23:22.316282Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-30T09:23:22.316288Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit CompletedOperations 2025-06-30T09:23:22.316294Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:507] at 9437184 has finished 2025-06-30T09:23:22.316300Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:23:22.316306Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-30T09:23:22.316312Z node 1 :TX_DATASHARD TRACE: 
plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations
2025-06-30T09:23:22.316342Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184
2025-06-30T09:23:22.328873Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000005 txid# 506 txid# 507}
2025-06-30T09:23:22.328919Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000005}
2025-06-30T09:23:22.328947Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184
2025-06-30T09:23:22.328963Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:506] at 9437184 on unit CompleteOperation
2025-06-30T09:23:22.328999Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 506] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms
2025-06-30T09:23:22.329018Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184
2025-06-30T09:23:22.329116Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184
2025-06-30T09:23:22.329124Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:507] at 9437184 on unit CompleteOperation
2025-06-30T09:23:22.329143Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 507] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms
2025-06-30T09:23:22.329152Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184
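What follows is ya's stream of per-test verdicts: each >> entry names a test, [GOOD] marks a pass, an entry without a verdict is still queued or running, and |NN.N%| markers report overall progress. For triaging a log of this size a quick tally of verdict markers is usually the first step; a minimal sketch of such a helper (hypothetical tool, not part of ya):

#include <iostream>
#include <map>
#include <string>

// Hypothetical triage helper: count verdict markers in a ya test log on stdin.
int main() {
    const std::string markers[] = {"[GOOD]", "[FAIL]", "[TIMEOUT]", "[CRASHED]"};
    std::map<std::string, long> counts;
    std::string line;
    while (std::getline(std::cin, line)) {
        for (const auto& m : markers) {
            // A single physical line may carry several verdicts.
            for (size_t pos = line.find(m); pos != std::string::npos;
                 pos = line.find(m, pos + m.size())) {
                ++counts[m];
            }
        }
    }
    for (const auto& [marker, n] : counts) {
        std::cout << marker << " " << n << "\n";
    }
}

Typical use: ./tally < ya_log.txt, then grep for the failing test names if any non-[GOOD] marker shows up.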
|82.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TDowntimeTest::CleanupOldSegments [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP64 [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL64
>> BackupPathTest::EncryptedImportWithoutCommonPrefix [GOOD]
>> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalAsync [GOOD]
>> TPersQueueTest::NoDecompressionMemoryLeaks [GOOD]
>> TCmsTenatsTest::TestDefaultTenantPolicyWithSingleTenantHost [GOOD]
>> TMaintenanceApiTest::SimplifiedMirror3DC [GOOD]
>> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable
>> TCmsTest::CheckUnreplicatedDiskPreventsRestart
>> BackupRestore::RestoreKesusResources [GOOD]
>> TCmsTest::WalleRebootDownNode [GOOD]
>> TCmsTest::TestProcessingQueue
>> TCmsTest::WalleRequestDuringRollingRestart
>> TCmsTest::Notifications
>> PhantomBlobs::TestOneDeadMirror3dc [GOOD]
>> KqpOlapScheme::NullKeySchema
>> PhantomBlobs::TestTwoDeadMirror3dc [GOOD]
>> KqpOlapScheme::NullKeySchema [GOOD]
>> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalUnique [GOOD]
>> TPersQueueTest::PreferredCluster_TwoEnabledClustersAndWriteSessionsWithDifferentPreferredCluster_SessionWithMismatchedClusterDiesAndOthersAlive
>> TCmsTenatsTest::TestLimitsWithDownNode
>> TMaintenanceApiTest::RequestReplaceDevicePDisk
>> TCmsTest::StateStorageRollingRestart
>> EncryptedExportTest::ViewEncryption [GOOD]
>> BackupPathTest::ExplicitDuplicatedItems
>> ProxyEncryption::CorrectlyFailOnNoKeys
>> IndexBuildTest::CheckLimitWithDroppedIndex [GOOD]
>> KqpOlapScheme::SetColumnFamily
>> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree
>> IndexBuildTest::DropIndex
>> BackupRestore::TestAllPrimitiveTypes-INTERVAL
>> EncryptedBackupParamsValidationTest::EmptyImportItem [GOOD]
>> TCmsTest::CheckUnreplicatedDiskPreventsRestart [GOOD]
>> TCmsTest::TestProcessingQueue [GOOD]
>> BackupRestore::RestoreReplicationWithoutSecret
>> TCmsTest::WalleRequestDuringRollingRestart [GOOD]
>> TCmsTest::Notifications [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL64 [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-STRING
>> TCmsTenatsTest::TestLimitsWithDownNode [GOOD]
>> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy
>> TMaintenanceApiTest::RequestReplaceDevicePDisk [GOOD]
>> TPersQueueTest::CheckACLForGrpcWrite
>> ProxyEncryption::CorrectlyFailOnNoKeys [GOOD]
>> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree [GOOD]
>> IndexBuildTest::DropIndex [GOOD]
>> KqpOlapScheme::SetColumnFamily [GOOD]
>> SystemView::PartitionStatsLocksFields
>> RequestValidation::TestSinglePutBlobSize0
>> BackupRestoreS3::TestAllPrimitiveTypes-BOOL
>> KqpOlapScheme::PrimaryKeyNotDefaultColumnFamily
>> BsControllerConfig::MergeBoxes [GOOD]
>> TPersQueueTest::PreferredCluster_TwoEnabledClustersAndWriteSessionsWithDifferentPreferredCluster_SessionWithMismatchedClusterDiesAndOthersAlive [GOOD]
>> SelfHeal::TestTwoMaintenanceRequestsOneFaultyMirror3dc [GOOD]
>> SelfHeal::TestTwoMaintenanceRequestsOneFaulty4Plus2Block
>> EncryptedBackupParamsValidationTest::IncorrectKeyImport
>> TCmsTest::AllVDisksEvictionInRack
>> BackupRestore::RestoreReplicationWithoutSecret [GOOD]
>> TCmsTest::Mirror3dcPermissions
>> TCmsTest::StateStorageRollingRestart [GOOD]
>> ImportBigEncryptedFileTest::ImportBigEncryptedFile
>> BackupPathTest::ExplicitDuplicatedItems [GOOD]
>> TPersQueueTest::CheckACLForGrpcWrite [GOOD]
>> TPersQueueTest::CheckACLForGrpcRead
>> RequestValidation::TestSinglePutBlobSize0 [GOOD]
>> RequestValidation::TestMultiPutBlobSize0 [GOOD]
>> RequestValidation::TestVMovedPatchSize0
>> RequestValidation::TestVMovedPatchSize0 [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-INTERVAL [GOOD]
>> TPersQueueTest::PreferredCluster_DisabledRemoteClusterAndWriteSessionsWithDifferentPreferredClusterAndLaterRemoteClusterEnabled_SessionWithMismatchedClusterDiesAfterPreferredClusterEnabledAndOtherSessionsAlive
>> EncryptedBackupParamsValidationTest::IncorrectKeyImport [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-BOOL [GOOD]
|82.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/test-results/unittest/{meta.json ... results_accumulator.log}
|82.7%| [TA] $(B)/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/{meta.json ...
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::AlterIndexTableDirectly [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:44.757890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:44.757914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.757920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:44.757925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:44.757931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:44.757934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:44.757941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:44.766230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:44.766363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:44.766442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:44.788026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:44.788047Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:44.791231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:44.791291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:44.791326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:44.793195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:44.793305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:44.793409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 
72057594046678944 2025-06-30T09:22:44.793471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:44.801587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.801677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:44.802038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:44.802071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:44.802076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:44.802080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:44.802094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.803720Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:44.827254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:44.827335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.827400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:44.827410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:44.827467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:44.827481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:44.828240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.828285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:44.828329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.828341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:44.828347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:44.828353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:44.828859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.828874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:44.828882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:44.829342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.829354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:44.829361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.829370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:44.832727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:44.833284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:44.833325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:44.833517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833567Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:44.833574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:44.833643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:44.833672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:44.833683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:44.834147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:44.834157Z node 1 :FLAT_TX_SCHEMESHARD ... 240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:32.974650Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/table/indexByValue" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:23:32.974678Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/table/indexByValue" took 29us result status StatusSuccess 2025-06-30T09:23:32.974809Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/table/indexByValue" PathDescription { Self { Name: "indexByValue" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 3 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 6 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1592 DataSize: 1592 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "indexByValue" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 3 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 
101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 100500 MinPartitionsCount: 1 FastSplitSettings { SizeThreshold: 100500 RowCountThreshold: 100500 } } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:32.974907Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/table/indexByValue/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:23:32.974926Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/table/indexByValue/indexImplTable" took 20us result status StatusSuccess 2025-06-30T09:23:32.975030Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/table/indexByValue/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 4 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 
ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 100500 MinPartitionsCount: 1 FastSplitSettings { SizeThreshold: 100500 RowCountThreshold: 100500 } } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409552 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 3 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1592 DataSize: 1592 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TCmsTest::AllVDisksEvictionInRack [GOOD] >> BackupRestore::RestoreExternalDataSourceWithoutSecret >> TCmsTest::Mirror3dcPermissions [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-STRING [GOOD] >> TCmsTest::StateStorageLockedNodes >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy [GOOD] >> SystemView::PartitionStatsLocksFields [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-JSON >> BackupRestoreS3::TestAllPrimitiveTypes-DOUBLE >> BackupRestore::RestoreExternalDataSourceWithoutSecret [GOOD] >> KqpOlapScheme::PrimaryKeyNotDefaultColumnFamily [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobal >> TCmsTest::StateStorageLockedNodes 
[GOOD]
>> BackupRestore::TestAllPrimitiveTypes-DATE32
>> SystemView::QueryStatsAllTables
>> BackupPathTest::ExportUnexistingExplicitPath
>> EncryptedBackupParamsValidationTest::EncryptionSettingsWithoutKeyImport
>> BackupRestoreS3::TestAllPrimitiveTypes-JSON [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-DOUBLE [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-DATE32 [GOOD]
>> KqpOlapScheme::SetNotDefaultColumnFamilyForPrimaryKey
>> BackupRestore::TestAllIndexTypes-EIndexTypeGlobal [GOOD]
>> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalAsync
>> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalAsync [GOOD]
>> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalUnique [GOOD]
>> BackupRestore::PrefixedVectorIndex
>> BackupPathTest::ExportUnexistingExplicitPath [GOOD]
>> EncryptedBackupParamsValidationTest::EncryptionSettingsWithoutKeyImport [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-JSON_DOCUMENT
>> BackupRestoreS3::TestAllPrimitiveTypes-DATE
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::SimplePartitions [GOOD]
Test command err: Trying to start YDB, gRPC: 18867, MsgBus: 6278
2025-06-30T09:22:58.610360Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670477693151748:2245];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:22:58.616100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044a6/r3tmp/tmpHRLFIi/pdisk_1.dat
2025-06-30T09:22:58.668169Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:22:58.670460Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670477693151511:2080] 1751275378591385 != 1751275378591388
TServer::EnableGrpc on GrpcPort 18867, node 1
2025-06-30T09:22:58.738902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:22:58.738918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:22:58.738921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:22:58.738977Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-30T09:22:58.739476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:22:58.739494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:22:58.747273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:6278
TClient is connected to server localhost:6278
WaitRootIsUp 'Root'...
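Each unit test in this dump boots a private single-node YDB server (here gRPC on port 18867, MsgBus on 6278) and talks to it through the in-process TClient; the Ls request/response that answers WaitRootIsUp follows below. The same endpoint could also be reached from outside the harness with the YDB C++ SDK. A minimal sketch, assuming the in-tree SDK header layout; header paths and result accessors differ between SDK releases, so treat the includes as an assumption:

#include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>

#include <iostream>

// Sketch only: connect to a locally started YDB test server and run a
// trivial query. Endpoint and database are taken from the log above.
int main() {
    auto config = NYdb::TDriverConfig()
        .SetEndpoint("localhost:18867")   // GrpcPort from the log
        .SetDatabase("/Root");
    NYdb::TDriver driver(config);

    NYdb::NTable::TTableClient client(driver);
    auto session = client.CreateSession().GetValueSync().GetSession();
    auto result = session.ExecuteDataQuery(
        "SELECT 1;",
        NYdb::NTable::TTxControl::BeginTx().CommitTx()).GetValueSync();
    std::cout << (result.IsSuccess() ? "OK" : "FAILED") << "\n";

    driver.Stop(true);
}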
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:58.892013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:58.895871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:22:58.904220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:58.960661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:59.028006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:59.099023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:59.295233Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670481988120406:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:59.295308Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:59.366859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.425678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.507505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.530078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.556416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.590137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.610952Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:59.617298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:59.655842Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670481988121070:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:59.655895Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:59.656126Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670481988121078:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:59.657264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:59.660770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:22:59.660875Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670481988121080:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:22:59.760297Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670481988121131:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 25138, MsgBus: 32229 2025-06-30T09:23:01.950356Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670491300446691:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:01.956596Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_ ... 5:7521670614253908122:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:23:30.789577Z node 15 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [15:7521670614253908173:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:31.237312Z node 15 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 13758, MsgBus: 25126 2025-06-30T09:23:31.759028Z node 16 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[16:7521670618177202658:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:31.759066Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044a6/r3tmp/tmpm11w7T/pdisk_1.dat 2025-06-30T09:23:31.774658Z node 16 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:31.774926Z node 16 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [16:7521670618177202639:2080] 1751275411758914 != 1751275411758917 TServer::EnableGrpc on GrpcPort 13758, node 16 2025-06-30T09:23:31.780938Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:31.780967Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:31.780969Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:31.781030Z node 16 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25126 TClient is connected to server localhost:25126 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
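The repeated warnings around /Root/.metadata/workload_manager/pools/default are a bootstrap race that the log resolves by itself: the pool fetcher reports NOT_FOUND, a TPoolCreatorActor creates the pool and schedules a "doublechecking" retry, and the later TX_PROXY line "Check failed: path ... error: path exist, request accepts it" shows a concurrent create being accepted as already done. That is the ordinary create-if-absent pattern; a generic sketch of it (hypothetical code, not the actor's implementation):

#include <iostream>
#include <set>
#include <string>

// Hypothetical illustration of the create-if-absent pattern visible in the
// log: a create that finds the path already present is treated as success.
enum class EStatus { Success, AlreadyExists };

static std::set<std::string> existingPaths; // stands in for the scheme board

EStatus CreatePath(const std::string& path) {
    return existingPaths.insert(path).second ? EStatus::Success
                                             : EStatus::AlreadyExists;
}

bool EnsurePath(const std::string& path) {
    switch (CreatePath(path)) {
        case EStatus::Success:
            return true;
        case EStatus::AlreadyExists:
            // "path exist, request accepts it": another session won the race.
            return true;
    }
    return false;
}

int main() {
    const std::string pool = "/Root/.metadata/workload_manager/pools/default";
    // Second call races with the first; both report success. Prints "1 1".
    std::cout << EnsurePath(pool) << " " << EnsurePath(pool) << "\n";
}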
2025-06-30T09:23:31.864224Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:31.864249Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:31.864940Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:31.865167Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:23:31.868029Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:31.881765Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:31.904049Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:31.916575Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:32.068190Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7521670622472171528:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:32.068214Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:32.070966Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:32.077888Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:32.090508Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:32.104694Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:32.118655Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:32.132561Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:32.187892Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:32.200188Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7521670622472172182:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:32.200216Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7521670622472172187:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:32.200222Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:32.200937Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:32.209076Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [16:7521670622472172189:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:23:32.302114Z node 16 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [16:7521670622472172240:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:32.761041Z node 16 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::RequestReplaceDevicePDisk [GOOD] Test command err: 2025-06-30T09:23:21.374526Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:21.375962Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:21.379685Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:21.379805Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:21.380400Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:21.380445Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:21.380642Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:21.380834Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-30T09:23:21.380993Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:21.381029Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:21.383609Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:21.383647Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:21.383700Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:21.383768Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:21.420384Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:21.454248Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:21.454377Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.456086Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:21.456327Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:21.456343Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:21.456359Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:21.456365Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:21.456396Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:21.456434Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:21.456585Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.459229Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 9 PDiskId: 9 Path: "/9/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 10 PDiskId: 10 Path: "/10/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 11 PDiskId: 11 Path: "/11/pdisk-11.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 12 PDiskId: 12 Path: "/12/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 13 PDiskId: 13 Path: "/13/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 14 PDiskId: 14 Path: "/14/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 15 PDiskId: 15 Path: "/15/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 16 PDiskId: 16 Path: "/16/pdisk-16.data" Guid: 1 DriveStatus: 
ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 
10 PDiskId: 10 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 
PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlo ... Rack: "6" Unit: "6" } StartTimeSeconds: 0 PileId: 0 } Timestamp: 120028000 } } 2025-06-30T09:23:27.292408Z node 28 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-34-34" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 34 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-35-35" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 35 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-28-28" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 28 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-29-29" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 29 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-30-30" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 30 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } 
StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-31-31" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 31 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-32-32" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 32 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-33-33" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 33 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 PileId: 0 } Timestamp: 120028000 } 2025-06-30T09:23:27.292469Z node 28 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.003000s 2025-06-30T09:23:27.292482Z node 28 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-30T09:23:27.292533Z node 28 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: REPLACE_DEVICES Host: "::1" Devices: "/28/pdisk-28.data" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false Reason: "" AvailabilityMode: MODE_MAX_AVAILABILITY MaintenanceTaskId: "" 2025-06-30T09:23:27.292546Z node 28 :CMS DEBUG: cms.cpp:379: Checking action: Type: REPLACE_DEVICES Host: "::1" Devices: "/28/pdisk-28.data" Duration: 60000000 2025-06-30T09:23:27.292605Z node 28 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:27.292627Z node 28 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-30T09:23:27.292639Z node 28 :CMS INFO: cluster_info.cpp:778: Adding lock for PDisk 28:28 (::1:/28/pdisk-28.data) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:27.292659Z node 28 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:27.292722Z node 28 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:03:00.028000Z, action# Type: REPLACE_DEVICES Host: "::1" Devices: "/28/pdisk-28.data" Duration: 60000000 2025-06-30T09:23:27.292733Z node 28 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-30T09:23:27.292802Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 28, wbId# [28:8388350642965737326:1634689637] 
2025-06-30T09:23:27.292811Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 29, wbId# [29:8388350642965737326:1634689637] 2025-06-30T09:23:27.292816Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 30, wbId# [30:8388350642965737326:1634689637] 2025-06-30T09:23:27.292821Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 31, wbId# [31:8388350642965737326:1634689637] 2025-06-30T09:23:27.292826Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 32, wbId# [32:8388350642965737326:1634689637] 2025-06-30T09:23:27.292832Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 33, wbId# [33:8388350642965737326:1634689637] 2025-06-30T09:23:27.292837Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 34, wbId# [34:8388350642965737326:1634689637] 2025-06-30T09:23:27.292843Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 35, wbId# [35:8388350642965737326:1634689637] 2025-06-30T09:23:27.292946Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 28, response# PDiskStateInfo { PDiskId: 28 CreateTime: 0 ChangeTime: 0 Path: "/28/pdisk-28.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-30T09:23:27.293042Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 29, response# PDiskStateInfo { PDiskId: 29 CreateTime: 0 ChangeTime: 0 Path: "/29/pdisk-29.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-30T09:23:27.293080Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 30, response# PDiskStateInfo { PDiskId: 30 CreateTime: 0 ChangeTime: 0 Path: "/30/pdisk-30.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-30T09:23:27.293095Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 31, response# PDiskStateInfo { PDiskId: 31 CreateTime: 0 ChangeTime: 0 Path: "/31/pdisk-31.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-30T09:23:27.293108Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 32, response# PDiskStateInfo { PDiskId: 32 CreateTime: 0 ChangeTime: 0 Path: "/32/pdisk-32.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-30T09:23:27.293121Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 33, response# PDiskStateInfo { PDiskId: 33 CreateTime: 0 ChangeTime: 0 Path: "/33/pdisk-33.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-30T09:23:27.293134Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 34, response# PDiskStateInfo { PDiskId: 34 CreateTime: 0 ChangeTime: 0 Path: "/34/pdisk-34.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 
2025-06-30T09:23:27.293147Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 35, response# PDiskStateInfo { PDiskId: 35 CreateTime: 0 ChangeTime: 0 Path: "/35/pdisk-35.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-30T09:23:27.293159Z node 28 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-30T09:23:27.323825Z node 28 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:27.375436Z node 28 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:27.375547Z node 28 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: REPLACE_DEVICES Host: "::1" Devices: "/28/pdisk-28.data" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false Reason: "" AvailabilityMode: MODE_MAX_AVAILABILITY MaintenanceTaskId: "" }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: REPLACE_DEVICES Host: "::1" Devices: "/28/pdisk-28.data" Duration: 60000000 } Deadline: 180028000 } } 2025-06-30T09:23:27.375561Z node 28 :CMS DEBUG: cms.cpp:1063: Schedule cleanup at 1970-01-01T00:05:00.028000Z 2025-06-30T09:23:27.375869Z node 28 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: WRONG_REQUEST Reason: "Unknown request user-r-1" } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> RequestValidation::TestVMovedPatchSize0 [GOOD] Test command err: RandomSeed# 6733941782827357853 2025-06-30T09:19:05.409985Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: data is too large; id# [1:1:0:0:0:20971520:1] size# 20971520 chunkSize# 134217728 Marker# BSVS02 2025-06-30T09:19:05.753148Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 99 PartSize# 100 id# [1:1:0:0:0:100:1] Marker# BSVS01 2025-06-30T09:19:06.405726Z 7 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: ingress mismatch; id# [1:1:0:0:0:100:2] Marker# BSVS11 2025-06-30T09:19:06.598910Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:19:06.599530Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 3 Marker# BSVS41 2025-06-30T09:19:06.926465Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:19:06.926951Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 2 Marker# BSVS41 2025-06-30T09:19:07.088549Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# 
BSVS44 2025-06-30T09:19:07.089023Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 3 Marker# BSVS41 2025-06-30T09:19:07.371901Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:19:07.372468Z 7 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) ydb/core/erasure/erasure.cpp:2116: Unknown crcMode = 2 Marker# BSVS41 2025-06-30T09:19:07.639185Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:19:07.639803Z 7 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) ydb/core/erasure/erasure.cpp:2116: Unknown crcMode = 3 Marker# BSVS41 2025-06-30T09:19:07.825224Z 2 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:19:07.825773Z 3 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 2 Marker# BSVS41 2025-06-30T09:19:08.027992Z 2 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:19:08.028512Z 3 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 3 Marker# BSVS41 2025-06-30T09:19:08.469024Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:19:08.469516Z 7 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 2 Marker# BSVS41 2025-06-30T09:19:08.724718Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:19:08.725183Z 7 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 3 Marker# BSVS41 2025-06-30T09:19:09.068469Z 1 00h00m46.310512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:19:09.068907Z 8 00h00m46.310512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 2 Marker# BSVS41 2025-06-30T09:19:09.376949Z 1 00h00m46.310512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-30T09:19:09.377441Z 8 00h00m46.310512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 3 Marker# BSVS41 *** PUT BLOB [72075186270680851:57:3905:6:786432:4194304:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:4194304:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:4194304:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:4194304:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:4194304:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** 0 5 1 6 2 7 3 0 4 1 5 2 6 3 7 4 2025-06-30T09:19:09.960155Z 8 00h02m00.060512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [72075186270680851:57:3905:6:786432:4194304:3] barrier# {Soft# {Gen# 57 Step# 3905} Hard# } 2025-06-30T09:19:09.960207Z 2 00h02m00.060512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [72075186270680851:57:3905:6:786432:4194304:5] barrier# {Soft# {Gen# 57 Step# 3905} Hard# } 2025-06-30T09:19:09.961678Z 8 00h02m00.060512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [72075186270680851:57:3905:6:786432:4194304:3] barrier# {Soft# {Gen# 57 Step# 3905} Hard# } 2025-06-30T09:19:09.962347Z 2 00h02m00.060512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [72075186270680851:57:3905:6:786432:4194304:5] barrier# {Soft# {Gen# 57 Step# 3905} Hard# } 0 5 1 6 2 7 3 0 4 1 5 2 6 3 7 4 BlobsWritten# 18144 step 0 waiting for replies scanning parts step 1 waiting for replies scanning parts step 2 waiting for replies scanning parts step 3 waiting for replies scanning parts step 4 waiting for replies scanning parts empty@ 4 0 .. 47 empty@ 4 120 .. 167 empty@ 4 240 .. 287 empty@ 4 432 .. 479 empty@ 4 552 .. 599 empty@ 4 672 .. 767 empty@ 4 840 .. 887 empty@ 4 960 .. 985 (runs of consecutive empty-part indices collapsed) empty@ 4 98 ... 
iteration 9612 .. 9699 compaction iteration 9700 .. 9799 compaction garbage collect iteration 9800 .. 9899 compaction iteration 9900 .. 9999 compaction garbage collect (per-iteration progress output collapsed) {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:0] NODATA} BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:1] OK Size# 12 FullDataSize# 12 BufferData# aGVsbG8sIHdvcmxk} BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:1] OK Size# 12 FullDataSize# 12 BufferData# aGVsbG8sIHdvcmxk} BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:3] OK Size# 0 FullDataSize# 12 BufferData# } BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:3] OK Size# 0 FullDataSize# 12 BufferData# } BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:1] OK Size# 12 FullDataSize# 12 BufferData# aGVsbG8sIHdvcmxk} BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:0] NODATA} BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:0] NODATA} BlockedGeneration# 0} rssOnBegin# 748195840 748195840 -> 748195840 2025-06-30T09:21:55.507247Z 1 00h01m00.010512s :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 269877761 Duration# 0.245709s 2025-06-30T09:22:26.673060Z 1 00h02m00.060512s :PIPE_SERVER ERROR: [72057594037932033] 
NodeDisconnected NodeId# 2 CTR 4000078 2025-06-30T09:23:32.526786Z 11 00h01m00.011024s :BS_NODE ERROR: {NW19@node_warden_group.cpp:212} error while parsing group GroupId# 2181038080 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "tenant key" MainKey.Version# 1 GroupKeyNonce# 2181038080 2025-06-30T09:23:32.710007Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:0:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:0:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:0:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-30T09:23:32.774243Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:0:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:0:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:0:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-30T09:23:32.774264Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:1:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:1:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:1:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-30T09:23:32.774270Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:2:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:2:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:2:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-30T09:23:32.774274Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:3:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:3:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:3:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-30T09:23:32.774279Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:4:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:4:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:4:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-30T09:23:32.774283Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:5:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:5:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:5:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-30T09:23:32.774288Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:6:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:6:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# 
[100:1:1:0:6:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-30T09:23:32.774292Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:7:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:7:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:7:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-30T09:23:32.774296Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:8:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:8:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:8:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-30T09:23:32.774301Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:9:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:9:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:9:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::DropIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:22:22.655026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:22.655057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:22.655062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:22.655069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:22.655085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:22.655089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:22.655099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:22.655114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:22.655252Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:22.655325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:22.668334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:22:22.668358Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:22.671759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:22.671820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:22.671867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:22.673255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:22.673330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:22.673435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:22.673520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:22.674147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:22.674199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:22.674489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:22.674498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:22.674522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:22.674535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:22.674542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:22.674563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.675895Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:22:22.696584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-30T09:22:22.696677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.696754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:22.696763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:22.696816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:22.696832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:22.697667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:22.697715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:22.697789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.697800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:22.697806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:22.697812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:22.698300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.698311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:22.698316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:22.698685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.698696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:22.698703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.698722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:22.699493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:22.699910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:22.699955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:22.700175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:22.700206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:22:22.700222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.700291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:22:22.700299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:22.700336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:22:22.700349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:22:22.700761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:22.700770Z node 1 :FLAT_TX_SCHEMESHARD ... 
5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:23:31.711270Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-30T09:23:31.711276Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 18446744073709551615 2025-06-30T09:23:31.711282Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 5 2025-06-30T09:23:31.711408Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:23:31.711420Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:23:31.711425Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-30T09:23:31.711429Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 8], version: 18446744073709551615 2025-06-30T09:23:31.711434Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 3 2025-06-30T09:23:31.711627Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:23:31.711642Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:23:31.711647Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-30T09:23:31.711652Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 15 2025-06-30T09:23:31.711657Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:23:31.712031Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:23:31.712046Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 
LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:23:31.712051Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-30T09:23:31.712430Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-30T09:23:31.712441Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 105:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:23:31.712513Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 4 2025-06-30T09:23:31.712544Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 2/3 2025-06-30T09:23:31.712550Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 2/3 2025-06-30T09:23:31.712554Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 2/3 2025-06-30T09:23:31.712557Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 2/3 2025-06-30T09:23:31.712562Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 105, ready parts: 2/3, is published: false 2025-06-30T09:23:31.712633Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:23:31.712644Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:23:31.712649Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-30T09:23:31.712692Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:23:31.712703Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-30T09:23:31.712706Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-30T09:23:31.712711Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 18446744073709551615 2025-06-30T09:23:31.712716Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId 
[OwnerId: 72057594046678944, LocalPathId: 9] was 4 2025-06-30T09:23:31.712730Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 105, ready parts: 2/3, is published: true 2025-06-30T09:23:31.712959Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 105:2, at schemeshard: 72057594046678944 2025-06-30T09:23:31.712968Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 105:2 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:23:31.713009Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-30T09:23:31.713029Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:2 progress is 3/3 2025-06-30T09:23:31.713033Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-30T09:23:31.713038Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:2 progress is 3/3 2025-06-30T09:23:31.713042Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-30T09:23:31.713047Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 105, ready parts: 3/3, is published: true 2025-06-30T09:23:31.713059Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:420:2375] message: TxId: 105 2025-06-30T09:23:31.713065Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-30T09:23:31.713072Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:0 2025-06-30T09:23:31.713077Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 105:0 2025-06-30T09:23:31.713096Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-30T09:23:31.713102Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:1 2025-06-30T09:23:31.713106Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 105:1 2025-06-30T09:23:31.713112Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 2 2025-06-30T09:23:31.713119Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:2 2025-06-30T09:23:31.713122Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 105:2 2025-06-30T09:23:31.713130Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-30T09:23:31.713255Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 105 2025-06-30T09:23:31.713572Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:23:31.713593Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:23:31.713600Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:23:31.714040Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:23:31.714059Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-30T09:23:31.714095Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-30T09:23:31.714104Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [3:947:2868] TestWaitNotification: OK eventTxId 105 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::WalleRequestDuringRollingRestart [GOOD] Test command err: 2025-06-30T09:23:21.890283Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:21.891149Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:21.893823Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:21.893915Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:21.894391Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:21.894452Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:21.894520Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:21.894721Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:21.894794Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-30T09:23:21.894910Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:21.897484Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:21.897519Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:21.897554Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:21.897602Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:21.931880Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:21.969050Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:21.969199Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.971262Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:21.971508Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:21.971518Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:21.971531Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:21.971536Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:21.971562Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:21.971595Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:21.971983Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.977100Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 4 Path: "/1/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 5 Path: "/1/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 6 Path: "/1/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 7 Path: "/1/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 8 Path: "/2/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 9 Path: "/2/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 10 Path: "/2/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 11 Path: "/2/pdisk-11.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 12 Path: "/3/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 13 Path: "/3/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 14 Path: "/3/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 15 Path: "/3/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 16 Path: "/4/pdisk-16.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 17 Path: "/4/pdisk-17.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 18 Path: "/4/pdisk-18.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 19 Path: "/4/pdisk-19.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 20 Path: "/5/pdisk-20.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 21 Path: "/5/pdisk-21.data" Guid: 1 DriveStatus: ACTIVE } 
PDisk { NodeId: 5 PDiskId: 22 Path: "/5/pdisk-22.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 23 Path: "/5/pdisk-23.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 24 Path: "/6/pdisk-24.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 25 Path: "/6/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 26 Path: "/6/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 27 Path: "/6/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 28 Path: "/7/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 29 Path: "/7/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 30 Path: "/7/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 31 Path: "/7/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 32 Path: "/8/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 33 Path: "/8/pdisk-33.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 34 Path: "/8/pdisk-34.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 35 Path: "/8/pdisk-35.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 9 PDiskId: 36 Path: "/9/pdisk-36.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 9 PDiskId: 37 Path: "/9/pdisk-37.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 9 PDiskId: 38 Path: "/9/pdisk-38.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 9 PDiskId: 39 Path: "/9/pdisk-39.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 10 PDiskId: 40 Path: "/10/pdisk-40.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 10 PDiskId: 41 Path: "/10/pdisk-41.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 10 PDiskId: 42 Path: "/10/pdisk-42.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 10 PDiskId: 43 Path: "/10/pdisk-43.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 11 PDiskId: 44 Path: "/11/pdisk-44.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 11 PDiskId: 45 Path: "/11/pdisk-45.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 11 PDiskId: 46 Path: "/11/pdisk-46.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 11 PDiskId: 47 Path: "/11/pdisk-47.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 12 PDiskId: 48 Path: "/12/pdisk-48.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 12 PDiskId: 49 Path: "/12/pdisk-49.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 12 PDiskId: 50 Path: "/12/pdisk-50.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 12 PDiskId: 51 Path: "/12/pdisk-51.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 13 PDiskId: 52 Path: "/13/pdisk-52.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 13 PDiskId: 53 Path: "/13/pdisk-53.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 13 PDiskId: 54 Path: "/13/pdisk-54.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 13 PDiskId: 55 Path: "/13/pdisk-55.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 14 PDiskId: 56 Path: "/14/pdisk-56.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 14 PDiskId: 57 Path: "/14/pdisk-57.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 14 PDiskId: 58 Path: "/14/pdisk-58.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 14 PDiskId: 59 Path: "/14/pdisk-59.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 15 PDiskId: 60 Path: "/15/pdisk-60.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 15 PDiskId: 61 Path: "/15/pdisk-61.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 15 PDiskId: 62 Path: "/15/pdisk-62.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 15 
PDiskId: 63 Path: "/15/pdisk-63.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 16 PDiskId: 64 Path: "/16/pdisk-64.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 16 PDiskId: 65 Path: "/16/pdisk-65.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 16 PDiskId: 66 Path: "/16/pdisk-66.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 16 PDiskId: 67 Path: "/16/pdisk-67.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 17 PDiskId: 68 Path: "/17/pdisk-68.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 17 PDiskId: 69 Path: "/17/pdisk-69.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 17 PDiskId: 70 Path: "/17/pdisk-70.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 17 PDiskId: 71 Path: "/17/pdisk-71.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 18 PDiskId: 72 Path: "/18/pdisk-72.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 18 PDiskId: 73 Path: "/18/pdisk-73.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 18 PDiskId: 74 Path: "/18/pdisk-74.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 18 PDiskId: 75 Path: "/18/pdisk-75.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 19 PDiskId: 76 Path: "/19/pdisk-76.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 19 PDiskId: 77 Path: "/19/pdisk-77.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 19 PDiskId: 78 Path: "/19/pdisk-78.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 19 PDiskId: 79 Path: "/19/pdisk-79.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 20 PDiskId: 80 Path: "/20/pdisk-80.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 20 PDiskId: 81 Path: "/20/pdisk-81.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 20 PDiskId: 82 Path: "/20/pdisk-82.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 20 PDiskId: 83 Path: "/20/pdisk-83.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 21 PDiskId: 84 Path: "/21/pdisk-84.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 21 PDiskId: 85 Path: "/21/pdisk-85.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 21 PDiskId: 86 Path: "/21/pdisk-86.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 21 PDiskId: 87 Path: "/21/pdisk-87.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 22 PDiskId: 88 Path: "/22/pdisk-88.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 22 PDiskId: 89 Path: "/22/pdisk-89.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 22 PDiskId: 90 Path: "/22/pdisk-90.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 22 PDiskId: 91 Path: "/22/pdisk-91.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 23 PDiskId: 92 Path: "/23/pdisk-92.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 23 PDiskId: 93 Path: "/23/pdisk-93.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 23 PDiskId: 94 Path: "/23/pdisk-94.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 23 PDiskId: 95 Path: "/23/pdisk-95.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 24 PDiskId: 96 Path: "/24/pdisk-96.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 24 PDiskId: 97 Path: "/24/pdisk-97.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 24 PDiskId: 98 Path: "/24/pdisk-98.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 24 PDiskId: 99 Path: "/24/pdisk-99.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { 
NodeId: 1 PDiskId: 5 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 } VSl ... ser-p-1" Action { Type: RESTART_SERVICES Host: "33" Services: "storage" Duration: 60000000 } Deadline: 180028000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 33 InterconnectPort: 12001 } } } } 2025-06-30T09:23:30.581539Z node 33 :CMS DEBUG: cms.cpp:1063: Schedule cleanup at 1970-01-01T00:05:00.028000Z 2025-06-30T09:23:30.583287Z node 33 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-1 2025-06-30T09:23:30.583308Z node 33 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-30T09:23:30.583319Z node 33 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-30T09:23:30.583331Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-1, reason# explicit remove 2025-06-30T09:23:30.605387Z node 33 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-30T09:23:30.605460Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-30T09:23:30.605588Z node 33 :CMS INFO: walle_create_task_adapter.cpp:30: Processing Wall-E request: TaskId: "task-1" Type: "automated" Issuer: "UT" Action: "reboot" Hosts: "34" DryRun: false 2025-06-30T09:23:30.616952Z node 33 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:30.616995Z node 33 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:30.617010Z node 33 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:30.638601Z node 33 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:30.638651Z node 33 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:30.638667Z node 33 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:30.638793Z node 33 :CMS INFO: cms.cpp:347: Check request: User: "Wall-E" Actions { Type: REBOOT_HOST Host: "34" Duration: 18446744073709551615 } Schedule: true DryRun: false Priority: 20 2025-06-30T09:23:30.638801Z node 33 :CMS DEBUG: cms.cpp:379: Checking action: Type: REBOOT_HOST Host: "34" Duration: 18446744073709551615 2025-06-30T09:23:30.638810Z node 33 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 34, with state: Locked, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-30T09:23:30.638818Z node 33 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '34': node state: 'Locked') 2025-06-30T09:23:30.638837Z node 33 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:30.638887Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# Wall-E-r-2, owner# Wall-E, order# 2, priority# 20, body# User: "Wall-E" Actions { Type: REBOOT_HOST Host: "34" Duration: 18446744073709551615 Issue { Type: GENERIC Message: "Cannot lock node \'34\': node state: \'Locked\'" } } PartialPermissionAllowed: false Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: 20 
2025-06-30T09:23:30.649842Z node 33 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:30.649938Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "Wall-E" Actions { Type: REBOOT_HOST Host: "34" Duration: 18446744073709551615 } Schedule: true DryRun: false Priority: 20 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'34\': node state: \'Locked\'" } RequestId: "Wall-E-r-2" Deadline: 420232512 } 2025-06-30T09:23:30.650006Z node 33 :CMS DEBUG: cms_tx_store_walle_task.cpp:23: TTxStoreWalleTask Execute 2025-06-30T09:23:30.650033Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store wall-e task: id# task-1, requestId# Wall-E-r-2 2025-06-30T09:23:30.671210Z node 33 :CMS DEBUG: cms_tx_store_walle_task.cpp:53: TTxStoreWalleTask Complete 2025-06-30T09:23:30.671251Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvStoreWalleTask { Task: { TaskId: task-1 RequestId: Wall-E-r-2 Owner: Permissions: [] HasSingleCompositeActionGroup: 0 CreateTime: 1970-01-01T00:00:00.000000Z LastRefreshTime: 1970-01-01T00:00:00.000000Z } }, response# NKikimr::NCms::TEvCms::TEvWalleTaskStored { TaskId: task-1 } 2025-06-30T09:23:30.671322Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [Wall-E adapter] Reply: request# NKikimr::NCms::TEvCms::TEvWalleCreateTaskRequest { TaskId: "task-1" Type: "automated" Issuer: "UT" Action: "reboot" Hosts: "34" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvWalleCreateTaskResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'34\': node state: \'Locked\'" } TaskId: "task-1" Hosts: "34" } 2025-06-30T09:23:30.745276Z node 33 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:30.745331Z node 33 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:30.745351Z node 33 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:30.745532Z node 33 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "34" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (33) has temporary lock, VDisk [0:1:0:1:0] (::1:/34/pdisk-34.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-30T09:23:30.745549Z node 33 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "34" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (33) has temporary lock, VDisk [0:1:0:1:0] (::1:/34/pdisk-34.data) is locked by this request. 
Down: " } 2025-06-30T09:23:30.745562Z node 33 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 34, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:30.745605Z node 33 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:30.745630Z node 33 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-2, requestId# user-r-1, owner# user 2025-06-30T09:23:30.745639Z node 33 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12002 (34) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:30.745654Z node 33 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:30.745735Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:03:00.335536Z, action# Type: RESTART_SERVICES Host: "34" Services: "storage" Duration: 60000000 2025-06-30T09:23:30.745749Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-30T09:23:30.756919Z node 33 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:30.757041Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-2" Action { Type: RESTART_SERVICES Host: "34" Services: "storage" Duration: 60000000 } Deadline: 180335536 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 34 InterconnectPort: 12002 } } } } 2025-06-30T09:23:30.757213Z node 33 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-2 2025-06-30T09:23:30.757227Z node 33 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-30T09:23:30.757246Z node 33 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-30T09:23:30.757275Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-06-30T09:23:30.768381Z node 33 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-30T09:23:30.768465Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-30T09:23:30.768630Z node 33 :CMS INFO: walle_check_task_adapter.cpp:29: Processing Wall-E request: TaskId: "task-1" 2025-06-30T09:23:30.780333Z node 33 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:30.780393Z node 33 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:30.780411Z node 33 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:30.780591Z node 33 :CMS INFO: cms.cpp:347: Check request: User: "Wall-E" Actions { Type: REBOOT_HOST Host: "34" Duration: 18446744073709551615 Issue { Type: GENERIC Message: "Cannot lock node \'34\': node state: \'Locked\'" } } PartialPermissionAllowed: false Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: 20 2025-06-30T09:23:30.780603Z node 33 :CMS DEBUG: cms.cpp:379: Checking action: Type: REBOOT_HOST Host: "34" Duration: 18446744073709551615 
Issue { Type: GENERIC Message: "Cannot lock node \'34\': node state: \'Locked\'" } 2025-06-30T09:23:30.780615Z node 33 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 34, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:30.780658Z node 33 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:30.780684Z node 33 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# Wall-E-p-3, requestId# Wall-E-r-2, owner# Wall-E 2025-06-30T09:23:30.780692Z node 33 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12002 (34) (permission Wall-E-p-3 until 586524-01-19T08:01:49Z) 2025-06-30T09:23:30.780707Z node 33 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:30.780754Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# Wall-E-p-3, validity# 586524-01-19T08:01:49.551615Z, action# Type: REBOOT_HOST Host: "34" Duration: 18446744073709551615 2025-06-30T09:23:30.780766Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# Wall-E-r-2, owner# Wall-E 2025-06-30T09:23:30.791858Z node 33 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:30.791960Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "Wall-E" RequestId: "Wall-E-r-2" }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "Wall-E-p-3" Action { Type: REBOOT_HOST Host: "34" Duration: 18446744073709551615 } Deadline: 18446744073709551615 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 34 InterconnectPort: 12002 } } } } 2025-06-30T09:23:30.792018Z node 33 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [Wall-E adapter] Reply: request# NKikimr::NCms::TEvCms::TEvWalleCheckTaskRequest { TaskId: "task-1" }, response# NKikimr::NCms::TEvCms::TEvWalleCheckTaskResponse { Status { Code: ALLOW } Task { TaskId: "task-1" Hosts: "34" } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::AllVDisksEvictionInRack [GOOD] Test command err: 2025-06-30T09:23:21.289679Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:21.290291Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:21.293676Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:21.293801Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:21.294279Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:21.294398Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:21.295146Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:21.295181Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:21.295230Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-30T09:23:21.295340Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:21.296557Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:21.296608Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:21.296641Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:21.296672Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:21.327935Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:21.350380Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:21.350510Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.352240Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:21.352553Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:21.352566Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:21.352577Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:21.352582Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:21.352599Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.352639Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:21.352665Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:21.355750Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { 
NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { 
NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-30T09:23:21.392179Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:21.392251Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:21.435592Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:21.435637Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:21.435732Z node 1 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:21.436066Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: 
"pdisk-4-4" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "stor ... andle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 27, response# PDiskStateInfo { PDiskId: 27 CreateTime: 0 ChangeTime: 0 Path: "/27/pdisk-27.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180026 2025-06-30T09:23:31.660598Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 29, response# PDiskStateInfo { PDiskId: 29 CreateTime: 0 ChangeTime: 0 Path: "/29/pdisk-29.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180026 2025-06-30T09:23:31.660609Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 30, response# PDiskStateInfo { PDiskId: 30 CreateTime: 0 ChangeTime: 0 Path: "/30/pdisk-30.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180026 2025-06-30T09:23:31.660622Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 32, response# PDiskStateInfo { PDiskId: 32 CreateTime: 0 ChangeTime: 0 Path: "/32/pdisk-32.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180026 2025-06-30T09:23:31.660634Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 28, response# PDiskStateInfo { PDiskId: 28 CreateTime: 0 ChangeTime: 0 Path: "/28/pdisk-28.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180026 2025-06-30T09:23:31.660645Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 31, response# PDiskStateInfo { PDiskId: 31 CreateTime: 0 ChangeTime: 0 Path: "/31/pdisk-31.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180026 2025-06-30T09:23:31.660662Z node 25 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-30T09:23:31.660709Z node 25 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 26:26, status# ACTIVE, required status# FAULTY, reason# Forced status, dry run# 0 2025-06-30T09:23:31.660719Z node 25 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 25:25, status# ACTIVE, required status# FAULTY, reason# Forced status, dry run# 0 2025-06-30T09:23:31.660726Z node 25 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 2 2025-06-30T09:23:31.660770Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-06-30T09:23:31.660855Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-06-30T09:23:31.660881Z node 25 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Status { Success: true } Success: true, cookie# 1 2025-06-30T09:23:31.660884Z node 25 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 25:25 2025-06-30T09:23:31.660887Z node 25 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 26:26 2025-06-30T09:23:31.672192Z node 25 :CMS 
DEBUG: cms_tx_log_and_send.cpp:27: TTxLogAndSend Complete 2025-06-30T09:23:31.672230Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:27: TTxLogAndSend Complete 2025-06-30T09:23:31.694125Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:31.694178Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:31.694204Z node 25 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:03:00Z 2025-06-30T09:23:31.694319Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 25 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-30T09:23:31.694328Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 25 has not yet been completed" } 2025-06-30T09:23:31.694338Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 25, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:31.694345Z node 25 :CMS DEBUG: cms.cpp:730: Ring: 0; State: Ok 2025-06-30T09:23:31.694348Z node 25 :CMS DEBUG: cms.cpp:730: Ring: 1; State: Ok 2025-06-30T09:23:31.694350Z node 25 :CMS DEBUG: cms.cpp:730: Ring: 2; State: Ok 2025-06-30T09:23:31.694353Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:31.694373Z node 25 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-30T09:23:31.694379Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:13:00Z) 2025-06-30T09:23:31.694390Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:31.694437Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:13:00.126512Z, action# Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 2025-06-30T09:23:31.694456Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-30T09:23:31.705393Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:31.705486Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 } Deadline: 780126512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 25 InterconnectPort: 12001 } } } } 2025-06-30T09:23:31.705521Z node 25 :CMS DEBUG: cms.cpp:1063: Schedule cleanup at 1970-01-01T00:33:00.126512Z 2025-06-30T09:23:31.717229Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:13:00Z) 
2025-06-30T09:23:31.717341Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:31.717367Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:31.717386Z node 25 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:03:00Z 2025-06-30T09:23:31.717549Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 26 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-30T09:23:31.717561Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 26 has not yet been completed" } 2025-06-30T09:23:31.717574Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-30T09:23:31.717582Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:31.717611Z node 25 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-30T09:23:31.717618Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12002 (26) (permission user-p-2 until 1970-01-01T00:13:00Z) 2025-06-30T09:23:31.717631Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:31.717678Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:13:00.228024Z, action# Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 2025-06-30T09:23:31.717716Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-2, owner# user, order# 2, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-30T09:23:31.728731Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:31.728836Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-2" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-2" Permissions { Id: "user-p-2" Action { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 } Deadline: 780228024 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } 2025-06-30T09:23:31.728967Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-1 2025-06-30T09:23:31.728975Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-30T09:23:31.728988Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-30T09:23:31.729013Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reset host markers: host# 25 2025-06-30T09:23:31.729032Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, reason# permission user-p-1 was removed 2025-06-30T09:23:31.729036Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove 
permission: id# user-p-1, reason# explicit remove 2025-06-30T09:23:31.739953Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-30T09:23:31.740032Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-30T09:23:31.740181Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-2 2025-06-30T09:23:31.740192Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-30T09:23:31.740206Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-30T09:23:31.740234Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reset host markers: host# 26 2025-06-30T09:23:31.740268Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-2, reason# permission user-p-2 was removed 2025-06-30T09:23:31.740275Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-06-30T09:23:31.751175Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-30T09:23:31.751247Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } }
>> BackupRestore::TestAllPrimitiveTypes-DATETIME64
>> KqpOlapScheme::SetNotDefaultColumnFamilyForPrimaryKey [GOOD]
>> BackupRestore::PrefixedVectorIndex [GOOD]
>> BackupPathTest::ExportUnexistingCommonSourcePath
>> EncryptedBackupParamsValidationTest::NoSourcePrefixEncrypted
>> BackupRestore::TestAllPrimitiveTypes-DATETIME64 [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-DATE [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-INTERVAL64
>> BackupRestore::RestoreReplicationThatDoesNotUseSecret
>> BackupPathTest::ExportUnexistingCommonSourcePath [GOOD]
>> EncryptedBackupParamsValidationTest::NoSourcePrefixEncrypted [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME
>> BackupRestore::TestAllPrimitiveTypes-INTERVAL64 [GOOD]
>> BackupRestore::RestoreReplicationThatDoesNotUseSecret [GOOD]
>> BackupPathTest::FilterByPathFailsWhenNoSchemaMapping
>> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPathSpecified
>> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-STRING
>> BackupRestore::ReplicasAreNotBackedUp
>> BackupPathTest::FilterByPathFailsWhenNoSchemaMapping [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-DATE32
>> BackupRestoreS3::TestAllPrimitiveTypes-DATE32 [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64
>> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPathSpecified [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64 [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER
>> BackupRestore::TestAllPrimitiveTypes-STRING [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-JSON
>> BackupPathTest::OnlyOneEmptyDirectory
>> BackupRestore::ReplicasAreNotBackedUp [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_kqp_errors/unittest >>
KqpErrors::ResolveTableError [GOOD] Test command err: 2025-06-30T09:23:20.471066Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:20.471410Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004747/r3tmp/tmp7KAIyd/pdisk_1.dat 2025-06-30T09:23:20.590129Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:20.695239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:20.781151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:20.781219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:20.782175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:20.782209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:20.794680Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:23:20.794960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:20.795124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:21.105908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:21.786142Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:198: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Bootstrap done, become ReadyState 2025-06-30T09:23:21.786229Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:608: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
Executing physical tx, type: 2, stages: 1 2025-06-30T09:23:21.786246Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-30T09:23:21.786276Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:623: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Got request, become WaitResolveState 2025-06-30T09:23:21.786336Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715658. Resolved key sets: 1 2025-06-30T09:23:21.786380Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715658. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-30T09:23:21.786405Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2035: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (Iterator (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3))))) )))) ) 2025-06-30T09:23:21.786441Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:1512: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Stage [0,0] create compute task: 1 2025-06-30T09:23:21.786484Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Database not set, use /Root 2025-06-30T09:23:21.786490Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-30T09:23:21.786602Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
Collect channels updates for task: 1 at actor [1:1471:2898] 2025-06-30T09:23:21.786611Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Sending channels info to compute actor: [1:1471:2898], channels: 0 2025-06-30T09:23:21.786629Z node 1 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-30T09:23:21.786634Z node 1 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2809: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Updating channels after the creation of compute actors 2025-06-30T09:23:21.786637Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Collect channels updates for task: 1 at actor [1:1471:2898] 2025-06-30T09:23:21.786639Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Sending channels info to compute actor: [1:1471:2898], channels: 0 2025-06-30T09:23:21.786651Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Waiting for: CA [1:1471:2898], 2025-06-30T09:23:21.786656Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [1:1471:2898], 2025-06-30T09:23:21.786659Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-30T09:23:21.788377Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:1468:2898] TxId: 281474976715658. 
Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: ExecuteState, got execution state from compute actor: [1:1471:2898], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-30T09:23:21.788410Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Waiting for: CA [1:1471:2898], 2025-06-30T09:23:21.788416Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [1:1471:2898], 2025-06-30T09:23:21.788757Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: ExecuteState, got execution state from compute actor: [1:1471:2898], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 463 Tasks { TaskId: 1 CpuTimeUs: 278 FinishTimeMs: 1751275401788 EgressBytes: 30 EgressRows: 3 ComputeCpuTimeUs: 24 BuildCpuTimeUs: 254 HostName: "ghrun-x4qzxsyz2q" NodeId: 1 CreateTimeMs: 1751275401786 UpdateTimeMs: 1751275401788 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:21.788782Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Compute actor has finished execution: [1:1471:2898] 2025-06-30T09:23:21.788795Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:276: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Send Commit to BufferActor=[1:1467:2898] 2025-06-30T09:23:21.788804Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Resource usage for last stat interval: ComputeTime: 0.000463s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-30T09:23:21.844809Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. terminate execution. 
2025-06-30T09:23:21.844842Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2203: ActorId: [1:1468:2898] TxId: 281474976715658. Ctx: { TraceId: 01jz02bwgg7x6vsb497wch8vwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZlM2VlMGMtYjdmN2UyY2ItNzY4Zjg3Yy01ZGU5YWIxZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Terminate, become ZombieState 2025-06-30T09:23:21.855515Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:1487:2916], status: UNAVAILABLE, issues:
<main>: Error: Table metadata loading, code: 1050
<main>:1:1: Error: Failed to load metadata for table: db.[/Root/table-1]
<main>: Error: LookupError, code: 2005 2025-06-30T09:23:21.856376Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YTY0MDAwNDQtYTM5ZmY2MzctMmU3YjdiYzgtMjQ4YTQxOGE=, ActorId: [1:1485:2914], ActorState: ExecuteState, TraceId: 01jz02bwkp80pj4wsca2h7cwyf, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id:
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD]
Test command err: 2025-06-30T09:23:20.028101Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
<main>: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:20.028216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:20.028267Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:23:20.028347Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
<main>: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:20.028393Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:20.028425Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004750/r3tmp/tmpok96X9/pdisk_1.dat 2025-06-30T09:23:20.143367Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:20.255805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:20.359454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:20.359509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:20.365137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:20.365178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:20.379104Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:23:20.379331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:20.379489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:20.658787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:21.274801Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1506:2912], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:21.274831Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1516:2917], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:21.274908Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:21.276325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:21.425428Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:21.425493Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:21.843908Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1520:2920], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:23:22.012124Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1658:2998] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:22.082407Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:198: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Bootstrap done, become ReadyState 2025-06-30T09:23:22.082512Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:608: ActorId: [1:1684:2910] TxId: 281474976715660. Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 2, stages: 1 2025-06-30T09:23:22.082534Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-30T09:23:22.082576Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:623: ActorId: [1:1684:2910] TxId: 281474976715660. Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got request, become WaitResolveState 2025-06-30T09:23:22.082665Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715660. Resolved key sets: 1 2025-06-30T09:23:22.082727Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715660. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-30T09:23:22.082790Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2035: ActorId: [1:1684:2910] TxId: 281474976715660. Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (Iterator (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3))))) )))) ) 2025-06-30T09:23:22.082837Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:1512: ActorId: [1:1684:2910] TxId: 281474976715660.
Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Stage [0,0] create compute task: 1 2025-06-30T09:23:22.082887Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:22.082897Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715660. Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-30T09:23:22.083061Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715660. Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [1:1687:2910] 2025-06-30T09:23:22.083076Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715660. Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [1:1687:2910], channels: 0 2025-06-30T09:23:22.083103Z node 1 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [1:1684:2910] TxId: 281474976715660. Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-30T09:23:22.083110Z node 1 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2809: ActorId: [1:1684:2910] TxId: 281474976715660. Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Updating channels after the creation of compute actors 2025-06-30T09:23:22.083115Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715660. Ctx: { TraceId: 01jz02bw1t93jekzszxsv8w5k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVlYzZjZDYtMjMwMWMyZGQtYmY1NGVmNzItZTdlYWEwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [1:1687:2910] 2025-06-30T09:23:22.08312 ... n/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CT 1, CA [3:1752:3053], 2025-06-30T09:23:24.987173Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1744:3053] TxId: 281474976715663. 
Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:1752:3053], 2025-06-30T09:23:24.987313Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:791: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing task: 1 on compute actor: [4:1754:2435] 2025-06-30T09:23:24.987319Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [4:1754:2435] 2025-06-30T09:23:24.987326Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:838: TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Task: 1, output channelId: 1, dst task: 2, at actor [3:1752:3053] 2025-06-30T09:23:24.987332Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [4:1754:2435], channels: 1 2025-06-30T09:23:24.987337Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:1752:3053], channels: 1 2025-06-30T09:23:24.987368Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [4:1754:2435], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-30T09:23:24.987374Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1754:2435], CA [3:1752:3053], 2025-06-30T09:23:24.987396Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [4:1754:2435], CA [3:1752:3053], 2025-06-30T09:23:24.987516Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [4:1754:2435], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 140 Tasks { TaskId: 1 CpuTimeUs: 87 ComputeCpuTimeUs: 5 BuildCpuTimeUs: 82 HostName: "ghrun-x4qzxsyz2q" NodeId: 4 CreateTimeMs: 1751275404986 CurrentWaitInputTimeUs: 6 UpdateTimeMs: 1751275404987 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:24.987535Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1754:2435], CA [3:1752:3053], 2025-06-30T09:23:24.987541Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [4:1754:2435], CA [3:1752:3053], 2025-06-30T09:23:24.989135Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:391: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got result, channelId: 2, shardId: 0, inputIndex: 0, from: [3:1753:3053], finished: 0 2025-06-30T09:23:24.989168Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:394: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send ack to channelId: 2, seqNo: 1, to: [3:1753:3053] 2025-06-30T09:23:24.990596Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:391: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got result, channelId: 2, shardId: 0, inputIndex: 0, from: [3:1753:3053], finished: 1 2025-06-30T09:23:24.990615Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:394: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send ack to channelId: 2, seqNo: 2, to: [3:1753:3053] 2025-06-30T09:23:24.995704Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1744:3053] TxId: 281474976715663. 
Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:1752:3053], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 404 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 264 FinishTimeMs: 1751275404990 InputRows: 3 InputBytes: 12 OutputRows: 3 OutputBytes: 12 ResultRows: 3 ResultBytes: 12 ComputeCpuTimeUs: 55 BuildCpuTimeUs: 209 HostName: "ghrun-x4qzxsyz2q" NodeId: 3 CreateTimeMs: 1751275404986 UpdateTimeMs: 1751275404990 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:24.995760Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [3:1752:3053] 2025-06-30T09:23:24.995780Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1754:2435], 2025-06-30T09:23:24.995790Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [4:1754:2435], 2025-06-30T09:23:24.995932Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [4:1754:2435], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 416 DurationUs: 7000 Tasks { TaskId: 1 CpuTimeUs: 133 FinishTimeMs: 1751275404995 OutputRows: 3 OutputBytes: 12 Tables { TablePath: "/Root/table-1" ReadRows: 3 ReadBytes: 24 AffectedPartitions: 4 } IngressRows: 3 ComputeCpuTimeUs: 51 BuildCpuTimeUs: 82 WaitInputTimeUs: 1653 HostName: "ghrun-x4qzxsyz2q" NodeId: 4 StartTimeMs: 1751275404988 CreateTimeMs: 1751275404986 UpdateTimeMs: 1751275404995 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:24.995944Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [4:1754:2435] 2025-06-30T09:23:24.995992Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [3:1744:3053] TxId: 281474976715663. 
Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:23:24.996004Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2203: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-06-30T09:23:24.996016Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [3:1744:3053] TxId: 281474976715663. Ctx: { TraceId: 01jz02bzmz1xtx3ab08tc8stcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZiNGJjNmYtYjNhNWU0MzMtYjYwM2Q1NzItODc1OGY5YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000820s ReadRows: 3 ReadBytes: 24 ru: 3 rate limiter was not found force flag: 1 { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 3 } }
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::TestProcessingQueue [GOOD]
Test command err: 2025-06-30T09:23:21.259689Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:21.260263Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:21.267910Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:21.268011Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:21.268422Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:21.268534Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:21.269312Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:21.269354Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:21.269406Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config.
2025-06-30T09:23:21.269534Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:21.271027Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:21.271111Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:21.271152Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:21.271208Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:21.305646Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:21.328297Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:21.328421Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.329696Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:21.329967Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:21.329974Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:21.329981Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:21.329984Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:21.329997Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.330027Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:21.330061Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:21.332425Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 4 Path: "/1/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 5 Path: "/1/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 6 Path: "/1/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 7 Path: "/1/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 8 Path: "/2/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 9 Path: "/2/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 10 Path: "/2/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 11 Path: "/2/pdisk-11.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 12 Path: "/3/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 13 Path: "/3/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 14 Path: "/3/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 15 Path: "/3/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 16 Path: "/4/pdisk-16.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 17 Path: "/4/pdisk-17.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 18 Path: "/4/pdisk-18.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 19 Path: "/4/pdisk-19.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 20 Path: "/5/pdisk-20.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 21 Path: "/5/pdisk-21.data" Guid: 1 DriveStatus: ACTIVE } 
PDisk { NodeId: 5 PDiskId: 22 Path: "/5/pdisk-22.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 23 Path: "/5/pdisk-23.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 24 Path: "/6/pdisk-24.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 25 Path: "/6/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 26 Path: "/6/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 27 Path: "/6/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 28 Path: "/7/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 29 Path: "/7/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 30 Path: "/7/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 31 Path: "/7/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 32 Path: "/8/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 33 Path: "/8/pdisk-33.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 34 Path: "/8/pdisk-34.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 35 Path: "/8/pdisk-35.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 
1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 18 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 18 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 18 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 18 
VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 19 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 19 VSlotId: 1001 } GroupId: 13 GroupGe ... te for unknown PDisk 22:22 2025-06-30T09:23:25.227681Z node 17 :CMS ERROR: cluster_info.cpp:490: Cannot update state for unknown PDisk 23:23 2025-06-30T09:23:25.227685Z node 17 :CMS ERROR: cluster_info.cpp:490: Cannot update state for unknown PDisk 24:24 2025-06-30T09:23:25.227839Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:25.227894Z node 17 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:25.227962Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-30T09:23:25.227973Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 2025-06-30T09:23:25.227988Z node 17 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 17, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:25.228007Z node 17 :CMS DEBUG: cms.cpp:730: Ring: 0; State: Ok 2025-06-30T09:23:25.228011Z node 17 :CMS DEBUG: cms.cpp:730: Ring: 1; State: Ok 2025-06-30T09:23:25.228014Z node 17 :CMS DEBUG: cms.cpp:730: Ring: 2; State: Ok 2025-06-30T09:23:25.228019Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:25.228069Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 } Deadline: 180129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 17 InterconnectPort: 12001 } } } } 2025-06-30T09:23:25.228096Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-30T09:23:25.228104Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 2025-06-30T09:23:25.228109Z node 17 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 18, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:25.228114Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:25.228138Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: 
ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 } Deadline: 180129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 18 InterconnectPort: 12002 } } } } 2025-06-30T09:23:25.228159Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-30T09:23:25.228165Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 2025-06-30T09:23:25.228170Z node 17 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 19, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:25.228175Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:25.228199Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 } Deadline: 180129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 19 InterconnectPort: 12003 } } } } 2025-06-30T09:23:25.228214Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "20" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-30T09:23:25.228236Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "20" Services: "storage" Duration: 60000000 2025-06-30T09:23:25.228242Z node 17 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 20, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:25.228246Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:25.228266Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "20" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "20" Services: "storage" Duration: 60000000 } Deadline: 180129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 20 InterconnectPort: 12004 } } } } 2025-06-30T09:23:25.228275Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "21" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-30T09:23:25.228279Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "21" Services: "storage" Duration: 60000000 2025-06-30T09:23:25.228282Z node 17 :CMS 
DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 21, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:25.228287Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:25.228300Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "21" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "21" Services: "storage" Duration: 60000000 } Deadline: 180129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 21 InterconnectPort: 12005 } } } } 2025-06-30T09:23:25.228310Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "22" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-30T09:23:25.228314Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "22" Services: "storage" Duration: 60000000 2025-06-30T09:23:25.228317Z node 17 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 22, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:25.228320Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:25.228332Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "22" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "22" Services: "storage" Duration: 60000000 } Deadline: 180129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 22 InterconnectPort: 12006 } } } } 2025-06-30T09:23:25.228345Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "23" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-30T09:23:25.228352Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "23" Services: "storage" Duration: 60000000 2025-06-30T09:23:25.228357Z node 17 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 23, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:25.228361Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:25.228385Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "23" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: 
RESTART_SERVICES Host: "23" Services: "storage" Duration: 60000000 } Deadline: 180129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 23 InterconnectPort: 12007 } } } } 2025-06-30T09:23:25.228402Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "24" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-30T09:23:25.228409Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "24" Services: "storage" Duration: 60000000 2025-06-30T09:23:25.228416Z node 17 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 24, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:25.228421Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:25.228444Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "24" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "24" Services: "storage" Duration: 60000000 } Deadline: 180129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 24 InterconnectPort: 12008 } } } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::ManyPartitions_1 [GOOD] Test command err: Trying to start YDB, gRPC: 11690, MsgBus: 18870 2025-06-30T09:22:56.260968Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670467051107349:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:56.261006Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044e2/r3tmp/tmp3mAGIs/pdisk_1.dat 2025-06-30T09:22:56.332729Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11690, node 1 2025-06-30T09:22:56.369119Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:56.369147Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:56.369150Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:56.369200Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18870 2025-06-30T09:22:56.405920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:56.405956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:56.406876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is 
connected to server localhost:18870 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:56.501418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:56.507402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:22:56.515605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:56.552784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:22:56.587866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:22:56.602147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:56.875269Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670467051108902:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:56.875302Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:56.929935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:56.944628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:56.960919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:56.976263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:56.986807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.001007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.014689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:57.035303Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670471346076850:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.035332Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670471346076855:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.035339Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:57.036416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:57.041101Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670471346076857:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:22:57.131598Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670471346076908:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:57.263192Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:22:57.315756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 27718, MsgBus: 3583 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044e2/r3tmp/tmpimd8Sa/pdisk_1.dat 2025-06-30T09:22:59.470542Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:22:59.472040Z node 2 :IMPORT WARN: schemesh ... hStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:30.699067Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
Trying to start YDB, gRPC: 14441, MsgBus: 1034 2025-06-30T09:23:31.190017Z node 20 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[20:7521670616828850631:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:31.190043Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044e2/r3tmp/tmpzB3nnx/pdisk_1.dat 2025-06-30T09:23:31.204375Z node 20 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:31.204617Z node 20 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [20:7521670616828850611:2080] 1751275411189934 != 1751275411189937 TServer::EnableGrpc on GrpcPort 14441, node 20 2025-06-30T09:23:31.209963Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:31.209974Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:31.209975Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:31.210007Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1034 TClient is connected to server localhost:1034 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:31.294487Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:31.294519Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:31.294797Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:23:31.295454Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:31.305804Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:31.315932Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:31.337275Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:31.348890Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:31.505376Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7521670616828852203:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:31.505398Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:31.513436Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:31.568864Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:31.624736Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:31.679909Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:31.692203Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:31.705904Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:31.719863Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:31.736121Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7521670616828852861:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:31.736154Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:31.736172Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7521670616828852866:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:31.736885Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:31.739063Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [20:7521670616828852868:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:23:31.814855Z node 20 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [20:7521670616828852919:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:31.912330Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> BackupPathTest::OnlyOneEmptyDirectory [GOOD] >> BackupRestore::SkipEmptyDirsOnRestore >> BackupRestore::TestAllPrimitiveTypes-JSON [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON_DOCUMENT >> BackupPathTest::ExportRecursiveWithoutDestinationPrefix >> BackupRestore::SkipEmptyDirsOnRestore [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER [GOOD] >> BackupPathTest::ExportRecursiveWithoutDestinationPrefix [GOOD] >> BackupPathTest::ParallelBackupWholeDatabase >> BackupPathTest::ParallelBackupWholeDatabase [GOOD] >> BackupPathTest::ChecksumsForSchemaMappingFiles >> BackupPathTest::ChecksumsForSchemaMappingFiles [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpOlapScheme::SetNotDefaultColumnFamilyForPrimaryKey [GOOD] Test command err: Trying to start YDB, gRPC: 23563, MsgBus: 2892 2025-06-30T09:21:23.754137Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670070570520407:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:21:23.759382Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004789/r3tmp/tmpPFztAQ/pdisk_1.dat 2025-06-30T09:21:23.907191Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:23.911194Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670070570520224:2080] 1751275283746705 != 1751275283746708 TServer::EnableGrpc on GrpcPort 23563, node 1 2025-06-30T09:21:23.968352Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:21:23.968366Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:21:23.968369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:21:23.968417Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:21:23.975019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:21:23.975052Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:21:23.981989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2892 TClient is connected to server localhost:2892 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:21:24.059866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:24.063199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:21:24.087593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:24.171090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:24.217972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:21:24.262252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:21:24.369726Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670074865489123:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:24.369758Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:24.477187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.490368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.510679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.530912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.543094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.604464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.630772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.654592Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670074865489780:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:24.654625Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:24.654763Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670074865489785:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:21:24.655799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:21:24.659210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:21:24.659315Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670074865489787:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:21:24.723317Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670074865489838:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:21:24.751236Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:21:24.928080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:21:24.950381Z node 1 :FLAT_TX_SCHEMESHARD WARN: s ... ON_LEVEL=1)) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:23:37.378035Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670645252469570:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:37.378053Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:37.381580Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:23:37.388168Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:23:37.388195Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:23:37.388237Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:23:37.388257Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:23:37.388275Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:23:37.388292Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:23:37.388313Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:23:37.388330Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:23:37.388348Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:23:37.388362Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:23:37.388374Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:23:37.388389Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:23:37.388746Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:23:37.388757Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:23:37.388766Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:23:37.388771Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:23:37.388783Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:23:37.388789Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:23:37.388797Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:23:37.388804Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:23:37.388809Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:23:37.388815Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:23:37.388829Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:23:37.388835Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:23:37.388847Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:23:37.388852Z node 6 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:23:37.388861Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:23:37.388867Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:23:37.388872Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:23:37.388875Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:23:37.388878Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:23:37.388924Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:23:37.388930Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:23:37.388940Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:23:37.388946Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:23:37.434284Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521670645252469616:2297];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=124846021466688;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275417434;max=18446744073709551615;plan=0;src=[6:7521670645252469262:2143];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:23:37.435472Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:23:37.436171Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:23:37.438399Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670645252469685:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:37.438416Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:37.440487Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:23:37.443415Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::StateStorageLockedNodes [GOOD] Test command err: 2025-06-30T09:23:21.595093Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:21.597687Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:21.599001Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:21.599107Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:21.599571Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:21.599640Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:21.600757Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:21.601087Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:21.601130Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-30T09:23:21.601244Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:21.602787Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:21.602833Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:21.602884Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:21.602913Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:21.625938Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:21.658811Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:21.658971Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.660807Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:21.661004Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:21.661014Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:21.661023Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:21.661026Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:21.661045Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.661110Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:21.661139Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:21.662892Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { } } Success: true 2025-06-30T09:23:21.706658Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:21.706726Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:21.757397Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:21.757440Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:21.757505Z node 1 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:05:00Z 2025-06-30T09:23:21.757617Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300028000 } Timestamp: 300028000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300028000 } Timestamp: 300028000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300028000 } Timestamp: 300028000 
NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300028000 } Timestamp: 300028000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300028000 } Timestamp: 300028000 NodeId: 5 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 PileId: 0 } Timestamp: 300028000 } } 2025-06-30T09:23:21.757666Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "1" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-30T09:23:21.757676Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "1" Services: "storage" Duration: 60000000 2025-06-30T09:23:21.757686Z node 1 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 1, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:21.757695Z node 1 :CMS DEBUG: cms.cpp:730: Ring: 0; State: Ok 2025-06-30T09:23:21.757710Z node 1 :CMS DEBUG: cms.cpp:730: Ring: 1; State: Ok 2025-06-30T09:23:21.757714Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:21.757727Z node 1 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-30T09:23:21.757733Z node 1 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:06:00Z) 2025-06-30T09:23:21.757751Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:21.757792Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:06:00.028000Z, action# Type: RESTART_SERVICES Host: "1" Services: "storage" Duration: 60000000 2025-06-30T09:23:21.798625Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:21.843283Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:21.843398Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "1" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: RESTART_SERVICES Host: "1" Services: "storage" Duration: 60000000 } Deadline: 360028000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 1 InterconnectPort: 12001 } } } } 2025-06-30T09:23:21.843411Z node 1 :CMS DEBUG: cms.cpp:1063: Schedule cleanup at 1970-01-01T00:08:00.028000Z 2025-06-30T09:23:21.867570Z node 1 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:06:00Z) 2025-06-30T09:23:21.867630Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:21.867653Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:21.867666Z 
node 1 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:05:00Z 2025-06-30T09:23:21.867719Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "2" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-30T09:23:21.867729Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "2" Services: "storage" Duration: 60000000 2025-06-30T09:23:21.867737Z node 1 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 2, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-30T09:23:21.867745Z node 1 :CMS DEBUG: cms.cpp:730: Ring: 0; State: Restart 2025-06-30T09:23:21.867747Z node 1 :CMS DEBUG: cms.cpp:730: Ring: 1; State: Ok 2025-06-30T09:23:21.867751Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:21.867762Z node 1 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-30T09:23:21.867767Z node 1 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12002 (2) (permission user-p-2 until 1970-01-01T00:06:00Z) 2025-06-30T09:23:21.867775Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:21.867807Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:06:00.130512Z, action# Type: RESTART_SERVICES Host: "2" Services: "storage" Duration: 60000000 2025-06-30T09:23:21.878799Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:21.878915Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "2" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-2" Permissions { Id: "user-p-2" Action { Type: RESTART_SERVICES Host: "2" Services: "storage" Duration: 60000000 } Deadline: 360130512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 2 InterconnectPort: 12002 } } } } 2025-06-30T09:23:22.887500Z node 6 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:22.889542Z node 6 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:22.889644Z node 6 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:22.890058Z node 6 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:22.890426Z node 6 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:22.891526Z node 6 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:22.892804Z node 6 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:22.892849Z node 6 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:22.892901Z node 6 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:22.892937Z node 6 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 
2025-06-30T09:23:22.893538Z node 6 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:22.893567Z node 6 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:22.893596Z node 6 :CM ... s::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-30T09:23:25.991170Z node 16 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-19 2025-06-30T09:23:25.991179Z node 16 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-30T09:23:25.991192Z node 16 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-30T09:23:25.991220Z node 16 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-19, reason# explicit remove 2025-06-30T09:23:26.002475Z node 16 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-30T09:23:26.002556Z node 16 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-19" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-30T09:23:26.002722Z node 16 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-20 2025-06-30T09:23:26.002754Z node 16 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-30T09:23:26.002770Z node 16 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-30T09:23:26.002801Z node 16 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-20, reason# explicit remove 2025-06-30T09:23:26.014471Z node 16 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-30T09:23:26.014554Z node 16 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-20" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-30T09:23:29.971622Z node 36 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:29.975180Z node 36 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:29.975501Z node 36 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:29.975725Z node 36 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:29.975756Z node 36 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-30T09:23:29.975839Z node 36 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:29.977096Z node 36 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:29.977181Z node 36 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:29.977515Z node 36 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:29.977568Z node 36 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:29.978105Z node 36 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:29.978130Z node 36 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:29.978158Z node 36 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:29.978177Z node 36 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:30.000963Z node 36 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:30.033319Z node 36 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:30.033427Z node 36 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:30.033459Z node 36 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:30.033553Z node 36 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:30.033559Z node 36 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:30.033564Z node 36 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:30.033568Z node 36 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:30.033577Z node 36 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:30.033625Z node 36 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:30.033638Z node 36 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:30.033722Z node 36 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { } } Success: true 2025-06-30T09:23:30.075999Z node 36 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:30.076062Z node 36 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:30.076479Z node 36 :CMS INFO: cms.cpp:104: OnTabletDead: 72057594037936128 2025-06-30T09:23:30.076488Z node 36 :CMS DEBUG: cms.cpp:1208: TCms::Cleanup 2025-06-30T09:23:30.078247Z node 36 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:30.078962Z node 36 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:30.078992Z node 36 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:30.079407Z node 36 
:CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:30.079508Z node 36 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:30.079640Z node 36 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:30.079740Z node 36 :CMS DEBUG: cms_tx_load_state.cpp:69: Loaded config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:30.079764Z node 36 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:30.079851Z node 36 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:30.079939Z node 36 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 } 2025-06-30T09:23:30.216273Z node 36 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:30.263151Z node 36 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:30.263276Z node 36 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:03:30Z 2025-06-30T09:23:30.263353Z node 36 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "41" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-30T09:23:30.263366Z node 36 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "41" Services: "storage" Duration: 60000000 2025-06-30T09:23:30.263386Z node 36 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 41, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 2 2025-06-30T09:23:30.263397Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 0; State: Ok 2025-06-30T09:23:30.263404Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 1; State: Restart 2025-06-30T09:23:30.263410Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 2; State: Restart 2025-06-30T09:23:30.263414Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 3; State: Locked 2025-06-30T09:23:30.263418Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 4; State: Locked 2025-06-30T09:23:30.263422Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 5; State: Ok 2025-06-30T09:23:30.263426Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 6; State: Ok 2025-06-30T09:23:30.263429Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 7; State: Ok 2025-06-30T09:23:30.263433Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 8; State: Ok 2025-06-30T09:23:30.263444Z node 36 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Too many unavailable state storage rings. Restarting rings: 2. Temporary (for a 2 minutes) locked rings: 2. Disabled rings: 0. Maximum allowed number of unavailable rings for this mode: 4) 2025-06-30T09:23:30.263486Z node 36 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "41" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Too many unavailable state storage rings. Restarting rings: 2. Temporary (for a 2 minutes) locked rings: 2. 
Disabled rings: 0. Maximum allowed number of unavailable rings for this mode: 4" } Deadline: 510136000 } 2025-06-30T09:23:30.274646Z node 36 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:30.297232Z node 36 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:30.297356Z node 36 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:03:30Z 2025-06-30T09:23:30.297433Z node 36 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "40" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-30T09:23:30.297447Z node 36 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "40" Services: "storage" Duration: 60000000 2025-06-30T09:23:30.297467Z node 36 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 40, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 2 2025-06-30T09:23:30.297478Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 0; State: Ok 2025-06-30T09:23:30.297486Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 1; State: Restart 2025-06-30T09:23:30.297492Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 2; State: Restart 2025-06-30T09:23:30.297497Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 3; State: Locked 2025-06-30T09:23:30.297501Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 4; State: Locked 2025-06-30T09:23:30.297506Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 5; State: Ok 2025-06-30T09:23:30.297510Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 6; State: Ok 2025-06-30T09:23:30.297514Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 7; State: Ok 2025-06-30T09:23:30.297518Z node 36 :CMS DEBUG: cms.cpp:730: Ring: 8; State: Ok 2025-06-30T09:23:30.297524Z node 36 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:30.297590Z node 36 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "40" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "40" Services: "storage" Duration: 60000000 } Deadline: 270236000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 40 InterconnectPort: 12005 } } } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPathSpecified [GOOD] Test command err: 2025-06-30T09:23:02.897507Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670496211197485:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:02.897597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002690/r3tmp/tmp3wncfH/pdisk_1.dat 2025-06-30T09:23:02.990927Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:02.994054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-30T09:23:02.994068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:02.998330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25382, node 1 2025-06-30T09:23:03.012650Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:03.012665Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:03.012668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:03.012709Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28786 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:03.040288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:03.403017Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670500506165516:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.403061Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.458798Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521670496211197499:2142] Handle TEvProposeTransaction 2025-06-30T09:23:03.458821Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521670496211197499:2142] TxId# 281474976715658 ProcessProposeTransaction 2025-06-30T09:23:03.458837Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7521670496211197499:2142] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7521670500506165538:2601] 2025-06-30T09:23:03.478393Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7521670500506165538:2601] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-06-30T09:23:03.478416Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7521670500506165538:2601] txid# 281474976715658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:23:03.478533Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7521670500506165538:2601] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:23:03.478541Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7521670500506165538:2601] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:23:03.478562Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7521670500506165538:2601] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:23:03.478585Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7521670500506165538:2601] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:23:03.478593Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7521670500506165538:2601] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-30T09:23:03.478622Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7521670500506165538:2601] txid# 281474976715658 HANDLE EvClientConnected 2025-06-30T09:23:03.479096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:03.479980Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7521670500506165538:2601] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-06-30T09:23:03.480000Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7521670500506165538:2601] txid# 281474976715658 SEND to# [1:7521670500506165537:2302] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-06-30T09:23:03.527667Z node 1 
:KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670500506165675:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.527717Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.527824Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670500506165680:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.527937Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521670496211197499:2142] Handle TEvProposeTransaction 2025-06-30T09:23:03.527943Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521670496211197499:2142] TxId# 281474976715659 ProcessProposeTransaction 2025-06-30T09:23:03.527953Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7521670496211197499:2142] Cookie# 0 userReqId# "" txid# 281474976715659 SEND to# [1:7521670500506165683:2715] 2025-06-30T09:23:03.528925Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7521670500506165683:2715] txid# 281474976715659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-30T09:23:03.528940Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7521670500506165683:2715] txid# 281474976715659 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:23:03.528945Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7521670500506165683:2715] txid# 281474976715659 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-30T09:23:03.529399Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7521670500506165683:2715] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:23:03.529420Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7521670500506165683:2715] txid# 281474976715659 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:23:03.529454Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7521670500506165683:2715] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:23:03.529494Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7521670500506165683:2715] HANDLE EvNavigateKeySetResult, txid# 281474976715659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:23:03.529505Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7521670500506165683:2715] txid# 281474976715659 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715659 TabletId# 72057594046644480} 2025-06-30T09:23:03.529537Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7521670500506165683:2715] txid# 281474976715659 HANDLE EvClientConnected 2025-06-30T09:23:03.529898Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /h ... vServerConnected 2025-06-30T09:23:30.850436Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:30.850438Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:23:30.850461Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269551620, Sender [46:7521670613932938209:2309], Recipient [46:7521670613932937427:2218]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 7521670613932938209 RawX2: 4503797195868421 } Origin: 72075186224037888 State: 2 TxId: 281474976715660 Step: 0 Generation: 1 2025-06-30T09:23:30.850471Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-30T09:23:30.850481Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 7521670613932938209 RawX2: 4503797195868421 } Origin: 72075186224037888 State: 2 TxId: 281474976715660 Step: 0 Generation: 1 2025-06-30T09:23:30.850487Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 281474976715660, tablet: 72075186224037888, partId: 2 2025-06-30T09:23:30.850506Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 281474976715660:2, at schemeshard: 72057594046644480, message: Source { RawX1: 7521670613932938209 RawX2: 4503797195868421 } Origin: 72075186224037888 State: 2 TxId: 281474976715660 Step: 0 Generation: 1 2025-06-30T09:23:30.850519Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 281474976715660:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-30T09:23:30.850528Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976715660:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7521670613932938209 RawX2: 4503797195868421 } Origin: 72075186224037888 State: 2 TxId: 281474976715660 Step: 0 Generation: 1 2025-06-30T09:23:30.850545Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715660:2, shardIdx: 72057594046644480:1, shard: 72075186224037888, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-30T09:23:30.850553Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715660:2, at schemeshard: 72057594046644480 2025-06-30T09:23:30.850557Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715660:2, datashard: 72075186224037888, at schemeshard: 72057594046644480 2025-06-30T09:23:30.850563Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715660:2 129 -> 240 
2025-06-30T09:23:30.850592Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:23:30.850653Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:30.850684Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:30.850702Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:30.850713Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715660 2025-06-30T09:23:30.850720Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:30.850727Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715660 2025-06-30T09:23:30.850729Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:30.850756Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715660 2025-06-30T09:23:30.850763Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:30.850771Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715660 2025-06-30T09:23:30.850772Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:30.850872Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976715660:2, at schemeshard: 72057594046644480 2025-06-30T09:23:30.850883Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:30.850904Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976715660:2, at schemeshard: 72057594046644480 2025-06-30T09:23:30.850909Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:30.850911Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715660:2 2025-06-30T09:23:30.850923Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [46:7521670613932938209:2309] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715660 at schemeshard: 72057594046644480 2025-06-30T09:23:30.850940Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [46:7521670613932937427:2218], Recipient [46:7521670613932937427:2218]: 
NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:23:30.850946Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:23:30.850953Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715660:2, at schemeshard: 72057594046644480 2025-06-30T09:23:30.850957Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715660:2 ProgressState 2025-06-30T09:23:30.850967Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:23:30.850974Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715660:2 progress is 3/3 2025-06-30T09:23:30.850976Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715660 ready parts: 3/3 2025-06-30T09:23:30.850979Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715660:2 progress is 3/3 2025-06-30T09:23:30.850980Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715660 ready parts: 3/3 2025-06-30T09:23:30.850982Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976715660, ready parts: 3/3, is published: true 2025-06-30T09:23:30.850992Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [46:7521670613932938136:2295] message: TxId: 281474976715660 2025-06-30T09:23:30.850997Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715660 ready parts: 3/3 2025-06-30T09:23:30.851003Z node 46 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715660:0 2025-06-30T09:23:30.851005Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715660:0 2025-06-30T09:23:30.851015Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 2 2025-06-30T09:23:30.851022Z node 46 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715660:1 2025-06-30T09:23:30.851023Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715660:1 2025-06-30T09:23:30.851026Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 7] was 2 2025-06-30T09:23:30.851028Z node 46 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715660:2 2025-06-30T09:23:30.851029Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976715660:2 2025-06-30T09:23:30.851040Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 8] was 3 2025-06-30T09:23:30.851360Z node 46 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:30.851377Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [46:7521670613932938136:2295] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715660 at schemeshard: 72057594046644480 2025-06-30T09:23:30.851459Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [46:7521670613932938160:2685], Recipient [46:7521670613932937427:2218]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:23:30.851469Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:23:30.851471Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:23:30.852274Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [46:7521670613932938291:2791], Recipient [46:7521670613932937427:2218]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:23:30.852284Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:23:30.852286Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 >> TCmsTest::RequestReplaceBrokenDevices >> TCmsTest::WalleCleanupTest >> TMaintenanceApiTest::ManyActionGroupsWithSingleAction ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy [GOOD] Test command err: 2025-06-30T09:23:22.822037Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:22.827279Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:22.831310Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:22.831374Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:22.831436Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-30T09:23:22.831557Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:22.831844Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:22.831956Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:22.832432Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:22.832542Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:22.835306Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:22.835371Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:22.835420Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:22.835460Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:22.867729Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:22.900993Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:22.901137Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:22.902906Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:22.903084Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:22.903092Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:22.903103Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:22.903108Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:22.903124Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:22.903180Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:22.903204Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:22.905172Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { } } Success: true 2025-06-30T09:23:22.947266Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:22.947323Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:22.947511Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvGetConfigRequest { }, response# NKikimr::NCms::TEvCms::TEvGetConfigResponse { Status { Code: OK } Config { DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 
RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } } } 2025-06-30T09:23:22.947576Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:23.006801Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:23.006901Z node 1 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:23.007080Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 5 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 6 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 7 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 8 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 PileId: 0 } Timestamp: 120029000 } } 2025-06-30T09:23:23.037798Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:23.079210Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:23.079297Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 10 } ClusterLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 
ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } 2025-06-30T09:23:23.079313Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:23.114356Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:23.114401Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:23.114418Z node 1 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:23.114478Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-30T09:23:23.114489Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 2025-06-30T09:23:23.114501Z node 1 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 1, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:23.114510Z node 1 :CMS DEBUG: cms.cpp:730: Ring: 0; State: Ok 2025-06-30T09:23:23.114514Z node 1 :CMS DEBUG: cms.cpp:730: Ring: 1; State: Ok 2025-06-30T09:23:23.114517Z node 1 :CMS DEBUG: cms.cpp:730: Ring: 2; State: Ok 2025-06-30T09:23:23.114522Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:23.114541Z node 1 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-30T09:23:23.114551Z node 1 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:23.114576Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:23.114618Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:03:00.130000Z, action# Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 2025-06-30T09:23:23.125586Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:23.125686Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } Deadline: 180130000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 1 InterconnectPort: 12001 } } } } 2025-06-30T09:23:23.125711Z node 1 :CMS DEBUG: cms.cpp:1063: Schedule cleanup at 1970-01-01T00:05:00.130000Z 2025-06-30T09:23:23.147502Z node 1 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:23.147569Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:23.147590Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:23.147602Z node 1 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:23.147654Z node 1 :CMS INFO: cms.cpp:347: Check 
request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-30T09:23:23.147663Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 2025-06-30T09:23:23.147674Z node 1 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 2, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-30T09:23:23.147681Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:23.147699Z node 1 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-30T09:23:23.147706Z node 1 :CMS INFO: cluster_i ... " } } Actions { Type: SHUTDOWN_HOST Host: "30" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'30\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } Actions { Type: SHUTDOWN_HOST Host: "31" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'31\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } Actions { Type: SHUTDOWN_HOST Host: "32" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'32\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-30T09:23:29.346581Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:29.346661Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "user-r-1" Permissions { Id: "user-p-3" Action { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 } Deadline: 180333560 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 27 InterconnectPort: 12003 } } } Permissions { Id: "user-p-4" Action { Type: SHUTDOWN_HOST Host: "28" Duration: 60000000 } Deadline: 180333560 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 28 InterconnectPort: 12004 } } } } 2025-06-30T09:23:29.346815Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvGetConfigRequest { }, response# NKikimr::NCms::TEvCms::TEvGetConfigResponse { Status { Code: OK } Config { DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 30 } ClusterLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } } } 2025-06-30T09:23:29.346870Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 
2025-06-30T09:23:29.357608Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:29.357662Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } 2025-06-30T09:23:29.368971Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12003 (27) (permission user-p-3 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:29.369002Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12004 (28) (permission user-p-4 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:29.369060Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:29.369083Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:29.369095Z node 25 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:29.369173Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "29" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'29\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } Actions { Type: SHUTDOWN_HOST Host: "30" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'30\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } Actions { Type: SHUTDOWN_HOST Host: "31" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'31\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } Actions { Type: SHUTDOWN_HOST Host: "32" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'32\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-30T09:23:29.369186Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "29" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'29\' of tenant \'user0\': too many unavailable nodes. 
Locked: 2, down: 0, total: 8, limit: 30%" } 2025-06-30T09:23:29.369197Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 29, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 2, down nodes: 0 2025-06-30T09:23:29.369202Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 29, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 2, down nodes: 0 2025-06-30T09:23:29.369208Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:29.369224Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "30" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'30\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } 2025-06-30T09:23:29.369229Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 30, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 3, down nodes: 0 2025-06-30T09:23:29.369234Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 30, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 3, down nodes: 0 2025-06-30T09:23:29.369238Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:29.369249Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "31" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'31\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } 2025-06-30T09:23:29.369258Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 31, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 4, down nodes: 0 2025-06-30T09:23:29.369262Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 31, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 4, down nodes: 0 2025-06-30T09:23:29.369267Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:29.369277Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "32" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'32\' of tenant \'user0\': too many unavailable nodes. 
Locked: 2, down: 0, total: 8, limit: 30%" } 2025-06-30T09:23:29.369281Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 32, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 5, down nodes: 0 2025-06-30T09:23:29.369285Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 32, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 5, down nodes: 0 2025-06-30T09:23:29.369289Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:29.369309Z node 25 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-5, requestId# user-r-1, owner# user 2025-06-30T09:23:29.369316Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12005 (29) (permission user-p-5 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:29.369323Z node 25 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-6, requestId# user-r-1, owner# user 2025-06-30T09:23:29.369328Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12006 (30) (permission user-p-6 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:29.369334Z node 25 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-7, requestId# user-r-1, owner# user 2025-06-30T09:23:29.369340Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12007 (31) (permission user-p-7 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:29.369345Z node 25 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-8, requestId# user-r-1, owner# user 2025-06-30T09:23:29.369351Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12008 (32) (permission user-p-8 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:29.369361Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:29.369402Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-5, validity# 1970-01-01T00:03:00.436584Z, action# Type: SHUTDOWN_HOST Host: "29" Duration: 60000000 2025-06-30T09:23:29.369412Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-6, validity# 1970-01-01T00:03:00.436584Z, action# Type: SHUTDOWN_HOST Host: "30" Duration: 60000000 2025-06-30T09:23:29.369421Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-7, validity# 1970-01-01T00:03:00.436584Z, action# Type: SHUTDOWN_HOST Host: "31" Duration: 60000000 2025-06-30T09:23:29.369429Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-8, validity# 1970-01-01T00:03:00.436584Z, action# Type: SHUTDOWN_HOST Host: "32" Duration: 60000000 2025-06-30T09:23:29.369439Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-30T09:23:29.380437Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:29.380549Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-5" Action { Type: SHUTDOWN_HOST Host: "29" Duration: 60000000 } Deadline: 180436584 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 29 InterconnectPort: 12005 } } } Permissions { Id: "user-p-6" Action { Type: SHUTDOWN_HOST Host: "30" Duration: 60000000 } Deadline: 180436584 Extentions { Type: HostInfo Hosts { Name: "::1" 
State: UP NodeId: 30 InterconnectPort: 12006 } } } Permissions { Id: "user-p-7" Action { Type: SHUTDOWN_HOST Host: "31" Duration: 60000000 } Deadline: 180436584 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 31 InterconnectPort: 12007 } } } Permissions { Id: "user-p-8" Action { Type: SHUTDOWN_HOST Host: "32" Duration: 60000000 } Deadline: 180436584 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 32 InterconnectPort: 12008 } } } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER [GOOD] Test command err: 2025-06-30T09:23:05.950725Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670507285104833:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:05.950769Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpXCYXHa/pdisk_1.dat 2025-06-30T09:23:06.042209Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:06.050175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:06.050206Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:06.057755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:06.063180Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 28093, node 1 2025-06-30T09:23:06.070772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:06.070786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:06.070788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:06.070840Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4093 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
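
(In the CMS permission records above, Duration and Deadline are plain integers; matching them against the human-readable audit entries shows they count microseconds from the simulated epoch: Deadline: 180436584 lines up with validity# 1970-01-01T00:03:00.436584Z, and Duration: 60000000 is one minute. A minimal Python sketch of that decoding, assuming the microsecond interpretation inferred here:)

    from datetime import datetime, timedelta, timezone

    def cms_micros_to_utc(us: int) -> datetime:
        # Assumption from the trace: deadlines count microseconds from 1970-01-01T00:00:00Z.
        return datetime(1970, 1, 1, tzinfo=timezone.utc) + timedelta(microseconds=us)

    print(cms_micros_to_utc(180436584))        # 1970-01-01 00:03:00.436584+00:00
    print(timedelta(microseconds=60000000))    # Duration: 60000000 -> 0:01:00
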
2025-06-30T09:23:06.116231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:06.456488Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670511580073022:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.456533Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.500655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Backup "/Root" to "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/"Create temporary directory "/Root/~backup_20250630T092306" in databaseProcess "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table"Copy tables: { src: "/Root/table", dst: "/Root/~backup_20250630T092306/table" }Describe table "/Root/table"Describe table "/Root/~backup_20250630T092306/table"Backup table "/Root/~backup_20250630T092306/table" to "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table"Write scheme into "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table/permissions.pb"Read table "/Root/~backup_20250630T092306/table"Write data into "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table/data_00.csv"Drop table "/Root/~backup_20250630T092306/table"2025-06-30T09:23:06.683693Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 2025-06-30T09:23:06.683712Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found 2025-06-30T09:23:06.683715Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found Remove temporary directory "/Root/~backup_20250630T092306" in database2025-06-30T09:23:06.692604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Backup completed successfully2025-06-30T09:23:06.702942Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670511580074022:2361], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.703051Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:06.727826Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-30T09:23:06.727842Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-30T09:23:06.727846Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found Restore "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/" to "/Root"Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table"}]Process "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table"Read scheme from "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table/scheme.pb"Restore table "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table" to "/Root/table"2025-06-30T09:23:06.744309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Created "/Root/table"Read data from "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table/data_00.csv"Restore index "byValue" on "/Root/table"2025-06-30T09:23:06.794609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.837830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.869335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:06.937666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715762:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:23:06.954617Z node 1 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:06.966037Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found 2025-06-30T09:23:06.966053Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found Restore ACL "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table" to "/Root/table"Read ACL from "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmpZXrElL/table/permissions.pb"2025-06-30T09:23:07.117169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully 2025-06-30T09:23:07.911073Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521670517422651518:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:07.911144Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;err ... 3tmp/tmp3svU0F/JsonDocumentTable/permissions.pb"2025-06-30T09:23:33.700489Z node 52 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-30T09:23:33.715016Z node 52 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz02c865a4cgm0vtrds99h6e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=52&id=YTBiOTAyNmItYzBhYWM5MGItMjYxYmVhNDEtNTMzODcyYzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:23:34.314921Z node 55 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[55:7521670631504142217:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:34.315098Z node 55 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp1Qt2sZ/pdisk_1.dat 2025-06-30T09:23:34.329456Z node 55 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:34.337913Z node 55 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 55 Type# 268639257 TServer::EnableGrpc on GrpcPort 13346, node 55 2025-06-30T09:23:34.342170Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:34.342189Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:34.342194Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:34.342256Z node 55 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2455 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:34.414931Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:34.414955Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:34.416710Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:34.419990Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:34.655830Z node 55 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [55:7521670631504143120:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:34.655844Z node 55 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [55:7521670631504143131:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:34.655848Z node 55 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:34.656475Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:34.660142Z node 55 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [55:7521670631504143134:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:23:34.714008Z node 55 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [55:7521670631504143201:2652] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:34.718932Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:34.786072Z node 55 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02c97t3cwdqw2tk9r8re94, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=55&id=NzdmYTk5OWYtZTE0YTIwZS01NTk4ZjAxZC00NzA2MDI2Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:34.806844Z node 55 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02c98566mtwgyrs727yh8n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=55&id=NzdmYTk5OWYtZTE0YTIwZS01NTk4ZjAxZC00NzA2MDI2Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/"Create temporary directory "/Root/~backup_20250630T092334" in databaseProcess "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable"Copy tables: { src: "/Root/DyNumberTable", dst: "/Root/~backup_20250630T092334/DyNumberTable" }Describe table "/Root/DyNumberTable"Describe table "/Root/~backup_20250630T092334/DyNumberTable"Backup table "/Root/~backup_20250630T092334/DyNumberTable" to "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable"Write scheme into "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable/permissions.pb"Read table "/Root/~backup_20250630T092334/DyNumberTable"Write data into "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable/data_00.csv"Drop table "/Root/~backup_20250630T092334/DyNumberTable"Remove temporary directory "/Root/~backup_20250630T092334" in database2025-06-30T09:23:34.864204Z node 55 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 55, TabletId: 72075186224037889 not found 2025-06-30T09:23:34.866475Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Backup completed successfullyRestore "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/" to "/Root"Resolved db base path: "/Root"2025-06-30T09:23:34.886580Z node 55 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle 
TEvLocal::TEvTabletStatus from node 55, TabletId: 72075186224037888 not found List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable"}]Process "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable"Read scheme from "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable/scheme.pb"Restore table "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable" to "/Root/DyNumberTable"2025-06-30T09:23:34.890983Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Created "/Root/DyNumberTable"Read data from "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable/data_00.csv"2025-06-30T09:23:34.911678Z node 55 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jz02c9bp813fr1b3s4w8g3nd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=55&id=MTI1ODkxNDktMjk0YWViY2QtNjBlZjM5Y2ItYWU2MDMzMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable" to "/Root/DyNumberTable"Read ACL from "/home/runner/.ya/build/build_root/cl3w/002259/r3tmp/tmp4WaUiQ/DyNumberTable/permissions.pb"2025-06-30T09:23:34.917865Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-30T09:23:34.931779Z node 55 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jz02c9c7efj4nt9jf36x8ba2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=55&id=NzdmYTk5OWYtZTE0YTIwZS01NTk4ZjAxZC00NzA2MDI2Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::NoUserAccessToScriptExecutionsTable [GOOD] Test command err: 2025-06-30T09:23:19.349299Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670565995644412:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:19.349331Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:23:19.386968Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670567579803752:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:19.386995Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0026e9/r3tmp/tmpwmoEFn/pdisk_1.dat 2025-06-30T09:23:19.517800Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:19.552629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:19.552662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:19.559997Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:23:19.561820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8325 2025-06-30T09:23:19.650893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:19.650927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:19.652716Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:19.951985Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-30T09:23:19.952971Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-30T09:23:19.954267Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-30T09:23:19.955011Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-30T09:23:19.964410Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 
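
(Almost every record in this output follows one shape: an ISO-8601 timestamp, a node id, a :COMPONENT tag, a severity level, an optional source file:line, and the message. A small Python sketch of a parser for that shape, useful when sifting large runs of this log; the format is inferred from the lines above, and the optional source-location group is a best-effort assumption:)

    import re

    TRACE_LINE = re.compile(
        r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z) "
        r"node (?P<node>\d+) "
        r":(?P<component>\S+) (?P<level>[A-Z]+): "
        r"(?:(?P<src>\S+\.(?:cpp|h):\d+): )?"   # source file:line, when present
        r"(?P<msg>.*)"
    )

    sample = ("2025-06-30T09:23:19.964410Z node 1 :KQP_PROXY DEBUG: "
              "kqp_proxy_service.cpp:524: Subscribed for config changes.")
    m = TRACE_LINE.match(sample)
    assert m and m["node"] == "1" and m["component"] == "KQP_PROXY"
    print(m.groupdict())
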
2025-06-30T09:23:19.964328Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=ZDhjYTdmNzgtYzk4MDI4OGYtZTljZmI3ZGItNmE4ZTUwZjk=, workerId: [2:7521670567579803987:2267], database: , longSession: 1, local sessions count: 1 2025-06-30T09:23:19.964354Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-30T09:23:19.964412Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-30T09:23:19.964436Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-30T09:23:19.964440Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-30T09:23:19.964449Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-30T09:23:19.964466Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-30T09:23:19.964483Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:19.964492Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:19.964514Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:19.964519Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:19.964561Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:19.966706Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-30T09:23:19.966724Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-30T09:23:19.966771Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-30T09:23:19.966800Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:19.966814Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:19.966916Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZDhjYTdmNzgtYzk4MDI4OGYtZTljZmI3ZGItNmE4ZTUwZjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 2, targetId: [2:8678280833929343339:121] 2025-06-30T09:23:19.966924Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 2 timeout: 600.000000s actor id: [1:7521670565995644983:2454] 2025-06-30T09:23:19.966930Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:19.966933Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:19.967318Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670565995644984:2272], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:19.967334Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:19.967322Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZDhjYTdmNzgtYzk4MDI4OGYtZTljZmI3ZGItNmE4ZTUwZjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [2:7521670567579803987:2267] 2025-06-30T09:23:19.967343Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 3 timeout: 600.000000s actor id: [2:7521670567579803989:2117] 2025-06-30T09:23:19.974866Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670567579803990:2268], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:19.974918Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:20.021671Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jz02btrz9p3kw9xhea5emn8e", Created new session, sessionId: ydb://session/3?node_id=2&id=ZmIyNWIzOTUtMWYwOWU4ZjgtODEzY2EzYTEtODg3YmQyMTQ=, workerId: [2:7521670571874771297:2270], database: , longSession: 0, local sessions count: 2 2025-06-30T09:23:20.021760Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jz02btrz9p3kw9xhea5emn8e, Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZmIyNWIzOTUtMWYwOWU4ZjgtODEzY2EzYTEtODg3YmQyMTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 4, targetId: [2:7521670571874771297:2270] 2025-06-30T09:23:20.021767Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 4 timeout: 300.000000s actor id: [2:7521670571874771298:2120] 2025-06-30T09:23:20.022569Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670571874771299:2271], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:20.022602Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:20.022995Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670571874771304:2274], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:20.025392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:20.044244Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670571874771306:2275], DatabaseId: /dc-1, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-30T09:23:20.167635Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670571874771334:2130] txid# 281474976720658, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:20.193489Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jz02btrz9p3kw9xhea5emn8e", Forwarded response to sender actor, requestId: 4, sender: [2:7521670571874771296:2269], selfId: [2:7521670567579803721:2077], source: [2:7521670571874771297:2270] 2025-06-30T09:23:20.196739Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 3, sender: [1:7521670565995644423:2208], selfId: [2:7521670567579803721:2077], source: [2:7521670567579803987:2267] 2025-06-30T09:23:20.196763Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Sess ... l default not found or you don't have access permissions } 2025-06-30T09:23:21.615935Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670573825596761:2321], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:21.616890Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:21.625588Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670573825596763:2322], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:23:21.721694Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670573825596824:2984] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:21.750699Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 5, sender: [3:7521670573825596754:2317], selfId: [3:7521670569530628233:2181], source: [3:7521670573825596753:2316] 2025-06-30T09:23:21.750860Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 41ac9d6b-bc627eef-bc1d3a3b-71191c38, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=NTM5M2QwZWQtNGU0YmQyZGUtZjcwNDZkZTYtMmYyMTkyMzY=, TxId: 2025-06-30T09:23:21.750874Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 41ac9d6b-bc627eef-bc1d3a3b-71191c38, Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=NTM5M2QwZWQtNGU0YmQyZGUtZjcwNDZkZTYtMmYyMTkyMzY=, TxId: 2025-06-30T09:23:21.750879Z node 3 :KQP_PROXY DEBUG: kqp_script_executions.cpp:304: [ScriptExecutions] Create script execution operation. ExecutionId: 41ac9d6b-bc627eef-bc1d3a3b-71191c38. Result: SUCCESS. Issues: 2025-06-30T09:23:21.751437Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=M2UxM2I3MzctNmFiNjFmNmMtOGQ0ZjFlMzMtM2YyNDFlNzY=, workerId: [3:7521670573825596884:2334], database: /Root, longSession: 1, local sessions count: 2 2025-06-30T09:23:21.751473Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-30T09:23:21.751534Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TScriptProgressActor] TraceId: 41ac9d6b-bc627eef-bc1d3a3b-71191c38, Bootstrap. Database: /Root 2025-06-30T09:23:21.751556Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jz02bw6s8xgf1160ambwr5ey, Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=M2UxM2I3MzctNmFiNjFmNmMtOGQ0ZjFlMzMtM2YyNDFlNzY=, CurrentExecutionId: 41ac9d6b-bc627eef-bc1d3a3b-71191c38, CustomerSuppliedId: 01jz02bw6s8xgf1160ambwr5ey, PoolId: }. TEvQueryRequest, set timer for: 604800.000000s timeout: 604800.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 7, targetId: [3:7521670573825596884:2334] 2025-06-30T09:23:21.751562Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 7 timeout: 604800.000000s actor id: [3:7521670573825596888:3020] 2025-06-30T09:23:21.751721Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444992798307.799897s seconds to be completed 2025-06-30T09:23:21.751947Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=ZGZhNGE1ZTItNDg0ZjRlMWEtNjdkMzdjMmItMjg1ODI0ODM=, workerId: [3:7521670573825596898:2339], database: /Root, longSession: 1, local sessions count: 3 2025-06-30T09:23:21.751966Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-30T09:23:21.751979Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=3&id=NTM5M2QwZWQtNGU0YmQyZGUtZjcwNDZkZTYtMmYyMTkyMzY=, workerId: [3:7521670573825596753:2316], local sessions count: 2 2025-06-30T09:23:21.752054Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptProgressActor] TraceId: 41ac9d6b-bc627eef-bc1d3a3b-71191c38, RunDataQuery: -- TScriptProgressActor::OnRunQuery DECLARE $execution_id AS Text; DECLARE $database AS Text; DECLARE $plan AS JsonDocument; DECLARE $execution_status AS Int32; UPSERT INTO `.metadata/script_executions` (execution_id, database, plan, execution_status) VALUES ($execution_id, $database, $plan, $execution_status); 2025-06-30T09:23:21.752129Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ZGZhNGE1ZTItNDg0ZjRlMWEtNjdkMzdjMmItMjg1ODI0ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 9, targetId: [3:7521670573825596898:2339] 2025-06-30T09:23:21.752139Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 9 timeout: 300.000000s actor id: [3:7521670573825596902:3027] 2025-06-30T09:23:21.760563Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 41ac9d6b-bc627eef-bc1d3a3b-71191c38, Bootstrap. 
Database: /Root 2025-06-30T09:23:21.760882Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jz02bw6s8xgf1160ambwr5ey", Forwarded response to sender actor, requestId: 7, sender: [3:7521670573825596750:2932], selfId: [3:7521670569530628233:2181], source: [3:7521670573825596884:2334] 2025-06-30T09:23:21.760920Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444992798307.790698s seconds to be completed 2025-06-30T09:23:21.761358Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=NDljMDRlNGMtMmQ2OWYxODQtODY5ZDlmOWQtMTY4NjZkNDc=, workerId: [3:7521670573825596918:2346], database: /Root, longSession: 1, local sessions count: 3 2025-06-30T09:23:21.761392Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-30T09:23:21.761467Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 41ac9d6b-bc627eef-bc1d3a3b-71191c38, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-30T09:23:21.761577Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=NDljMDRlNGMtMmQ2OWYxODQtODY5ZDlmOWQtMTY4NjZkNDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 11, targetId: [3:7521670573825596918:2346] 2025-06-30T09:23:21.761589Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 11 timeout: 300.000000s actor id: [3:7521670573825596920:3035] 2025-06-30T09:23:21.764529Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: TraceId: "01jz02bwh228smj3c98k38yrbn", Request has 18444992798307.787094s seconds to be completed 2025-06-30T09:23:21.764904Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jz02bwh228smj3c98k38yrbn", Created new session, sessionId: ydb://session/3?node_id=3&id=NGM5M2EwYi02MGYxOGViZS04OTBlZjQwOC05YTkzNzNiNg==, workerId: [3:7521670573825596927:2350], database: /Root, longSession: 1, local sessions count: 4 2025-06-30T09:23:21.764936Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 01jz02bwh228smj3c98k38yrbn 2025-06-30T09:23:21.766246Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jz02bwh67g6w82e1kxc9gc1c, Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=NGM5M2EwYi02MGYxOGViZS04OTBlZjQwOC05YTkzNzNiNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 13, targetId: [3:7521670573825596927:2350] 2025-06-30T09:23:21.766260Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 13 timeout: 300.000000s actor id: [3:7521670573825596941:3041] 2025-06-30T09:23:21.769628Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7521670573825596947:3045], for# user@builtin, access# DescribeSchema 2025-06-30T09:23:21.769642Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7521670573825596947:3045], for# user@builtin, access# DescribeSchema 2025-06-30T09:23:21.771051Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7521670573825596942:2354], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/script_executions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:23:21.771808Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 9, sender: [3:7521670573825596901:2341], selfId: [3:7521670569530628233:2181], source: [3:7521670573825596898:2339] 2025-06-30T09:23:21.771881Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptProgressActor] TraceId: 41ac9d6b-bc627eef-bc1d3a3b-71191c38, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=ZGZhNGE1ZTItNDg0ZjRlMWEtNjdkMzdjMmItMjg1ODI0ODM=, TxId: 2025-06-30T09:23:21.771890Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=3&id=NGM5M2EwYi02MGYxOGViZS04OTBlZjQwOC05YTkzNzNiNg==, ActorId: [3:7521670573825596927:2350], ActorState: ExecuteState, TraceId: 01jz02bwh67g6w82e1kxc9gc1c, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:23:21.771892Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TScriptProgressActor] TraceId: 41ac9d6b-bc627eef-bc1d3a3b-71191c38, Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=ZGZhNGE1ZTItNDg0ZjRlMWEtNjdkMzdjMmItMjg1ODI0ODM=, TxId: 2025-06-30T09:23:21.771926Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jz02bwh67g6w82e1kxc9gc1c", Forwarded response to sender actor, requestId: 13, sender: [3:7521670573825596938:2352], selfId: [3:7521670569530628233:2181], source: [3:7521670573825596927:2350] 2025-06-30T09:23:21.772310Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=3&id=ZGZhNGE1ZTItNDg0ZjRlMWEtNjdkMzdjMmItMjg1ODI0ODM=, workerId: [3:7521670573825596898:2339], local sessions count: 3 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::MergeBoxes [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:11116:2156] recipient: [1:10913:2167] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:11116:2156] recipient: [1:10913:2167] Leader for TabletID 72057594037932033 is [1:11213:2169] sender: [1:11214:2156] recipient: [1:10913:2167] 2025-06-30T09:22:39.329098Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:22:39.330041Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:22:39.330109Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:22:39.330440Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:22:39.330558Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:22:39.330587Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:39.330591Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:22:39.330681Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:22:39.331468Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} 
TTxInitScheme Complete 2025-06-30T09:22:39.331495Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:22:39.331521Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:22:39.331540Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:39.331549Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:22:39.331559Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:11213:2169] sender: [1:11238:2156] recipient: [1:110:2157] 2025-06-30T09:22:39.342490Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:22:39.342544Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:39.352926Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:22:39.352972Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:39.352984Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:22:39.352996Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:39.353017Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:22:39.353027Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:39.353031Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:22:39.353041Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:39.363375Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:22:39.363421Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:39.373805Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:22:39.373859Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:22:39.374123Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:22:39.374131Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:22:39.374169Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:22:39.374177Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:22:39.376395Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk0" } Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" } Drive { Path: "/dev/disk3" } Drive { Path: "/dev/disk4" } Drive { Path: "/dev/disk5" } Drive { Path: "/dev/disk6" } Drive { Path: "/dev/disk7" } Drive { Path: "/dev/disk8" Type: SSD } Drive { Path: "/dev/disk9" Type: SSD } Drive { Path: "/dev/disk10" Type: SSD } Drive { Path: "/dev/disk11" Type: SSD } Drive { Path: "/dev/disk12" Type: SSD } Drive { Path: "/dev/disk13" Type: SSD } Drive { Path: "/dev/disk14" Type: SSD } Drive { Path: "/dev/disk15" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 1 } 
Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12051 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12052 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12053 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12054 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12055 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12056 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12057 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12058 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12059 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12060 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12061 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12062 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12063 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12064 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12065 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12066 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12067 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12068 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12069 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12070 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12071 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12072 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12073 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12074 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12075 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12076 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12077 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12078 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12079 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12080 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12081 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12082 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12083 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12084 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12085 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12086 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12087 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12088 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12089 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12090 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12091 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12092 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12093 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12094 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12095 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12096 } HostConfigId: 1 } Host { Ke ... 
LER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 275:1001 Path# /dev/disk2 2025-06-30T09:23:33.904136Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 275:1002 Path# /dev/disk3 2025-06-30T09:23:33.904139Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 276:1000 Path# /dev/disk1 2025-06-30T09:23:33.904142Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 276:1001 Path# /dev/disk2 2025-06-30T09:23:33.904145Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 276:1002 Path# /dev/disk3 2025-06-30T09:23:33.904148Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1000 Path# /dev/disk1 2025-06-30T09:23:33.904151Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1001 Path# /dev/disk2 2025-06-30T09:23:33.904154Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1002 Path# /dev/disk3 2025-06-30T09:23:33.904157Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1000 Path# /dev/disk1 2025-06-30T09:23:33.904161Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1001 Path# /dev/disk2 2025-06-30T09:23:33.904164Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1002 Path# /dev/disk3 2025-06-30T09:23:33.904167Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1000 Path# /dev/disk1 2025-06-30T09:23:33.904170Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1001 Path# /dev/disk2 2025-06-30T09:23:33.904173Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1002 Path# /dev/disk3 2025-06-30T09:23:33.904176Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1000 Path# /dev/disk1 2025-06-30T09:23:33.904179Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1001 Path# /dev/disk2 2025-06-30T09:23:33.904182Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1002 Path# /dev/disk3 2025-06-30T09:23:33.904185Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1000 Path# /dev/disk1 2025-06-30T09:23:33.904189Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1001 Path# /dev/disk2 2025-06-30T09:23:33.904192Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1002 Path# /dev/disk3 2025-06-30T09:23:33.904196Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1000 Path# /dev/disk1 2025-06-30T09:23:33.904200Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1001 Path# /dev/disk2 2025-06-30T09:23:33.904203Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1002 Path# /dev/disk3 2025-06-30T09:23:33.904206Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 283:1000 Path# /dev/disk1 2025-06-30T09:23:33.904209Z node 251 
:BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 283:1001 Path# /dev/disk2 2025-06-30T09:23:33.904212Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 283:1002 Path# /dev/disk3 2025-06-30T09:23:33.904215Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1000 Path# /dev/disk1 2025-06-30T09:23:33.904219Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1001 Path# /dev/disk2 2025-06-30T09:23:33.904222Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1002 Path# /dev/disk3 2025-06-30T09:23:33.904225Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1000 Path# /dev/disk1 2025-06-30T09:23:33.904228Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1001 Path# /dev/disk2 2025-06-30T09:23:33.904231Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1002 Path# /dev/disk3 2025-06-30T09:23:33.904234Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1000 Path# /dev/disk1 2025-06-30T09:23:33.904237Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1001 Path# /dev/disk2 2025-06-30T09:23:33.904240Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1002 Path# /dev/disk3 2025-06-30T09:23:33.904244Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1000 Path# /dev/disk1 2025-06-30T09:23:33.904247Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1001 Path# /dev/disk2 2025-06-30T09:23:33.904250Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1002 Path# /dev/disk3 2025-06-30T09:23:33.904253Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1000 Path# /dev/disk1 2025-06-30T09:23:33.904256Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1001 Path# /dev/disk2 2025-06-30T09:23:33.904260Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1002 Path# /dev/disk3 2025-06-30T09:23:33.904265Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1000 Path# /dev/disk1 2025-06-30T09:23:33.904270Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1001 Path# /dev/disk2 2025-06-30T09:23:33.904275Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1002 Path# /dev/disk3 2025-06-30T09:23:33.904280Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1000 Path# /dev/disk1 2025-06-30T09:23:33.904286Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1001 Path# /dev/disk2 2025-06-30T09:23:33.904291Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1002 Path# /dev/disk3 2025-06-30T09:23:33.904294Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 291:1000 Path# /dev/disk1 2025-06-30T09:23:33.904297Z node 251 
:BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 291:1001 Path# /dev/disk2 2025-06-30T09:23:33.904300Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 291:1002 Path# /dev/disk3 2025-06-30T09:23:33.904303Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1000 Path# /dev/disk1 2025-06-30T09:23:33.904307Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1001 Path# /dev/disk2 2025-06-30T09:23:33.904311Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1002 Path# /dev/disk3 2025-06-30T09:23:33.904314Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1000 Path# /dev/disk1 2025-06-30T09:23:33.904319Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1001 Path# /dev/disk2 2025-06-30T09:23:33.904322Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1002 Path# /dev/disk3 2025-06-30T09:23:33.904325Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1000 Path# /dev/disk1 2025-06-30T09:23:33.904328Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1001 Path# /dev/disk2 2025-06-30T09:23:33.904331Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1002 Path# /dev/disk3 2025-06-30T09:23:33.904334Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1000 Path# /dev/disk1 2025-06-30T09:23:33.904337Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1001 Path# /dev/disk2 2025-06-30T09:23:33.904340Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1002 Path# /dev/disk3 2025-06-30T09:23:33.904343Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1000 Path# /dev/disk1 2025-06-30T09:23:33.904346Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1001 Path# /dev/disk2 2025-06-30T09:23:33.904349Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1002 Path# /dev/disk3 2025-06-30T09:23:33.904352Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1000 Path# /dev/disk1 2025-06-30T09:23:33.904355Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1001 Path# /dev/disk2 2025-06-30T09:23:33.904358Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1002 Path# /dev/disk3 2025-06-30T09:23:33.904361Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1000 Path# /dev/disk1 2025-06-30T09:23:33.904364Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1001 Path# /dev/disk2 2025-06-30T09:23:33.904367Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1002 Path# /dev/disk3 2025-06-30T09:23:33.904370Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 299:1000 Path# /dev/disk1 2025-06-30T09:23:33.904373Z node 251 
:BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 299:1001 Path# /dev/disk2 2025-06-30T09:23:33.904376Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 299:1002 Path# /dev/disk3 2025-06-30T09:23:33.904379Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1000 Path# /dev/disk1 2025-06-30T09:23:33.904382Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1001 Path# /dev/disk2 2025-06-30T09:23:33.904385Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1002 Path# /dev/disk3 2025-06-30T09:23:33.943467Z node 251 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 251 Type# 268639257 2025-06-30T09:23:33.945514Z node 251 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MergeBoxes { OriginBoxId: 2 OriginBoxGeneration: 1 TargetBoxId: 1 TargetBoxGeneration: 1 StoragePoolIdMap { OriginStoragePoolId: 1 TargetStoragePoolId: 2 } } } } 2025-06-30T09:23:33.960017Z node 251 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { ReadBox { BoxId: 1 } } Command { QueryBaseConfig { } } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER [GOOD] Test command err: 2025-06-30T09:23:03.786246Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670498304239746:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:03.786272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002661/r3tmp/tmpsvMSx8/pdisk_1.dat 2025-06-30T09:23:04.009954Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14802, node 1 2025-06-30T09:23:04.080778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:04.080791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:04.080793Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:04.080858Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28908 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:04.116702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:04.127241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:04.127273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:04.130959Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:04.593985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670502599207978:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:04.594021Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:04.644791Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521670498304239916:2118] Handle TEvProposeTransaction 2025-06-30T09:23:04.644806Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521670498304239916:2118] TxId# 281474976715658 ProcessProposeTransaction 2025-06-30T09:23:04.644832Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7521670498304239916:2118] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7521670502599207999:2606] 2025-06-30T09:23:04.655632Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7521670502599207999:2606] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 10 SplitByLoadSettings { Enabled: true } } } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-06-30T09:23:04.655666Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7521670502599207999:2606] txid# 281474976715658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:23:04.655824Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7521670502599207999:2606] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:23:04.655837Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7521670502599207999:2606] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:23:04.655867Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7521670502599207999:2606] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:23:04.655899Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7521670502599207999:2606] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:23:04.655910Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7521670502599207999:2606] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-30T09:23:04.655962Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7521670502599207999:2606] txid# 281474976715658 HANDLE EvClientConnected 2025-06-30T09:23:04.656381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:04.658642Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7521670502599207999:2606] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-06-30T09:23:04.658656Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7521670502599207999:2606] txid# 281474976715658 SEND to# [1:7521670502599207998:2302] Source 
{TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-06-30T09:23:04.746574Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7521670498304239916:2118] Handle TEvNavigate describe path /Root/table 2025-06-30T09:23:04.748599Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7521670502599208143:2722] HANDLE EvNavigateScheme /Root/table 2025-06-30T09:23:04.748738Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7521670502599208143:2722] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-30T09:23:04.748788Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7521670502599208143:2722] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/table" Options { ShowPrivateTable: false } 2025-06-30T09:23:04.749272Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7521670502599208143:2722] Handle TEvDescribeSchemeResult Forward to# [1:7521670502599208141:2308] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/table" PathDescription { Self { Name: "table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275384775 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 
UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 10 SplitByLoadSettings { Enabled: true } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: ... ters.cpp:346: HandleScheme TEvExternalStorage::TEvHeadObjectResponse: self# [52:7521670641963025825:2200], result# HeadObjectResult { ETag: 7639027e6c57dadc57c0d5a95af4ff63 ContentLength: 360 } REQUEST: GET /test_bucket/DyNumberTable/scheme.pb HTTP/1.1 HEADERS: Host: localhost:13337 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: EACB3936-1229-4AD8-99F5-2E4326C0F5B7 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250630/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=dacf72613a889afe90be312d0d3bc988bd32825d2ce16b460cafad6b987cea8b content-type: application/xml range: bytes=0-359 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250630T092336Z S3_MOCK::HttpServeRead: /test_bucket/DyNumberTable/scheme.pb / 360 2025-06-30T09:23:36.744775Z node 52 :IMPORT DEBUG: schemeshard_import_getters.cpp:475: HandleScheme TEvExternalStorage::TEvGetObjectResponse: self# [52:7521670641963025825:2200], result# 7639027e6c57dadc57c0d5a95af4ff63 2025-06-30T09:23:36.744882Z node 52 :IMPORT INFO: schemeshard_import_getters.cpp:692: Reply: self# [52:7521670641963025825:2200], success# 1, error# 2025-06-30T09:23:36.744913Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:36.744919Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:993: TImport::TTxProgress: OnSchemeResult: id# 281474976715665, itemIdx# 0, success# 1 2025-06-30T09:23:36.745017Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:633: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-30T09:23:36.746356Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:36.746384Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:36.746386Z node 52 :IMPORT DEBUG: 
schemeshard_import__create.cpp:1222: TImport::TTxProgress: OnAllocateResult: txId# 281474976710760, id# 281474976715665 2025-06-30T09:23:36.746393Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:423: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710760 2025-06-30T09:23:36.746417Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:36.746702Z node 52 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:36.747241Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:36.747252Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1318: TImport::TTxProgress: OnModifyResult: txId# 281474976710760, status# StatusAccepted 2025-06-30T09:23:36.747275Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:647: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976710760 Issue: '' } 2025-06-30T09:23:36.747793Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:36.757100Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:36.757112Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 281474976710760 2025-06-30T09:23:36.757137Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:633: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-30T09:23:36.757444Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:36.757465Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:36.757468Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1222: TImport::TTxProgress: OnAllocateResult: txId# 281474976710761, id# 281474976715665 2025-06-30T09:23:36.757475Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:524: TImport::TTxProgress: Restore propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: 
Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710761 2025-06-30T09:23:36.757570Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:36.757649Z node 52 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976710761:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-30T09:23:36.758000Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:36.758009Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1318: TImport::TTxProgress: OnModifyResult: txId# 281474976710761, status# StatusAccepted 2025-06-30T09:23:36.758021Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:647: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976710761 Issue: '' } 2025-06-30T09:23:36.758245Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete REQUEST: HEAD /test_bucket/DyNumberTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:13337 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C624F5B0-2EFD-42B7-84D3-BA7DA508D1C7 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250630/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=1eb1ea2e1c0b275e1d9d4fd45070f06f566ed71dac6320e822e6d99bc489e201 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250630T092336Z S3_MOCK::HttpServeRead: /test_bucket/DyNumberTable/data_00.csv / 7 REQUEST: GET /test_bucket/DyNumberTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:13337 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B8700203-EBDE-457B-887F-C49D4D98C115 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250630/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=6092ba766fd9a2c8fe5000a0dd70ccfb68b9919dafadfc5ea62a11699245a126 content-type: application/xml range: bytes=0-6 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250630T092336Z S3_MOCK::HttpServeRead: /test_bucket/DyNumberTable/data_00.csv / 7 2025-06-30T09:23:36.774575Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:36.774588Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 
281474976710761 2025-06-30T09:23:36.775267Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:36.939573Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [52:7521670641963026013:2354] [0] Resolve database: name# /Root 2025-06-30T09:23:36.939714Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [52:7521670641963026013:2354] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:23:36.939720Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [52:7521670641963026013:2354] [0] Send request: schemeShardId# 72057594046644480 2025-06-30T09:23:36.939902Z node 52 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [52:7521670641963026013:2354] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715665 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:13337" scheme: HTTP bucket: "test_bucket" items { source_prefix: "DyNumberTable" destination_path: "/Root/DyNumberTable" } } StartTime { seconds: 1751275416 } EndTime { seconds: 1751275416 } } 2025-06-30T09:23:36.954230Z node 52 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [52:7521670641963024439:2140] Handle TEvExecuteKqpTransaction 2025-06-30T09:23:36.954250Z node 52 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [52:7521670641963024439:2140] TxId# 281474976715666 ProcessProposeKqpTransaction 2025-06-30T09:23:36.954541Z node 52 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jz02cbbc1dq0tb812c88q7qz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=52&id=MTNlZjRiNzItOGIwZGIzM2UtZDA0YmExYTktN2U5NTA2ZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> BackupPathTest::ChecksumsForSchemaMappingFiles [GOOD] Test command err: 2025-06-30T09:23:01.208451Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670491750590636:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:01.208594Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002796/r3tmp/tmpPw4FnZ/pdisk_1.dat 2025-06-30T09:23:01.290037Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:01.309558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:01.309591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:01.311233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17795, node 1 2025-06-30T09:23:01.319574Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:23:01.343002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:01.343018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:01.343021Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:01.343074Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13982 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:01.412001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:23:01.714104Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670491750591532:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:01.714135Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:01.714998Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670491750591544:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:01.715202Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521670491750590816:2142] Handle TEvProposeTransaction 2025-06-30T09:23:01.715211Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521670491750590816:2142] TxId# 281474976715658 ProcessProposeTransaction 2025-06-30T09:23:01.715228Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7521670491750590816:2142] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7521670491750591547:2600] 2025-06-30T09:23:01.728202Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7521670491750591547:2600] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-30T09:23:01.728257Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7521670491750591547:2600] txid# 281474976715658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:23:01.728261Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7521670491750591547:2600] txid# 281474976715658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-30T09:23:01.728756Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7521670491750591547:2600] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:23:01.728771Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7521670491750591547:2600] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:23:01.728801Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7521670491750591547:2600] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:23:01.728849Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7521670491750591547:2600] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:23:01.728863Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7521670491750591547:2600] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-30T09:23:01.728912Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7521670491750591547:2600] txid# 281474976715658 HANDLE EvClientConnected 2025-06-30T09:23:01.728951Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521670491750591572:2606], Recipient [1:7521670491750590938:2210]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:01.728957Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:01.728960Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:23:01.728967Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521670491750591547:2600], Recipient [1:7521670491750590938:2210]: {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-30T09:23:01.728969Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:23:01.729761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: ".metadata/workload_manager/pools/default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } TxId: 281474976715658 TabletId: 72057594046644480 Owner: "metadata@system" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:23:01.729896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-30T09:23:01.729939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: .metadata, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-30T09:23:01.729954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-30T09:23:01.729959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976715658:0 type: TxMkDir target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-30T09:23:01.729976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:23:01.729980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976715658:1, at schemeshard: 72057594046644480 2025-06-30T09:23:01.729988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: 
AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 2], parent name: .metadata, child name: workload_manager, child id: [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-30T09:23:01.729990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 0 2025-06-30T09:23:01.729992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976715658:1 type: TxMkDir target ... ackup_restore_common.h:233: TRestore TProposedWaitParts, opId: 281474976710765:0 HandleReply TEvSchemaChanged at tablet# 72057594046644480 message# Source { RawX1: 7521670644266934294 RawX2: 4503861620377920 } Origin: 72075186224037894 State: 2 TxId: 281474976710765 Step: 0 Generation: 1 OpResult { Success: false Explain: "Checksum mismatch for Prefix/Table2/data_00.csv expected# f3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, got# e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-30T09:23:37.339790Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710765:0, shardIdx: 72057594046644480:7, shard: 72075186224037894, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-30T09:23:37.339795Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710765:0, at schemeshard: 72057594046644480 2025-06-30T09:23:37.339798Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710765:0, datashard: 72075186224037894, at schemeshard: 72057594046644480 2025-06-30T09:23:37.339802Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710765:0 129 -> 240 2025-06-30T09:23:37.339841Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 281474976710765:0, reason# domain is not a serverless db, domain# /Root, domainPathId# [OwnerId: 72057594046644480, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046644480, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-30T09:23:37.339892Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:23:37.340238Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976710765:0, at schemeshard: 72057594046644480 2025-06-30T09:23:37.340249Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:37.340253Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976710765:0 2025-06-30T09:23:37.340268Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [61:7521670644266934294:2368] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710765 at schemeshard: 72057594046644480 2025-06-30T09:23:37.340301Z node 61 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435072, Sender [61:7521670635676997547:2196], Recipient [61:7521670635676997547:2196]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-30T09:23:37.340307Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-30T09:23:37.340314Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710765:0, at schemeshard: 72057594046644480 2025-06-30T09:23:37.340318Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976710765:0 ProgressState 2025-06-30T09:23:37.340331Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-30T09:23:37.340336Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710765:0 progress is 1/1 2025-06-30T09:23:37.340338Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710765 ready parts: 1/1 2025-06-30T09:23:37.340341Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710765:0 progress is 1/1 2025-06-30T09:23:37.340342Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710765 ready parts: 1/1 2025-06-30T09:23:37.340344Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710765, ready parts: 1/1, is published: true 2025-06-30T09:23:37.340351Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [61:7521670635676997547:2196] message: TxId: 281474976710765 2025-06-30T09:23:37.340359Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710765 ready parts: 1/1 2025-06-30T09:23:37.340366Z node 61 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710765:0 2025-06-30T09:23:37.340368Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710765:0 2025-06-30T09:23:37.340394Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 3 2025-06-30T09:23:37.340681Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-30T09:23:37.340702Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [61:7521670635676997547:2196] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710765 at schemeshard: 72057594046644480 2025-06-30T09:23:37.340736Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124998, Sender [61:7521670635676997547:2196], Recipient [61:7521670635676997547:2196]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710765 2025-06-30T09:23:37.340743Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5119: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 
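
The cancellation path above is driven by the checksum check on Prefix/Table2/data_00.csv: the datashard reports got# e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, which is the SHA-256 digest of an empty byte string, while the stored expected# value differs only in the first hex digit (f3b0... vs e3b0...), so the restore part fails and the import is cancelled a few entries below. A minimal sketch of the same kind of verification follows, assuming the checksum is SHA-256 over the raw file bytes (the digest length and the empty-input value in the log are consistent with that, but the exact hashing scheme is not shown here).

import hashlib

def sha256_hex(path, chunk_size=1 << 20):
    """Stream a file through SHA-256 and return the lowercase hex digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

def verify(path, expected_hex):
    """Raise on mismatch, mirroring the error text emitted in the log above."""
    got = sha256_hex(path)
    if got != expected_hex:
        raise ValueError(
            f"Checksum mismatch for {path} expected# {expected_hex}, got# {got}")

# SHA-256 of empty input, matching the got# value reported by the datashard:
assert hashlib.sha256(b"").hexdigest() == (
    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
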
2025-06-30T09:23:37.340744Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710765 2025-06-30T09:23:37.340746Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710765 2025-06-30T09:23:37.340754Z node 61 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:37.340759Z node 61 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 281474976710765 2025-06-30T09:23:37.340788Z node 61 :IMPORT NOTICE: schemeshard_import__create.cpp:757: TImport::TTxProgress: issues during restore, cancelling, info# { Id: 281474976715672 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Prefix_6/Table2' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 20] State: Transferring SubState: Subscribed WaitTxId: 0 Issue: 'shard: 72057594046644480:7, error: Checksum mismatch for Prefix/Table2/data_00.csv expected# f3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, got# e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' } 2025-06-30T09:23:37.340806Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_xxport__tx_base.h:63: SendNotifications: : id# 281474976715672, subscribers count# 0 2025-06-30T09:23:37.341053Z node 61 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:37.341512Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [61:7521670644266934426:3883], Recipient [61:7521670635676997547:2196]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:23:37.341521Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:23:37.341523Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:23:37.368019Z node 61 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [61:7521670644266934439:2372] [0] Resolve database: name# /Root 2025-06-30T09:23:37.368150Z node 61 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [61:7521670644266934439:2372] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:23:37.368158Z node 61 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [61:7521670644266934439:2372] [0] Send request: schemeShardId# 72057594046644480 2025-06-30T09:23:37.368224Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [61:7521670644266934442:3897], Recipient [61:7521670635676997547:2196]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:37.368235Z node 61 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:37.368237Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:23:37.368264Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 275251202, Sender [61:7521670644266934439:2372], Recipient [61:7521670635676997547:2196]: NKikimrImport.TEvGetImportRequest Request { Id: 281474976715672 } DatabaseName: "/Root" 2025-06-30T09:23:37.368270Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5073: StateWork, processing event TEvImport::TEvGetImportRequest 2025-06-30T09:23:37.368347Z node 61 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [61:7521670644266934439:2372] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715672 Status: CANCELLED Issues { message: "shard: 72057594046644480:7, error: Checksum mismatch for Prefix/Table2/data_00.csv expected# f3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, got# e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" severity: 1 } Progress: PROGRESS_CANCELLED ImportFromS3Settings { endpoint: "localhost:15078" scheme: HTTP bucket: "test_bucket" source_prefix: "Prefix" destination_path: "/Root/Prefix_6" } StartTime { seconds: 1751275417 } EndTime { seconds: 1751275417 } } 2025-06-30T09:23:37.368521Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [61:7521670644266934442:3897], Recipient [61:7521670635676997547:2196]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:23:37.368528Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:23:37.368529Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sequence_reboots/unittest >> TSequenceReboots::CreateMultipleSequencesHaveInitialSequenceShard [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:52.865148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:52.865170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: 
Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:52.865174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:52.865178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:52.865183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:52.865186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:52.865193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:52.865206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:52.865295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:52.865375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:52.879005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:52.879029Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:52.879118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:52.883313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:52.884542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:52.884586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:52.887535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:52.887605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:52.887790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:52.887860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:52.889168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:52.889221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:52.889540Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:52.889554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:52.889565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:52.889574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:52.889581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:52.889614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:52.891979Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:52.919353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:52.919466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:52.919546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:52.919556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:52.919608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:52.919625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:52.920685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:52.920744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: 
StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:52.920821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:52.920834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:52.920841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:52.920848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:52.921462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:52.921477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:52.921484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:52.921930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:52.921943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:52.921950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:52.921959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:52.922643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:52.923817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:52.923873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:52.924128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:52.924164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
:25.015283Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-30T09:23:25.015289Z node 83 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 1 2025-06-30T09:23:25.015292Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [83:363:2341] 2025-06-30T09:23:25.015295Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-30T09:23:25.015727Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:23:25.016107Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:25.016140Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:23:25.016167Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-30T09:23:25.016172Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-30T09:23:25.016190Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [83:363:2341] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 1003 at schemeshard: 72057594046678944 2025-06-30T09:23:25.016205Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:23:25.016211Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [83:439:2416] 2025-06-30T09:23:25.016254Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [83:443:2420], Recipient [83:128:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:23:25.016259Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:23:25.016265Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1005 TestWaitNotification: OK eventTxId 1004 TestWaitNotification: OK eventTxId 1003 2025-06-30T09:23:25.016349Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [83:490:2467], Recipient [83:128:2152]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/seq1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-30T09:23:25.016354Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:23:25.016369Z node 83 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme 
DoExecute, record: Path: "/MyRoot/seq1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:23:25.016424Z node 83 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/seq1" took 44us result status StatusSuccess 2025-06-30T09:23:25.016522Z node 83 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/seq1" PathDescription { Self { Name: "seq1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeSequence CreateFinished: true CreateTxId: 1005 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SequenceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SequenceDescription { Name: "seq1" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SequenceShard: 72075186233409546 MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 Cache: 1 Increment: 1 Cycle: false DataType: "Int64" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:25.016627Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [83:491:2468], Recipient [83:128:2152]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/seq2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-30T09:23:25.016631Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:23:25.016638Z node 83 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/seq2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:23:25.016653Z node 83 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/seq2" took 15us result status StatusSuccess 2025-06-30T09:23:25.016682Z node 83 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme 
DoComplete, result: Status: StatusSuccess Path: "/MyRoot/seq2" PathDescription { Self { Name: "seq2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeSequence CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SequenceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SequenceDescription { Name: "seq2" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 1 SequenceShard: 72075186233409546 MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 Cache: 1 Increment: 1 Cycle: false DataType: "Int64" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:25.016759Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [83:492:2469], Recipient [83:128:2152]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/seq3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-30T09:23:25.016764Z node 83 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:23:25.016770Z node 83 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/seq3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:23:25.016780Z node 83 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/seq3" took 10us result status StatusSuccess 2025-06-30T09:23:25.016807Z node 83 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/seq3" PathDescription { Self { Name: "seq3" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeSequence CreateFinished: true CreateTxId: 1003 CreateStep: 5000006 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SequenceVersion: 1 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SequenceDescription { Name: "seq3" PathId { OwnerId: 72057594046678944 LocalId: 6 } Version: 1 SequenceShard: 72075186233409546 MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 Cache: 1 Increment: 1 Cycle: false DataType: "Int64" } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeErrorEvWrite [GOOD] Test command err: 2025-06-30T09:23:20.007370Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:630:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:20.007484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:20.007533Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:23:20.007613Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:627:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:20.007659Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:20.007691Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004757/r3tmp/tmpuZ0ftU/pdisk_1.dat 2025-06-30T09:23:20.141961Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:20.254473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:20.391813Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:20.391933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:20.403200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:20.403250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:20.420546Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:23:20.420752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:20.420895Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:20.701630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:21.330642Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1506:2912], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:21.330690Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1516:2917], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:21.330873Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:21.332861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:21.504707Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:21.504775Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:21.938274Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1520:2920], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:23:22.112902Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1658:2998] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:22.194008Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:96: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Begin literal execution. Operation timeout: 0.000000s, cancelAfter: (empty maybe) 2025-06-30T09:23:22.194106Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:125: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Begin literal execution, txs: 1 2025-06-30T09:23:22.194122Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-30T09:23:22.194132Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:135: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (ToStream (Just (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3)))))) )))) ) 2025-06-30T09:23:22.194152Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:241: Create result channelId: 1 from task: 1 with index: 0 2025-06-30T09:23:22.195216Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:275: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Execution is complete, results: 1 2025-06-30T09:23:22.197641Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:96: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jz02bw3hbvayywwz71cxxh8b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjI1MzA5YjAtM2E2NDY3Mi02MTNlMjcxMC1kMjgwZmFiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Begin literal execution. Operation timeout: 299.423985s, cancelAfter: (empty maybe) 2025-06-30T09:23:22.197661Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:125: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jz02bw3hbvayywwz71cxxh8b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjI1MzA5YjAtM2E2NDY3Mi02MTNlMjcxMC1kMjgwZmFiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Begin literal execution, txs: 1 2025-06-30T09:23:22.197670Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-30T09:23:22.197676Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:135: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jz02bw3hbvayywwz71cxxh8b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjI1MzA5YjAtM2E2NDY3Mi02MTNlMjcxMC1kMjgwZmFiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (ToStream (Just (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3)))))) )))) ) 2025-06-30T09:23:22.197684Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:241: Create result channelId: 1 from task: 1 with index: 0 2025-06-30T09:23:22.197868Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:275: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jz02bw3hbvayywwz71cxxh8b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjI1MzA5YjAtM2E2NDY3Mi02MTNlMjcxMC1kMjgwZmFiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Execution is complete, results: 1 2025-06-30T09:23:22.197960Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:198: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jz02bw3hbvayywwz71cxxh8b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjI1MzA5YjAtM2E2NDY3Mi02MTNlMjcxMC1kMjgwZmFiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Bootstrap done, become ReadyState 2025-06-30T09:23:22.198070Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:608: ActorId: [1:1684:2910] TxId: 281474976715660. Ctx: { TraceId: 01jz02bw3hbvayywwz71cxxh8b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjI1MzA5YjAtM2E2NDY3Mi02MTNlMjcxMC1kMjgwZmFiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 2, stages: 1 2025-06-30T09:23:22.198077Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-30T09:23:22.198112Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:623: ActorId: [1:1684:2910] TxId: 281474976715660. Ctx: { TraceId: 01jz02bw3hbvayywwz71cxxh8b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjI1MzA5YjAtM2E2NDY3Mi02MTNlMjcxMC1kMjgwZmFiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got request, become WaitResolveState 2025-06-30T09:23:22.198190Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715660. Resolved key sets: 1 2025-06-30T09:23:22.198233Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715660. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-30T09:23:22.198260Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2035: ActorId: [1:1684:2910] TxId: 281474976715660. Ctx: { TraceId: 01jz02bw3hbvayywwz71cxxh8b, Database: , DatabaseId: /Root, SessionId: ydb://s ... TUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:25.799127Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-30T09:23:25.799193Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [3:1959:3156] 2025-06-30T09:23:25.799203Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:1959:3156], channels: 0 2025-06-30T09:23:25.799212Z node 3 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-30T09:23:25.799216Z node 3 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2809: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Updating channels after the creation of compute actors 2025-06-30T09:23:25.799219Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [3:1959:3156] 2025-06-30T09:23:25.799222Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:1959:3156], channels: 0 2025-06-30T09:23:25.799229Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [3:1959:3156], 2025-06-30T09:23:25.799234Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:1959:3156], 2025-06-30T09:23:25.799238Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [3:1956:3156] TxId: 281474976715672. 
Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-30T09:23:25.799441Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:1959:3156], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-30T09:23:25.799452Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [3:1959:3156], 2025-06-30T09:23:25.799456Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:1959:3156], 2025-06-30T09:23:25.799634Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:1959:3156], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 146 Tasks { TaskId: 1 CpuTimeUs: 36 FinishTimeMs: 1751275405799 EgressBytes: 10 EgressRows: 1 ComputeCpuTimeUs: 9 BuildCpuTimeUs: 27 HostName: "ghrun-x4qzxsyz2q" NodeId: 3 CreateTimeMs: 1751275405799 UpdateTimeMs: 1751275405799 } MaxMemoryUsage: 1048576 } 2025-06-30T09:23:25.799654Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [3:1959:3156] 2025-06-30T09:23:25.799667Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:276: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send Commit to BufferActor=[3:1955:3156] 2025-06-30T09:23:25.799677Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.000146s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-30T09:23:25.813143Z node 3 :KQP_COMPUTE WARN: kqp_write_actor.cpp:715: SelfId: [3:1962:3156], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:1946:3156]Got OUT_OF_SPACE for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:1962:3156]. Ignored this error. 2025-06-30T09:23:25.813198Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:1955:3156], SessionActorId: [3:1946:3156], statusCode=OVERLOADED. Issue=
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 . sessionActorId=[3:1946:3156]. isRollback=0 2025-06-30T09:23:25.813281Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1938: SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, ActorId: [3:1946:3156], ActorState: ExecuteState, TraceId: 01jz02c0ex9gg28hd2w005q1rr, got TEvKqpBuffer::TEvError in ExecuteState, status: OVERLOADED send to: [3:1956:3156] from: [3:1955:3156] 2025-06-30T09:23:25.813324Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:814: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got EvAbortExecution, status: OVERLOADED, message: {
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 } 2025-06-30T09:23:25.813339Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. OVERLOADED: {
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 } 2025-06-30T09:23:25.813354Z node 3 :KQP_EXECUTER INFO: kqp_executer_impl.h:1952: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. task: 1, does not have the CA id yet or is already complete 2025-06-30T09:23:25.813418Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2063: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ReplyErrorAndDie. Response: Status: OVERLOADED Issues { message: "Tablet 72075186224037888 is out of space. Table `/Root/table-1`." issue_code: 2006 severity: 1 } Result { Stats { CpuTimeUs: 146 } } , to ActorId: [3:1946:3156] 2025-06-30T09:23:25.813428Z node 3 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2877: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shutdown immediately - nothing to wait 2025-06-30T09:23:25.813462Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:23:25.813468Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2203: ActorId: [3:1956:3156] TxId: 281474976715672. Ctx: { TraceId: 01jz02c0ex9gg28hd2w005q1rr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Terminate, become ZombieState 2025-06-30T09:23:25.813497Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=3&id=OWJmYTYzNjMtMWZjNzgxMzctZmU2ZDc5MTUtZmZlMjhiMDQ=, ActorId: [3:1946:3156], ActorState: ExecuteState, TraceId: 01jz02c0ex9gg28hd2w005q1rr, Create QueryResponse for error on request, msg: |82.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |82.8%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::ConcurrentUpdateTable [GOOD] Test command err: 2025-06-30T09:23:19.763047Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670567023074633:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:19.763180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0026cf/r3tmp/tmpVWkXyg/pdisk_1.dat 2025-06-30T09:23:19.854432Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:19.862838Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670567023074440:2080] 1751275399757449 != 1751275399757452 2025-06-30T09:23:19.865469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:19.865505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:19.876094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26679 TServer::EnableGrpc on GrpcPort 5983, node 1 2025-06-30T09:23:19.919178Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:19.919199Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:19.919203Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:19.919265Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-30T09:23:19.939241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:19.942209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:23:20.483472Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-30T09:23:20.484147Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-30T09:23:20.497885Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:20.497924Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:20.497962Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-30T09:23:20.497969Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-30T09:23:20.497980Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-30T09:23:20.498141Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:20.498731Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-30T09:23:20.498764Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-30T09:23:20.498772Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-30T09:23:20.498805Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-30T09:23:20.498806Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-30T09:23:20.498810Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. 
Full table path:/dc-1/.metadata/result_sets 2025-06-30T09:23:20.498841Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-30T09:23:20.498852Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-30T09:23:20.498855Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-30T09:23:20.499770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:20.501044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:20.501282Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-30T09:23:20.501292Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976715658 2025-06-30T09:23:20.501470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:20.501807Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-30T09:23:20.501811Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976715660 2025-06-30T09:23:20.501820Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-30T09:23:20.501822Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976715659 2025-06-30T09:23:20.527535Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976715658. Doublechecking... 2025-06-30T09:23:20.537164Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976715660. Doublechecking... 2025-06-30T09:23:20.540316Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976715659. Doublechecking... 
2025-06-30T09:23:20.589551Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-30T09:23:20.613581Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-30T09:23:20.617183Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-30T09:23:20.618928Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: a1c562bd-3b152627-ecc3aa39-50734032, Bootstrap. Database: /dc-1 2025-06-30T09:23:20.629268Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444992798308.922361s seconds to be completed 2025-06-30T09:23:20.630096Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=OWU0YTI4YzktNjJkZjRiYmUtZWRjY2M2MmUtZWE4NmViZWE=, workerId: [1:7521670571318042588:2292], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-30T09:23:20.630129Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-30T09:23:20.630647Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: a1c562bd-3b152627-ecc3aa39-50734032, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-30T09:23:20.630867Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=OWU0YTI4YzktNjJkZjRiYmUtZWRjY2M2MmUtZWE4NmViZWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Sen ... 
Request has 18444992798307.603072s seconds to be completed 2025-06-30T09:23:21.948865Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=NDUzN2ExZmQtNDU5Yzg2YmUtZGIxYzkxYjItN2UwOGNjYw==, workerId: [2:7521670574290964819:2349], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-30T09:23:21.948886Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-30T09:23:21.948965Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 37ce7b17-56d27aee-4775b8d4-3ff912e0, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-30T09:23:21.949094Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NDUzN2ExZmQtNDU5Yzg2YmUtZGIxYzkxYjItN2UwOGNjYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 14, targetId: [2:7521670574290964819:2349] 2025-06-30T09:23:21.949103Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 14 timeout: 300.000000s actor id: [2:7521670574290964821:2692] 2025-06-30T09:23:21.952256Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 14, sender: [2:7521670574290964820:2350], selfId: [2:7521670574290963628:2078], source: [2:7521670574290964819:2349] 2025-06-30T09:23:21.952365Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 37ce7b17-56d27aee-4775b8d4-3ff912e0, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDUzN2ExZmQtNDU5Yzg2YmUtZGIxYzkxYjItN2UwOGNjYw==, TxId: 2025-06-30T09:23:21.952372Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 37ce7b17-56d27aee-4775b8d4-3ff912e0, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDUzN2ExZmQtNDU5Yzg2YmUtZGIxYzkxYjItN2UwOGNjYw==, TxId: 2025-06-30T09:23:21.952469Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 37ce7b17-56d27aee-4775b8d4-3ff912e0, Bootstrap. 
Database: /dc-1 2025-06-30T09:23:21.952501Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NDUzN2ExZmQtNDU5Yzg2YmUtZGIxYzkxYjItN2UwOGNjYw==, workerId: [2:7521670574290964819:2349], local sessions count: 2 2025-06-30T09:23:21.952520Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444992798307.599099s seconds to be completed 2025-06-30T09:23:21.952975Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=ZWRlYzRiMTctOGEwYzJkZDQtZTZlMDhhZTQtYTAwMDhmMDU=, workerId: [2:7521670574290964843:2358], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-30T09:23:21.952997Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-30T09:23:21.953061Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 37ce7b17-56d27aee-4775b8d4-3ff912e0, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-30T09:23:21.953149Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZWRlYzRiMTctOGEwYzJkZDQtZTZlMDhhZTQtYTAwMDhmMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 16, targetId: [2:7521670574290964843:2358] 2025-06-30T09:23:21.953154Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 16 timeout: 300.000000s actor id: [2:7521670574290964845:2697] 2025-06-30T09:23:21.987263Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-30T09:23:21.997511Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-30T09:23:21.997878Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-30T09:23:21.998443Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-30T09:23:22.003190Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-30T09:23:22.016414Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-30T09:23:22.020514Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. 
Column diff is empty, finishing 2025-06-30T09:23:22.023550Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 16, sender: [2:7521670574290964844:2359], selfId: [2:7521670574290963628:2078], source: [2:7521670574290964843:2358] 2025-06-30T09:23:22.023681Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 37ce7b17-56d27aee-4775b8d4-3ff912e0, State: Get operation info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZWRlYzRiMTctOGEwYzJkZDQtZTZlMDhhZTQtYTAwMDhmMDU=, TxId: 01jz02bws34ge47k9vbjp0spag 2025-06-30T09:23:22.023855Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 37ce7b17-56d27aee-4775b8d4-3ff912e0, State: Get operation info, RunDataQuery: -- TSaveScriptFinalStatusActor::FinishScriptExecution DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $operation_status AS Int32; DECLARE $execution_status AS Int32; DECLARE $finalization_status AS Int32; DECLARE $issues AS JsonDocument; DECLARE $plan AS JsonDocument; DECLARE $stats AS JsonDocument; DECLARE $ast AS Optional; DECLARE $ast_compressed AS Optional; DECLARE $ast_compression_method AS Optional; DECLARE $operation_ttl AS Interval; DECLARE $customer_supplied_id AS Text; DECLARE $user_token AS Text; DECLARE $script_sinks AS Optional; DECLARE $script_secret_names AS Optional; DECLARE $applicate_script_external_effect_required AS Bool; UPDATE `.metadata/script_executions` SET operation_status = $operation_status, execution_status = $execution_status, finalization_status = IF($applicate_script_external_effect_required, $finalization_status, NULL), issues = $issues, plan = $plan, end_ts = CurrentUtcTimestamp(), stats = $stats, ast = $ast, ast_compressed = $ast_compressed, ast_compression_method = $ast_compression_method, expire_at = IF($operation_ttl > CAST(0 AS Interval), CurrentUtcTimestamp() + $operation_ttl, NULL), customer_supplied_id = IF($applicate_script_external_effect_required, $customer_supplied_id, NULL), user_token = IF($applicate_script_external_effect_required, $user_token, NULL), script_sinks = IF($applicate_script_external_effect_required, $script_sinks, NULL), script_secret_names = IF($applicate_script_external_effect_required, $script_secret_names, NULL) WHERE database = $database AND execution_id = $execution_id; DELETE FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id; 2025-06-30T09:23:22.024045Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZWRlYzRiMTctOGEwYzJkZDQtZTZlMDhhZTQtYTAwMDhmMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 17, targetId: [2:7521670574290964843:2358] 2025-06-30T09:23:22.024058Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 17 timeout: 300.000000s actor id: [2:7521670578585932181:2716] 2025-06-30T09:23:22.027722Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-30T09:23:22.032853Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-30T09:23:22.038945Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. 
Column diff is empty, finishing 2025-06-30T09:23:22.047896Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NjZhNTVlYmItNzM1Mjk2ZmQtZGUzYzc0Y2ItZWQ4NmFhNTk=, workerId: [2:7521670574290964564:2326], local sessions count: 2 2025-06-30T09:23:22.097799Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 17, sender: [2:7521670578585932180:2370], selfId: [2:7521670574290963628:2078], source: [2:7521670574290964843:2358] 2025-06-30T09:23:22.097943Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 37ce7b17-56d27aee-4775b8d4-3ff912e0, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZWRlYzRiMTctOGEwYzJkZDQtZTZlMDhhZTQtYTAwMDhmMDU=, TxId: 2025-06-30T09:23:22.097974Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 37ce7b17-56d27aee-4775b8d4-3ff912e0, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZWRlYzRiMTctOGEwYzJkZDQtZTZlMDhhZTQtYTAwMDhmMDU=, TxId: 2025-06-30T09:23:22.097978Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2658: [ScriptExecutions] Finish script execution operation. ExecutionId: 37ce7b17-56d27aee-4775b8d4-3ff912e0. SUCCESS. Issues: 2025-06-30T09:23:22.098549Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=YjhlOWY0MmItZTY4MGIzZDItMjdlMDY0NzEtMTg5ZjU0NTQ=, workerId: [2:7521670574290964533:2310], local sessions count: 1 2025-06-30T09:23:22.098658Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZWRlYzRiMTctOGEwYzJkZDQtZTZlMDhhZTQtYTAwMDhmMDU=, workerId: [2:7521670574290964843:2358], local sessions count: 0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::Mirror3dcPermissions [GOOD] Test command err: 2025-06-30T09:23:21.480547Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:21.481056Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:21.486409Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:21.486520Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:21.486967Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:21.487093Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:21.487897Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:21.487933Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:21.487982Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-30T09:23:21.488093Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:21.489838Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:21.489895Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:21.489933Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:21.489965Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:21.522901Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:21.545946Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:21.546113Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.548217Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:21.548615Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:21.548629Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:21.548645Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:21.548651Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:21.548673Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:21.548719Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:21.548756Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:21.552124Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { 
NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { 
NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-30T09:23:21.583726Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:21.583813Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:21.628285Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:21.628346Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:21.628458Z node 1 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:21.628920Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: 
"pdisk-4-4" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "stor ... scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:27.528091Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:27.528193Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:69: Loaded config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:27.528222Z node 25 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:27.528314Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:27.528373Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 } 2025-06-30T09:23:27.550953Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:27.620301Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:27.620424Z node 25 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:27.623034Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-30T09:23:27.623065Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 2025-06-30T09:23:27.623081Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 30, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 3 2025-06-30T09:23:27.623144Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Number of data centers with unavailable vdisks: 3. Locked: VDisk [0:1:1:5:0] (::1:/30/pdisk-270.data) is locked by this request. Down: Host ::1:12008 (32) is down, Host ::1:12005 (29) is down, Host ::1:12002 (26) is down) 2025-06-30T09:23:27.623245Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Number of data centers with unavailable vdisks: 3. Locked: VDisk [0:1:1:5:0] (::1:/30/pdisk-270.data) is locked by this request. 
Down: Host ::1:12008 (32) is down, Host ::1:12005 (29) is down, Host ::1:12002 (26) is down" } Deadline: 420242000 } 2025-06-30T09:23:27.623461Z node 25 :CMS INFO: cms.cpp:104: OnTabletDead: 72057594037936128 2025-06-30T09:23:27.623468Z node 25 :CMS DEBUG: cms.cpp:1208: TCms::Cleanup 2025-06-30T09:23:27.625903Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:27.626609Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:27.626656Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:27.626988Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:27.627063Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:27.627163Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:27.627219Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:69: Loaded config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:27.627236Z node 25 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:27.627289Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:27.627318Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 } 2025-06-30T09:23:27.649998Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:27.716213Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:27.716301Z node 25 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:27.717616Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "32" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-30T09:23:27.717625Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "32" Services: "storage" Duration: 60000000 2025-06-30T09:23:27.717634Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 32, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 2 2025-06-30T09:23:27.717675Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Number of data centers with unavailable vdisks: 3. Locked: VDisk [0:1:2:7:0] (::1:/32/pdisk-288.data) is locked by this request. 
Down: Host ::1:12005 (29) is down, Host ::1:12002 (26) is down) 2025-06-30T09:23:27.717748Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "32" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Number of data centers with unavailable vdisks: 3. Locked: VDisk [0:1:2:7:0] (::1:/32/pdisk-288.data) is locked by this request. Down: Host ::1:12005 (29) is down, Host ::1:12002 (26) is down" } Deadline: 420348000 } 2025-06-30T09:23:27.728773Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:27.744088Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:27.744202Z node 25 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:27.745462Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-30T09:23:27.745473Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 2025-06-30T09:23:27.745485Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 30, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 2 2025-06-30T09:23:27.745722Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:27.745800Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } Deadline: 180448000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 30 InterconnectPort: 12006 } } } } 2025-06-30T09:23:27.745969Z node 25 :CMS INFO: cms.cpp:104: OnTabletDead: 72057594037936128 2025-06-30T09:23:27.745974Z node 25 :CMS DEBUG: cms.cpp:1208: TCms::Cleanup 2025-06-30T09:23:27.747792Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:27.748479Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:27.748530Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:27.748828Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:27.748899Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:27.748996Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:27.749062Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:69: Loaded config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } 
SentinelConfig { Enable: false } 2025-06-30T09:23:27.749082Z node 25 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:27.749131Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:27.749170Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 } 2025-06-30T09:23:27.771100Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:27.795638Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:27.795727Z node 25 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:27.797465Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "27" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-30T09:23:27.797493Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "27" Services: "storage" Duration: 60000000 2025-06-30T09:23:27.797508Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 3 2025-06-30T09:23:27.797571Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/27/pdisk-243.data) is locked by this request. Down: Host ::1:12006 (30) is down, Host ::1:12005 (29) is down, Host ::1:12002 (26) is down) 2025-06-30T09:23:27.797646Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "27" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/27/pdisk-243.data) is locked by this request. 
Down: Host ::1:12006 (30) is down, Host ::1:12005 (29) is down, Host ::1:12002 (26) is down" } Deadline: 420554000 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::SimpleUpdateTable [GOOD] Test command err: 2025-06-30T09:23:20.143194Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670570147286671:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:20.143300Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0026b0/r3tmp/tmp2KSjUM/pdisk_1.dat 2025-06-30T09:23:20.212201Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670570147286461:2080] 1751275400139053 != 1751275400139056 2025-06-30T09:23:20.214784Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:20.243760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:20.243818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:20.244395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17355 TServer::EnableGrpc on GrpcPort 26769, node 1 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-30T09:23:20.262997Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:20.263022Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:20.263025Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:20.263082Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
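
The DISALLOW_TEMP verdicts in the CMS test output above come from a group availability check: under MODE_KEEP_AVAILABLE a RESTART_SERVICES request is refused once locking one more vdisk would leave unavailable vdisks in too many failure domains ("Number of data centers with unavailable vdisks: 3"). A minimal sketch of that kind of check, assuming a mirror-3-dc-like budget of two affected data centers; the struct, function, and limit are illustrative, not YDB's actual ydb/core/cms code:

// Minimal sketch (not YDB's actual implementation) of the availability check
// behind the DISALLOW_TEMP replies above: under MODE_KEEP_AVAILABLE the CMS
// refuses to lock a vdisk once unavailable vdisks would span too many failure
// domains. The limit of two domains is an assumption for a mirror-3-dc-like
// erasure; the real group checkers live under ydb/core/cms.
#include <iostream>
#include <set>
#include <vector>

struct TVDiskState {
    int DataCenter;        // failure domain the vdisk belongs to
    bool Down;             // host/disk already unavailable
    bool LockedByRequest;  // would be taken down by the pending request
};

// Returns true if granting the request keeps the group within the budget.
bool CanLockGroup(const std::vector<TVDiskState>& group, size_t maxUnavailableDomains = 2) {
    std::set<int> affected;
    for (const auto& d : group) {
        if (d.Down || d.LockedByRequest) {
            affected.insert(d.DataCenter);
        }
    }
    return affected.size() <= maxUnavailableDomains;
}

int main() {
    // Mirrors the rejected request above: hosts already down in two DCs, and
    // the request would lock a vdisk in a third one -> three affected domains.
    std::vector<TVDiskState> group = {
        {1, true, false},   // Host ::1:12005 (29) is down
        {2, true, false},   // Host ::1:12002 (26) is down
        {3, false, true},   // VDisk locked by this request
    };
    std::cout << (CanLockGroup(group) ? "ALLOW" : "DISALLOW_TEMP") << std::endl;
}

With this model, clearing any one of the three domains (one host back up, or the lock placed in an already-affected DC) flips the verdict back to ALLOW, which matches the later request in the log that was granted.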
2025-06-30T09:23:20.288123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:20.299839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:23:20.570779Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-30T09:23:20.571406Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-30T09:23:20.577522Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-30T09:23:20.577543Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-30T09:23:20.577553Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-30T09:23:20.577575Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-30T09:23:20.577602Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:20.577617Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:20.580415Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:20.580462Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-30T09:23:20.581186Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-30T09:23:20.581190Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-30T09:23:20.581202Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-30T09:23:20.581279Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-30T09:23:20.581281Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-30T09:23:20.581285Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-30T09:23:20.581300Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-30T09:23:20.581302Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-30T09:23:20.581313Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. 
Full table path:/dc-1/.metadata/script_execution_leases 2025-06-30T09:23:20.582507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:20.583121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:20.583609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:20.585391Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-30T09:23:20.585408Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976715660 2025-06-30T09:23:20.585625Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-30T09:23:20.585647Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976715659 2025-06-30T09:23:20.585687Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-30T09:23:20.585690Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976715658 2025-06-30T09:23:20.651573Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976715660. Doublechecking... 2025-06-30T09:23:20.674522Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976715658. Doublechecking... 2025-06-30T09:23:20.674596Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976715659. Doublechecking... 2025-06-30T09:23:20.722787Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-30T09:23:20.739251Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-30T09:23:20.769198Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. 
Column diff is empty, finishing 2025-06-30T09:23:20.770687Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 9d8b2094-6f3517ea-bbfb92ae-e35b4425, Bootstrap. Database: /dc-1 2025-06-30T09:23:20.773141Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444992798308.778493s seconds to be completed 2025-06-30T09:23:20.774232Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=NGRlZDYxN2ItNjlkMDQ4MTQtNmUyZWFhZDQtM2Q1MTdkNGQ=, workerId: [1:7521670570147287312:2292], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-30T09:23:20.774289Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-30T09:23:20.774681Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 9d8b2094-6f3517ea-bbfb92ae-e35b4425, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-30T09:23:20.774921Z node 1 :KQP_PROXY DEBUG: ... 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-30T09:23:21.973297Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 462b689b-b61cd80b-3e006c86-c37ff308, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-30T09:23:21.973415Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZjFkYTlhYzItNWYxMTdmODUtNWMyMTNjNWEtZjBjNWRkMjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 14, targetId: [2:7521670573912539154:2348] 2025-06-30T09:23:21.973426Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 14 timeout: 300.000000s actor id: [2:7521670573912539156:2605] 2025-06-30T09:23:21.976975Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 14, sender: [2:7521670573912539155:2349], selfId: [2:7521670573912538130:2147], source: [2:7521670573912539154:2348] 2025-06-30T09:23:21.977068Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 462b689b-b61cd80b-3e006c86-c37ff308, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZjFkYTlhYzItNWYxMTdmODUtNWMyMTNjNWEtZjBjNWRkMjQ=, TxId: 2025-06-30T09:23:21.977085Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 462b689b-b61cd80b-3e006c86-c37ff308, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZjFkYTlhYzItNWYxMTdmODUtNWMyMTNjNWEtZjBjNWRkMjQ=, TxId: 2025-06-30T09:23:21.977169Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 462b689b-b61cd80b-3e006c86-c37ff308, Bootstrap. Database: /dc-1 2025-06-30T09:23:21.977215Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZjFkYTlhYzItNWYxMTdmODUtNWMyMTNjNWEtZjBjNWRkMjQ=, workerId: [2:7521670573912539154:2348], local sessions count: 2 2025-06-30T09:23:21.977230Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444992798307.574389s seconds to be completed 2025-06-30T09:23:21.977736Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=ZTdkZThkNmYtY2M3MjkwZjItZTEyMzRmZDYtNGZmMDBjYzQ=, workerId: [2:7521670573912539178:2357], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-30T09:23:21.977767Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-30T09:23:21.977812Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 462b689b-b61cd80b-3e006c86-c37ff308, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-30T09:23:21.977944Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZTdkZThkNmYtY2M3MjkwZjItZTEyMzRmZDYtNGZmMDBjYzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 16, targetId: [2:7521670573912539178:2357] 2025-06-30T09:23:21.977958Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 16 timeout: 300.000000s actor id: [2:7521670573912539180:2610] 2025-06-30T09:23:21.986889Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-30T09:23:21.987070Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:365: Table test_table updater. Adding columns. New columns: col4, col5. Existing columns: col1, col2, col3 2025-06-30T09:23:21.987085Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table test_table updater. Full table path:/dc-1/test/test_table 2025-06-30T09:23:21.987678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:23:21.988211Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715670 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 } 2025-06-30T09:23:21.988225Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:261: Table test_table updater. Subscribe on create table tx: 281474976715670 2025-06-30T09:23:21.991784Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: alter. Transaction completed: 281474976715670. Doublechecking... 2025-06-30T09:23:22.063637Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. 
Column diff is empty, finishing 2025-06-30T09:23:22.069503Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 16, sender: [2:7521670573912539179:2358], selfId: [2:7521670573912538130:2147], source: [2:7521670573912539178:2357] 2025-06-30T09:23:22.069772Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 462b689b-b61cd80b-3e006c86-c37ff308, State: Get operation info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZTdkZThkNmYtY2M3MjkwZjItZTEyMzRmZDYtNGZmMDBjYzQ=, TxId: 01jz02bwtedsgpkns4xy9g9bxd 2025-06-30T09:23:22.069922Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 462b689b-b61cd80b-3e006c86-c37ff308, State: Get operation info, RunDataQuery: -- TSaveScriptFinalStatusActor::FinishScriptExecution DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $operation_status AS Int32; DECLARE $execution_status AS Int32; DECLARE $finalization_status AS Int32; DECLARE $issues AS JsonDocument; DECLARE $plan AS JsonDocument; DECLARE $stats AS JsonDocument; DECLARE $ast AS Optional; DECLARE $ast_compressed AS Optional; DECLARE $ast_compression_method AS Optional; DECLARE $operation_ttl AS Interval; DECLARE $customer_supplied_id AS Text; DECLARE $user_token AS Text; DECLARE $script_sinks AS Optional; DECLARE $script_secret_names AS Optional; DECLARE $applicate_script_external_effect_required AS Bool; UPDATE `.metadata/script_executions` SET operation_status = $operation_status, execution_status = $execution_status, finalization_status = IF($applicate_script_external_effect_required, $finalization_status, NULL), issues = $issues, plan = $plan, end_ts = CurrentUtcTimestamp(), stats = $stats, ast = $ast, ast_compressed = $ast_compressed, ast_compression_method = $ast_compression_method, expire_at = IF($operation_ttl > CAST(0 AS Interval), CurrentUtcTimestamp() + $operation_ttl, NULL), customer_supplied_id = IF($applicate_script_external_effect_required, $customer_supplied_id, NULL), user_token = IF($applicate_script_external_effect_required, $user_token, NULL), script_sinks = IF($applicate_script_external_effect_required, $script_sinks, NULL), script_secret_names = IF($applicate_script_external_effect_required, $script_secret_names, NULL) WHERE database = $database AND execution_id = $execution_id; DELETE FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id; 2025-06-30T09:23:22.070206Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZTdkZThkNmYtY2M3MjkwZjItZTEyMzRmZDYtNGZmMDBjYzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 17, targetId: [2:7521670573912539178:2357] 2025-06-30T09:23:22.070215Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 17 timeout: 300.000000s actor id: [2:7521670578207506540:2650] 2025-06-30T09:23:22.079332Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=YWVhZWUyYi1lNGMyNWQ4Ny1iM2E4NzcwOC1mMzgxNzAwYg==, workerId: [2:7521670573912538998:2326], local sessions count: 2 2025-06-30T09:23:22.163937Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 17, sender: [2:7521670578207506539:2371], selfId: [2:7521670573912538130:2147], source: [2:7521670573912539178:2357] 2025-06-30T09:23:22.164094Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 462b689b-b61cd80b-3e006c86-c37ff308, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZTdkZThkNmYtY2M3MjkwZjItZTEyMzRmZDYtNGZmMDBjYzQ=, TxId: 2025-06-30T09:23:22.164127Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 462b689b-b61cd80b-3e006c86-c37ff308, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZTdkZThkNmYtY2M3MjkwZjItZTEyMzRmZDYtNGZmMDBjYzQ=, TxId: 2025-06-30T09:23:22.164131Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2658: [ScriptExecutions] Finish script execution operation. ExecutionId: 462b689b-b61cd80b-3e006c86-c37ff308. SUCCESS. Issues: 2025-06-30T09:23:22.164242Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZTdkZThkNmYtY2M3MjkwZjItZTEyMzRmZDYtNGZmMDBjYzQ=, workerId: [2:7521670573912539178:2357], local sessions count: 1 2025-06-30T09:23:22.164530Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZDljZGRjYS1mOWYyZWQzYS1hMzk5ZTYxMi04YTNkNjQxYw==, workerId: [2:7521670573912538970:2310], local sessions count: 0 2025-06-30T09:23:22.247778Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] Test command err: 2025-06-30T09:23:02.050537Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670493063183426:2174];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:02.050565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0026ec/r3tmp/tmpMTQYak/pdisk_1.dat 2025-06-30T09:23:02.167927Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:02.176403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:02.176428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:02.180807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
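
The HIVE warnings just above walk a freshly seen node through the VolatileState ladder Unknown -> Disconnected -> Connecting -> Connected. A toy sketch of that progression; the enum values mirror the log text, but the transition table is an illustration rather than YDB's actual Hive code:

// Sketch of the node VolatileState ladder the HIVE warnings above report.
// The state names come from the log; the linear transition function is an
// assumption for illustration only.
#include <iostream>

enum class EVolatileState { Unknown, Disconnected, Connecting, Connected };

const char* Name(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return "Unknown";
        case EVolatileState::Disconnected: return "Disconnected";
        case EVolatileState::Connecting:   return "Connecting";
        case EVolatileState::Connected:    return "Connected";
    }
    return "?";
}

EVolatileState Next(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return EVolatileState::Disconnected;
        case EVolatileState::Disconnected: return EVolatileState::Connecting;
        case EVolatileState::Connecting:   return EVolatileState::Connected;
        case EVolatileState::Connected:    return EVolatileState::Connected;
    }
    return s;
}

int main() {
    EVolatileState s = EVolatileState::Unknown;
    while (s != EVolatileState::Connected) {
        EVolatileState n = Next(s);
        std::cout << "VolatileState: " << Name(s) << " -> " << Name(n) << "\n";
        s = n;
    }
}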
TServer::EnableGrpc on GrpcPort 19854, node 1 2025-06-30T09:23:02.211022Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:02.211034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:02.211036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:02.211083Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18557 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:02.275100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:02.283188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:23:02.567271Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670493063184236:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:02.567275Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670493063184225:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:02.567291Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:02.567402Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521670493063183494:2140] Handle TEvProposeTransaction 2025-06-30T09:23:02.567418Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521670493063183494:2140] TxId# 281474976715658 ProcessProposeTransaction 2025-06-30T09:23:02.567436Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7521670493063183494:2140] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7521670493063184240:2605] 2025-06-30T09:23:02.576880Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7521670493063184240:2605] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-30T09:23:02.576923Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7521670493063184240:2605] txid# 281474976715658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:23:02.576928Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7521670493063184240:2605] txid# 281474976715658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-30T09:23:02.577293Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7521670493063184240:2605] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:23:02.577302Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7521670493063184240:2605] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:23:02.577327Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7521670493063184240:2605] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:23:02.577361Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7521670493063184240:2605] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:23:02.577372Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7521670493063184240:2605] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-30T09:23:02.577415Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7521670493063184240:2605] txid# 281474976715658 HANDLE EvClientConnected 2025-06-30T09:23:02.577746Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:02.579638Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7521670493063184240:2605] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-06-30T09:23:02.579659Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7521670493063184240:2605] txid# 281474976715658 SEND to# [1:7521670493063184239:2303] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-06-30T09:23:02.589564Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670493063184239:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:23:02.679848Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521670493063183494:2140] Handle TEvProposeTransaction 2025-06-30T09:23:02.679866Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521670493063183494:2140] TxId# 281474976715659 ProcessProposeTransaction 2025-06-30T09:23:02.679890Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7521670493063183494:2140] Cookie# 0 userReqId# "" txid# 281474976715659 SEND to# [1:7521670493063184310:2655] 2025-06-30T09:23:02.680870Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7521670493063184310:2655] txid# 281474976715659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-30T09:23:02.680900Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7521670493063184310:2655] txid# 281474976715659 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:23:02.680904Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7521670493063184310:2655] txid# 281474976715659 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-30T09:23:02.681119Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7521670493063184310:2655] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:23:02.681133Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7521670493063184310:2655] txid# 281474976715659 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:23:02.681232Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7521670493063184310:2655] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:23:02.681306Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7521670493063184310:2655] HANDLE EvNavigateKeySetResult, txid# 281474976715659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T0 ... 
tResponse: self# [46:7521670615219071535:2201], result# HeadObjectResult { ETag: bbd61e62ca46a1a9addf08d5c7507ce3 ContentLength: 365 } REQUEST: GET /test_bucket/JsonDocumentTable/scheme.pb HTTP/1.1 HEADERS: Host: localhost:14636 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: F3B54FD3-7213-45A8-92E9-37B682270889 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250630/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=15a87c07af814f60a84d98fce94ddf74b6b2d912b65f66f84e948c3da754aef0 content-type: application/xml range: bytes=0-364 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250630T092330Z S3_MOCK::HttpServeRead: /test_bucket/JsonDocumentTable/scheme.pb / 365 2025-06-30T09:23:30.716260Z node 46 :IMPORT DEBUG: schemeshard_import_getters.cpp:475: HandleScheme TEvExternalStorage::TEvGetObjectResponse: self# [46:7521670615219071535:2201], result# bbd61e62ca46a1a9addf08d5c7507ce3 2025-06-30T09:23:30.716374Z node 46 :IMPORT INFO: schemeshard_import_getters.cpp:692: Reply: self# [46:7521670615219071535:2201], success# 1, error# 2025-06-30T09:23:30.716412Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:30.716418Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:993: TImport::TTxProgress: OnSchemeResult: id# 281474976715667, itemIdx# 0, success# 1 2025-06-30T09:23:30.716528Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:633: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-30T09:23:30.718475Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:30.718527Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:30.718531Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1222: TImport::TTxProgress: OnAllocateResult: txId# 281474976710760, id# 281474976715667 2025-06-30T09:23:30.718548Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:423: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710760 2025-06-30T09:23:30.718602Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:30.719046Z node 46 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 
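
The HEAD/GET requests above carry AWS Signature V4 headers (authorization: AWS4-HMAC-SHA256 Credential=test_key/20250630/us-east-1/s3/aws4_request, SignedHeaders=..., Signature=...). A sketch of the SigV4 signing-key derivation those signatures are built from, assuming OpenSSL and a made-up secret key (the log never prints it); the canonical-request and string-to-sign steps are omitted:

// Sketch of the AWS Signature V4 signing-key chain behind the
// "authorization: AWS4-HMAC-SHA256 ..." headers above. Only the key
// derivation is shown; the canonical request and string-to-sign are omitted.
// The secret key is an assumption. Link with -lcrypto.
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <cstdio>
#include <string>
#include <vector>

static std::vector<unsigned char> HmacSha256(const std::vector<unsigned char>& key,
                                             const std::string& msg) {
    unsigned char out[EVP_MAX_MD_SIZE];
    unsigned int len = 0;
    HMAC(EVP_sha256(), key.data(), static_cast<int>(key.size()),
         reinterpret_cast<const unsigned char*>(msg.data()), msg.size(), out, &len);
    return std::vector<unsigned char>(out, out + len);
}

int main() {
    const std::string secret = "test_secret";  // assumed; not in the log
    const std::string seed = "AWS4" + secret;

    // Credential scope from the log: 20250630 / us-east-1 / s3 / aws4_request.
    auto kDate    = HmacSha256(std::vector<unsigned char>(seed.begin(), seed.end()), "20250630");
    auto kRegion  = HmacSha256(kDate, "us-east-1");
    auto kService = HmacSha256(kRegion, "s3");
    auto kSigning = HmacSha256(kService, "aws4_request");

    // The Signature header value is hex(HMAC(kSigning, string_to_sign)).
    for (unsigned char b : kSigning) std::printf("%02x", b);
    std::printf("\n");
}

Because the date, region, and service are baked into the key chain, the same request signed on another day or against another region produces a different Signature, which is why each mock request in the log carries its own value.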
2025-06-30T09:23:30.719784Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:30.719793Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1318: TImport::TTxProgress: OnModifyResult: txId# 281474976710760, status# StatusAccepted 2025-06-30T09:23:30.719828Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:647: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976710760 Issue: '' } 2025-06-30T09:23:30.720408Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:30.731050Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:30.731064Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 281474976710760 2025-06-30T09:23:30.731086Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:633: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-30T09:23:30.731280Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:30.731300Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:30.731302Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1222: TImport::TTxProgress: OnAllocateResult: txId# 281474976710761, id# 281474976715667 2025-06-30T09:23:30.731313Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:524: TImport::TTxProgress: Restore propose: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710761 2025-06-30T09:23:30.731419Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:30.731503Z node 46 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976710761:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-30T09:23:30.731782Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:30.731787Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1318: TImport::TTxProgress: OnModifyResult: txId# 281474976710761, status# StatusAccepted 2025-06-30T09:23:30.731797Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:647: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: 
[OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976710761 Issue: '' } 2025-06-30T09:23:30.732021Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete REQUEST: HEAD /test_bucket/JsonDocumentTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:14636 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 9C42E552-8F2C-454C-94CB-C370C9BB044E amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250630/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=1cb5e9f5184fa2f750a408a525d1649e18be9a744afc5b3c867ae1ae5d94c48a content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250630T092330Z S3_MOCK::HttpServeRead: /test_bucket/JsonDocumentTable/data_00.csv / 32 REQUEST: GET /test_bucket/JsonDocumentTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:14636 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: ED78E7C6-8B4F-4C3C-A2C6-7E6EF2207B8A amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250630/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=2a342ba13bd4ff605d84f43d97d9fbe917e65f79a98857794f15fb7a8320b494 content-type: application/xml range: bytes=0-31 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250630T092330Z S3_MOCK::HttpServeRead: /test_bucket/JsonDocumentTable/data_00.csv / 32 2025-06-30T09:23:30.748855Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:23:30.748869Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-30T09:23:30.749145Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:23:30.908790Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [46:7521670615219071723:2358] [0] Resolve database: name# /Root 2025-06-30T09:23:30.908937Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [46:7521670615219071723:2358] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: 
[] } }] } 2025-06-30T09:23:30.908948Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [46:7521670615219071723:2358] [0] Send request: schemeShardId# 72057594046644480 2025-06-30T09:23:30.909122Z node 46 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [46:7521670615219071723:2358] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715667 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:14636" scheme: HTTP bucket: "test_bucket" items { source_prefix: "JsonDocumentTable" destination_path: "/Root/JsonDocumentTable" } } StartTime { seconds: 1751275410 } EndTime { seconds: 1751275410 } } 2025-06-30T09:23:30.922846Z node 46 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [46:7521670610924102814:2140] Handle TEvExecuteKqpTransaction 2025-06-30T09:23:30.922862Z node 46 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [46:7521670610924102814:2140] TxId# 281474976715668 ProcessProposeKqpTransaction 2025-06-30T09:23:30.923114Z node 46 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jz02c5ex4hh2chhbr0wx1xvf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=46&id=MmExZjQyMmQtM2FhZDRlY2UtODFmMzM3NTgtZDg5OGJmMTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |82.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestOnDiskStoredSourceIds [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-30T09:22:32.931651Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:32.931688Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: [1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-30T09:22:32.942669Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:32.946423Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 
Important: false } 2025-06-30T09:22:32.947258Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-30T09:22:32.948062Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-30T09:22:32.948748Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-30T09:22:32.949324Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:190:2201] 2025-06-30T09:22:32.952781Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|32b3bbae-d9d517bf-275ca318-6723b8a6_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:32.954297Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|84c869af-ba9dc724-c0ae2215-fbc26d07_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:32.959031Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|54da827f-4f868ed4-d30cfcb9-fcdd76ac_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:32.960948Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|dff422b6-80312f23-6dd0123e-e1844324_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:32.962586Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b1805fe8-c20bf3f2-87f64d7d-1f3f97aa_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:32.964360Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d0176367-cc149f9c-729bd64c-21443c12_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:112:2057] recipient: [2:105:2137] 2025-06-30T09:22:33.179338Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:33.179372Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:158:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:181:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:111:2141]) on event NKikimr::TEvPersQueue::TEvUpdateConfigBuilder ! 
Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:183:2057] recipient: [2:103:2136] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:186:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:187:2057] recipient: [2:185:2195] Leader for TabletID 72057594037927937 is [2:188:2196] sender: [2:189:2057] recipient: [2:185:2195] 2025-06-30T09:22:33.197265Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:33.197307Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [2:111:2141]) rebooted! !Reboot 72057594037927937 (actor [2:111:2141]) tablet resolver refreshed! new actor is[2:188:2196] Leader for TabletID 72057594037927937 is [2:188:2196] sender: [2:268:2057] recipient: [2:14:2061] 2025-06-30T09:22:34.780697Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:34.780891Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 2 actor [2:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-30T09:22:34.781057Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:274:2258] 2025-06-30T09:22:34.781617Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:274:2258] 2025-06-30T09:22:34.781998Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:275:2259] 2025-06-30T09:22:34.782312Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:275:2259] 2025-06-30T09:22:34.784090Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8b949e71-8aa7370a-41fcb6f2-3061cd9e_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:34.784926Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|daae5ff4-1eac37bf-78ce7726-934f42af_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:34.789634Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8671b356-d2307ad2-c3256228-76720f97_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:34.791677Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|53f16887-a4e0edeb-a13af66b-b77b8afa_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:34.793278Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 
default|61eb038e-aa6e990e-96925b6d-d32c08_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:22:34.795272Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|f950b854-19b4c91a-a8867705-5fec5cc1_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:112:2057] recipient: [3:105:2137] 2025-06-30T09:22:35.040004Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:35.040043Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:158:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:183:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:111:2141]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:185:2057] recipient: [3:103:2136] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:187:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:189:2057] recipient: [3:188:2197] Leader for TabletID 72057594037927937 is [3:190:2198] sender: [3:191:2057] recipient: [3:188:2197] 2025-06-30T09:22:35.052826Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:35.052862Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [3:111:2141]) rebooted! !Reboot 72057594037927937 (actor [3:111:2141]) tablet resolver refreshed! new actor is[3:190:2198] Leader for TabletID 72057594037927937 is [3:190:2198] sender: [3:270:2057] recipient: [3:14:2061] 2025-06-30T09:22:36.618440Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:22:36.618717Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 3 actor [3:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-06-30T09:22:36.618999Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72 ... 
7937, Partition: 0, State: StateInit] bootstrapping 0 [47:189:2200] 2025-06-30T09:23:26.875509Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [47:189:2200] 2025-06-30T09:23:26.875801Z node 47 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [47:190:2201] 2025-06-30T09:23:26.876102Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [47:190:2201] 2025-06-30T09:23:26.877435Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|45d0a627-4b39e7d3-d0b454eb-80bb3e07_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:26.878107Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c26b7b45-d7f2d7fe-340f1603-fae2e0af_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:26.881337Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6f07ab19-fee72df7-e12842a2-4315cb49_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:26.882653Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5befaff2-56b427c9-49789e55-7a6f42c8_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:26.883758Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1662bd75-57fddc72-b3212c77-f1c2237c_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:26.884758Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|fcbd59c9-95b8138b-98e112dd-65a89a94_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default !Reboot 72057594037927937 (actor [47:111:2141]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [47:111:2141] sender: [47:287:2057] recipient: [47:103:2136] Leader for TabletID 72057594037927937 is [47:111:2141] sender: [47:290:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [47:111:2141] sender: [47:291:2057] recipient: [47:289:2282] Leader for TabletID 72057594037927937 is [47:292:2283] sender: [47:293:2057] recipient: [47:289:2282] 2025-06-30T09:23:26.893313Z node 47 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:23:26.893355Z node 47 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-30T09:23:26.893470Z node 47 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [47:341:2324] 2025-06-30T09:23:26.893931Z node 47 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [47:342:2325] 2025-06-30T09:23:26.895244Z node 47 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-30T09:23:26.895259Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [47:341:2324] 2025-06-30T09:23:26.895390Z node 47 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-30T09:23:26.895394Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [47:342:2325] !Reboot 72057594037927937 (actor [47:111:2141]) rebooted! !Reboot 72057594037927937 (actor [47:111:2141]) tablet resolver refreshed! new actor is[47:292:2283] Leader for TabletID 72057594037927937 is [47:292:2283] sender: [47:390:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:107:2057] recipient: [48:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:107:2057] recipient: [48:105:2137] Leader for TabletID 72057594037927937 is [48:111:2141] sender: [48:112:2057] recipient: [48:105:2137] 2025-06-30T09:23:28.324633Z node 48 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:23:28.324672Z node 48 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [48:153:2057] recipient: [48:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [48:153:2057] recipient: [48:151:2172] Leader for TabletID 72057594037927938 is [48:157:2176] sender: [48:158:2057] recipient: [48:151:2172] Leader for TabletID 72057594037927937 is [48:111:2141] sender: [48:183:2057] recipient: [48:14:2061] 2025-06-30T09:23:28.329305Z node 48 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:23:28.329529Z node 48 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 48 actor [48:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 48 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 48 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 48 Important: false } 2025-06-30T09:23:28.329647Z node 48 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [48:189:2200] 2025-06-30T09:23:28.330129Z node 48 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [48:189:2200] 2025-06-30T09:23:28.330466Z node 48 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [48:190:2201] 2025-06-30T09:23:28.330834Z node 48 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init 
complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [48:190:2201] 2025-06-30T09:23:28.332304Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|398ff63a-f3ba2298-ff7f4514-6d60e940_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:28.333206Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8efc1d8a-1e1427e-50cd63ac-87b4a0c3_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:28.337150Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|39011b56-f7f43d3c-3198fc7-8244db16_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:28.338452Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2627f74-4c6ca2ad-38c7fbb2-8d505fda_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:28.339560Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|26050488-8bb53a38-382c8d69-ad737abe_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:28.340791Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a3aeeff5-165195f5-a267a236-56c5afa6_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:107:2057] recipient: [49:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:107:2057] recipient: [49:105:2137] Leader for TabletID 72057594037927937 is [49:111:2141] sender: [49:112:2057] recipient: [49:105:2137] 2025-06-30T09:23:28.585220Z node 49 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:23:28.585252Z node 49 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [49:153:2057] recipient: [49:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [49:153:2057] recipient: [49:151:2172] Leader for TabletID 72057594037927938 is [49:157:2176] sender: [49:158:2057] recipient: [49:151:2172] Leader for TabletID 72057594037927937 is [49:111:2141] sender: [49:181:2057] recipient: [49:14:2061] 2025-06-30T09:23:28.590178Z node 49 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-30T09:23:28.590459Z node 49 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 49 actor [49:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 49 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 49 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 49 Important: false } 2025-06-30T09:23:28.590677Z node 49 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [49:187:2198] 2025-06-30T09:23:28.591428Z node 49 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init 
complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [49:187:2198] 2025-06-30T09:23:28.592002Z node 49 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [49:188:2199] 2025-06-30T09:23:28.592560Z node 49 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [49:188:2199] 2025-06-30T09:23:28.594869Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8480cb78-bfb5c012-6872cf1b-1bb9b272_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:28.595696Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2190bc5c-2d618e61-be167abf-ef085eb6_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:28.599370Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6047ea80-bf24397c-e6c7a9a0-c18371cf_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:28.600750Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|477989d8-38ac6eb4-eb2167f7-fbfa51ca_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:28.602271Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ef529b81-3cd6a251-f191d193-88449552_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-30T09:23:28.603903Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c9d28b9c-5011e493-556d9c84-6ac307fc_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |82.8%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> BackupRestore::SkipEmptyDirsOnRestore [GOOD] Test command err: 2025-06-30T09:23:05.052387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670505091968526:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:05.052558Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpPx7kAw/pdisk_1.dat 2025-06-30T09:23:05.148420Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:05.154333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:05.154363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:05.161936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63799, node 1 2025-06-30T09:23:05.179159Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:23:05.189901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:05.189921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: (empty maybe) 2025-06-30T09:23:05.189925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:05.190001Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16235 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:05.232713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:05.589493Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670505091969296:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:05.589661Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:05.589818Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670505091969308:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:05.589941Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521670505091968570:2141] Handle TEvProposeTransaction 2025-06-30T09:23:05.589948Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521670505091968570:2141] TxId# 281474976715658 ProcessProposeTransaction 2025-06-30T09:23:05.589962Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7521670505091968570:2141] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7521670505091969311:2604] 2025-06-30T09:23:05.604163Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7521670505091969311:2604] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-30T09:23:05.604216Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7521670505091969311:2604] txid# 281474976715658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:23:05.604221Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7521670505091969311:2604] txid# 281474976715658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-30T09:23:05.604752Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7521670505091969311:2604] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:23:05.604769Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7521670505091969311:2604] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:23:05.604810Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7521670505091969311:2604] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:23:05.604860Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7521670505091969311:2604] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:23:05.604874Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7521670505091969311:2604] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-30T09:23:05.604922Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7521670505091969311:2604] txid# 281474976715658 HANDLE EvClientConnected 2025-06-30T09:23:05.604967Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521670505091969336:2610], Recipient [1:7521670505091968693:2206]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:05.604972Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:05.604975Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:23:05.604982Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521670505091969311:2604], Recipient [1:7521670505091968693:2206]: {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-30T09:23:05.604984Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:23:05.605824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: ".metadata/workload_manager/pools/default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } TxId: 281474976715658 TabletId: 72057594046644480 Owner: "metadata@system" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:23:05.605937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-30T09:23:05.605974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: .metadata, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-30T09:23:05.605987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-30T09:23:05.605993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976715658:0 type: TxMkDir target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-30T09:23:05.606006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:23:05.606011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976715658:1, at schemeshard: 72057594046644480 2025-06-30T09:23:05.606019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: 
AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 2], parent name: .metadata, child name: workload_manager, child id: [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-30T09:23:05.606023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 0 2025-06-30T09:23:05.606025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976715658:1 type: TxMkDir target ... 39.637232Z node 55 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:39.743205Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:23:39.790953Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:39.846661Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:39.902869Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:23:39.952011Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:23:40.004976Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:40.016637Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first 
GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:23:40.200284Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:40.216329Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateReplication, opId: 281474976710689:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp:473) 2025-06-30T09:23:40.256658Z node 55 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037892:1][55:7521670656940860813:2619] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:16:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-30T09:23:40.259982Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710694:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-30T09:23:40.272371Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:40.281980Z node 55 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request Backup "/Root" to "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpjjyfuC/"Create temporary directory "/Root/~backup_20250630T092341" in databaseProcess "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpjjyfuC/table"Process "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpjjyfuC/replication"Backup async replication "/Root/replication" to "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpjjyfuC/replication"Write async replication into "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpjjyfuC/replication/create_async_replication.sql"Write ACL into "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpjjyfuC/replication/permissions.pb"Process "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpjjyfuC/replica"Copy tables: { src: "/Root/table", dst: "/Root/~backup_20250630T092341/table" }, { src: "/Root/replica", dst: "/Root/~backup_20250630T092341/replica" }Describe table "/Root/table"Describe table "/Root/~backup_20250630T092341/table"Backup table "/Root/~backup_20250630T092341/table" to "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpjjyfuC/table"Write scheme into 
"/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpjjyfuC/table/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpjjyfuC/table/permissions.pb"Read table "/Root/~backup_20250630T092341/table"Write data into "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpjjyfuC/table/data_00.csv"Drop table "/Root/~backup_20250630T092341/table"Describe table "/Root/replica"2025-06-30T09:23:41.291519Z node 55 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 55, TabletId: 72075186224037898 not found Skip table "/Root/replica", because it is a replica tableDrop table "/Root/~backup_20250630T092341/replica"Remove temporary directory "/Root/~backup_20250630T092341" in database2025-06-30T09:23:41.305828Z node 55 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 55, TabletId: 72075186224037897 not found 2025-06-30T09:23:41.306870Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710708:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Backup completed successfully 2025-06-30T09:23:41.891244Z node 58 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[58:7521670659641447280:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:41.891262Z node 58 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpwq9AVb/pdisk_1.dat 2025-06-30T09:23:41.903748Z node 58 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25628, node 58 2025-06-30T09:23:41.914663Z node 58 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:41.914683Z node 58 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:41.914684Z node 58 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:41.914715Z node 58 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61359 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:41.991590Z node 58 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(58, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:41.991617Z node 58 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(58, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:41.993129Z node 58 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(58, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:41.994784Z node 58 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
Restore "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpVX9xPX/" to "/Root"Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpVX9xPX/"},{"type":"Directory","path":"/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpVX9xPX/with_one_dir"},{"type":"Directory","path":"/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpVX9xPX/with_one_file"}]Process "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpVX9xPX/with_one_dir"Restore empty directory "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpVX9xPX/with_one_dir" to "/Root/with_one_dir"Process "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpVX9xPX/with_one_file"Restore empty directory "/home/runner/.ya/build/build_root/cl3w/002277/r3tmp/tmpVX9xPX/with_one_file" to "/Root/with_one_file"Restore completed successfully >> TPersQueueTest::DirectReadCorrectOffsetsOnRestart [GOOD] >> TPersQueueTest::DirectReadBadCases >> TCmsTest::ScheduledEmergencyDuringRollingRestart |82.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |82.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |82.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |82.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |82.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |82.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |82.8%| [TA] $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink |82.8%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |82.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.8%| [LD] {RESULT} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |82.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |82.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.8%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |82.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink |82.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |82.8%| [LD] {RESULT} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink |82.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.8%| [TA] $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/ydbd/ydbd |82.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects >> TCmsTest::RequestReplaceBrokenDevices [GOOD] >> TCmsTest::PermissionDuration >> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink |82.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydbd/ydbd |82.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects |82.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects |82.9%| [LD] {RESULT} $(B)/ydb/apps/ydbd/ydbd >> TMaintenanceApiTest::ManyActionGroupsWithSingleAction [GOOD] >> TMaintenanceApiTest::LastRefreshTime >> FolderServiceTest::TFolderServiceAdapter |82.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> TAccessServiceTest::PassRequestId >> FolderServiceTest::TFolderService >> TCmsTest::WalleCleanupTest [GOOD] >> TCmsTest::VDisksEvictionShouldFailWhileSentinelIsDisabled >> TAccessServiceTest::Authenticate |82.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink [GOOD] >> FolderServiceTest::TFolderServiceAdapter [GOOD] |82.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::PassRequestId [GOOD] >> KqpDataIntegrityTrails::Select >> TAccessServiceTest::Authenticate [GOOD] >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink >> TReplicaTest::Commit |82.9%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 12432, MsgBus: 18904 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0044ec/r3tmp/tmpiQLXPH/pdisk_1.dat TServer::EnableGrpc on GrpcPort 12432, node 1 TClient is connected to server localhost:18904 TClient is connected to server localhost:18904 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink [GOOD] |82.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/{meta.json ... results_accumulator.log} >> TReplicaTest::Merge >> TReplicaTest::CommitWithoutHandshake >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink [GOOD] >> TReplicaTest::Handshake >> Backup::ProposeBackup >> TServiceAccountServiceTest::IssueToken [GOOD] |82.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_base/test-results/unittest/{meta.json ... results_accumulator.log} |82.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |82.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderServiceAdapter [GOOD] Test command err: 2025-06-30T09:23:48.064861Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670692046690943:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:48.064883Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d82/r3tmp/tmp4VyEu6/pdisk_1.dat 2025-06-30T09:23:48.125632Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:48.125882Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670692046690923:2080] 1751275428064731 != 1751275428064734 TClient is connected to server localhost:14299 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:48.167467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:48.167523Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:48.168629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:48.197989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:23:48.204582Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [13427cf493b0] Connect to grpc://localhost:62344 2025-06-30T09:23:48.204824Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13427cf493b0] Request ListFoldersRequest { id: "i_am_exists" } 2025-06-30T09:23:48.208584Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [13427cf493b0] Response ListFoldersResponse { result { cloud_id: "cloud_from_old_service" } } 2025-06-30T09:23:48.210794Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [13427cf49bf0] Connect to grpc://localhost:28720 2025-06-30T09:23:48.210998Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13427cf49bf0] Request ResolveFoldersRequest { folder_ids: "i_am_exists" } 2025-06-30T09:23:48.213320Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [13427cf49bf0] Response ResolveFoldersResponse { resolved_folders { cloud_id: "cloud_from_new_service" } } 2025-06-30T09:23:48.213552Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13427cf49bf0] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-30T09:23:48.214322Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [13427cf49bf0] Status 5 Not Found 2025-06-30T09:23:48.214505Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13427cf493b0] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-30T09:23:48.215150Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [13427cf493b0] Status 5 Not Found |82.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> TReplicaTest::Commit [GOOD] >> TReplicaTest::AckNotifications >> TColumnShardTestReadWrite::WriteExoticTypes >> TReplicaTest::Merge [GOOD] >> TReplicaTest::IdempotencyUpdatesWithoutSubscribers >> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink [GOOD] >> TReplicaTest::CommitWithoutHandshake [GOOD] >> TReplicaTest::CommitWithStaleGeneration >> TReplicaTest::Handshake [GOOD] >> TReplicaTest::DoubleUnsubscribe ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::PassRequestId [GOOD] Test command err: 2025-06-30T09:23:48.143748Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670693275383333:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:48.143772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d6e/r3tmp/tmp2Yu9tY/pdisk_1.dat 2025-06-30T09:23:48.204268Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:48.204502Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670693275383307:2080] 1751275428143494 != 1751275428143497 TClient is connected to server localhost:11985 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:48.242407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:48.245729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:48.245758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:48.246825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:48.247037Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [72f0bc2928b0]{trololo} Connect to grpc://localhost:9216 2025-06-30T09:23:48.247459Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [72f0bc2928b0]{trololo} Request AuthenticateRequest { iam_token: "**** (717F937C)" } 2025-06-30T09:23:48.249779Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [72f0bc2928b0]{trololo} Response AuthenticateResponse { subject { user_account { id: "1234" } } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::Authenticate [GOOD] Test command err: 2025-06-30T09:23:48.252163Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670690953641287:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:48.252193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d42/r3tmp/tmpaX8a8V/pdisk_1.dat 2025-06-30T09:23:48.307338Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:48.307583Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670690953641266:2080] 1751275428252017 != 1751275428252020 TClient is connected to server localhost:20256 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:48.383891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:48.383921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:48.384770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:48.384853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:23:48.416607Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [30397dfe3bf0] Connect to grpc://localhost:23823 2025-06-30T09:23:48.417119Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [30397dfe3bf0] Request AuthenticateRequest { iam_token: "**** (047D44F1)" } 2025-06-30T09:23:48.419890Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [30397dfe3bf0] Status 7 Permission Denied 2025-06-30T09:23:48.420109Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [30397dfe3bf0] Request AuthenticateRequest { iam_token: "**** (342498C1)" } 2025-06-30T09:23:48.420785Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [30397dfe3bf0] Response AuthenticateResponse { subject { user_account { id: "1234" } } } >> KqpWorkloadServiceActors::TestPoolFetcher >> TReplicaTest::AckNotifications [GOOD] >> TReplicaTest::AckNotificationsUponPathRecreation >> TCmsTest::ScheduledEmergencyDuringRollingRestart [GOOD] >> TCmsTest::ScheduledWalleRequestDuringRollingRestart ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 4363, MsgBus: 61444 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00449f/r3tmp/tmpZO9IM2/pdisk_1.dat TServer::EnableGrpc on GrpcPort 4363, node 1 TClient is connected to server localhost:61444 TClient is connected to server localhost:61444 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> TReplicaTest::IdempotencyUpdatesWithoutSubscribers [GOOD] >> TReplicaTest::StrongNotificationAfterCommit >> RemoteTopicReader::ReadTopic >> TReplicaTest::CommitWithStaleGeneration [GOOD] >> TReplicaTest::Delete >> TReplicaTest::DoubleUnsubscribe [GOOD] >> TReplicaTest::DoubleDelete >> TReplicaTest::AckNotificationsUponPathRecreation [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 18169, MsgBus: 26643 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004495/r3tmp/tmpbz1iY7/pdisk_1.dat TServer::EnableGrpc on GrpcPort 18169, node 1 TClient is connected to server localhost:26643 TClient is connected to server localhost:26643 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... |82.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_topic_reader/unittest >> TReplicaTest::StrongNotificationAfterCommit [GOOD] >> FolderServiceTest::TFolderService [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 1476, MsgBus: 63453 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004491/r3tmp/tmpJVevDs/pdisk_1.dat TServer::EnableGrpc on GrpcPort 1476, node 1 TClient is connected to server localhost:63453 TClient is connected to server localhost:63453 WaitRootIsUp 'Root'... 
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
waiting...
waiting...
waiting...
waiting...
|82.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_topic_reader/unittest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> TServiceAccountServiceTest::IssueToken [GOOD]
Test command err: 2025-06-30T09:23:48.244077Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670693378813716:2063];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:23:48.244096Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d9c/r3tmp/tmpqcKjdO/pdisk_1.dat
2025-06-30T09:23:48.315975Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
TClient is connected to server localhost:21192
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-30T09:23:48.347018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:23:48.347061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:23:48.348144Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-30T09:23:48.388564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
waiting...
2025-06-30T09:23:48.717472Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670693007261071:2060];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:23:48.717503Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d9c/r3tmp/tmpd3pIpH/pdisk_1.dat
2025-06-30T09:23:48.730513Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:23:48.730956Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670693007261052:2080] 1751275428717369 != 1751275428717372
TClient is connected to server localhost:23681
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-30T09:23:48.821667Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:23:48.821711Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:23:48.822146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
waiting...
2025-06-30T09:23:48.822664Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected |82.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_topic_reader/unittest >> TReplicaTest::DoubleDelete [GOOD] >> TReplicaTest::Delete [GOOD] >> KqpDataIntegrityTrails::Select [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::AckNotificationsUponPathRecreation [GOOD] Test command err: 2025-06-30T09:23:48.678172Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-30T09:23:48.678198Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-30T09:23:48.678216Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-30T09:23:48.678221Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:7:2054] Commit generation: owner# 1, generation# 1 2025-06-30T09:23:48.678229Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [1:8:2055] 2025-06-30T09:23:48.678233Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 2 2025-06-30T09:23:48.885278Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:9:2056] 2025-06-30T09:23:48.885296Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# path 2025-06-30T09:23:48.885330Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-06-30T09:23:48.885350Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-30T09:23:48.885354Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-30T09:23:48.885389Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-30T09:23:48.885394Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-30T09:23:48.886290Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-30T09:23:48.886336Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:9:2056] 2025-06-30T09:23:48.886354Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 40 2025-06-30T09:23:48.886360Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# 
[OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-30T09:23:48.886364Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [2:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-30T09:23:48.886376Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [2:9:2056] 2025-06-30T09:23:49.094505Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-30T09:23:49.094529Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-30T09:23:49.094566Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-30T09:23:49.094575Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-30T09:23:49.094589Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 2, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-30T09:23:49.094611Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2025-06-30T09:23:49.094632Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-06-30T09:23:49.094651Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-30T09:23:49.094655Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-30T09:23:49.094662Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 3, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-30T09:23:49.094700Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-30T09:23:49.094705Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-30T09:23:49.094710Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-30T09:23:49.094721Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7:2054] Upsert description: path# path 2025-06-30T09:23:49.094728Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-06-30T09:23:49.094756Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, 
LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-30T09:23:49.094773Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 3 }: sender# [3:9:2056] |82.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_index_build/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::StrongNotificationAfterCommit [GOOD] Test command err: 2025-06-30T09:23:48.766160Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:9:2056] 2025-06-30T09:23:48.766181Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert description: path# path 2025-06-30T09:23:48.766206Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-06-30T09:23:48.766224Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:10:2057] 2025-06-30T09:23:48.766227Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-30T09:23:48.766232Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:10:2057], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-30T09:23:48.766241Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-30T09:23:48.766245Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-30T09:23:48.766277Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2025-06-30T09:23:48.766281Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-30T09:23:48.767004Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-30T09:23:48.767046Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 40 2025-06-30T09:23:48.767050Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-30T09:23:48.767053Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [1:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-30T09:23:48.973051Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-30T09:23:48.973075Z node 2 :SCHEME_BOARD_REPLICA NOTICE: 
replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-30T09:23:48.973097Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:9:2056] 2025-06-30T09:23:48.973103Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-30T09:23:48.973119Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-30T09:23:48.973153Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-30T09:23:48.973158Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-30T09:23:48.973171Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-30T09:23:48.973208Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 40 2025-06-30T09:23:48.973213Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-30T09:23:48.973218Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [2:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-30T09:23:48.973231Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [2:9:2056] 2025-06-30T09:23:48.973252Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:7:2054] Unsubscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 1] 2025-06-30T09:23:48.973260Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-30T09:23:48.973265Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-30T09:23:48.973269Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [2:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-30T09:23:48.973276Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-30T09:23:48.973280Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-30T09:23:48.973286Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, 
DescribeSchemeResultSerialized size 30} 2025-06-30T09:23:48.973298Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 2] DomainOwnerId: 0 }: sender# [2:10:2057] 2025-06-30T09:23:48.973305Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:10:2057], path# [OwnerId: 1, LocalPathId: 2], domainOwnerId# 0, capabilities# 2025-06-30T09:23:49.184921Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 1 }: sender# [3:9:2056] 2025-06-30T09:23:49.184938Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7:2054] Upsert description: path# path 2025-06-30T09:23:49.184951Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 1, capabilities# 2025-06-30T09:23:49.184969Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-30T09:23:49.184974Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-30T09:23:49.184981Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-30T09:23:49.184984Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:7:2054] Commit generation: owner# 1, generation# 1 2025-06-30T09:23:49.184995Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:997: [3:7:2054] Handle NKikimr::NSchemeBoard::TReplica::TEvPrivate::TEvSendStrongNotifications { Owner: 1 } |82.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_topic_reader/unittest |82.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_topic_reader/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::Delete [GOOD] Test command err: 2025-06-30T09:23:48.825703Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-30T09:23:48.825728Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:969: [1:7:2054] Reject commit from unknown populator: sender# [1:8:2055], owner# 1, generation# 1 2025-06-30T09:23:48.825737Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-30T09:23:48.825741Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-30T09:23:49.032589Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 0 }: sender# [2:8:2055] 2025-06-30T09:23:49.032612Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 0 2025-06-30T09:23:49.032622Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:9:2056] 2025-06-30T09:23:49.032625Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-30T09:23:49.032632Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:7:2054] Handle 
NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [2:9:2056] 2025-06-30T09:23:49.032636Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:7:2054] Commit generation: owner# 1, generation# 1 2025-06-30T09:23:49.032641Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 0 }: sender# [2:8:2055] 2025-06-30T09:23:49.032645Z node 2 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:979: [2:7:2054] Reject commit from stale populator: sender# [2:8:2055], owner# 1, generation# 0, pending generation# 1 2025-06-30T09:23:49.032649Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [2:8:2055] 2025-06-30T09:23:49.032651Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 2 2025-06-30T09:23:49.241253Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-30T09:23:49.241283Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-30T09:23:49.241346Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-30T09:23:49.241355Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], deletion# false 2025-06-30T09:23:49.242623Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 42, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-30T09:23:49.242691Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2025-06-30T09:23:49.242726Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-06-30T09:23:49.242780Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 42, LocalPathId: 1] DomainOwnerId: 0 }: sender# [3:10:2057] 2025-06-30T09:23:49.242789Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:10:2057], path# [OwnerId: 42, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-30T09:23:49.242819Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2025-06-30T09:23:49.242825Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], deletion# true 2025-06-30T09:23:49.242831Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 42, LocalPathId: 1] 2025-06-30T09:23:49.242862Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:11:2058] 2025-06-30T09:23:49.242869Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: 
[3:7:2054] Subscribe: subscriber# [3:11:2058], path# path, domainOwnerId# 0, capabilities# 2025-06-30T09:23:49.242881Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 42, LocalPathId: 1] DomainOwnerId: 0 }: sender# [3:12:2059] 2025-06-30T09:23:49.242887Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:12:2059], path# [OwnerId: 42, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-30T09:23:49.242898Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:13:2060] 2025-06-30T09:23:49.242902Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:13:2060], path# path, domainOwnerId# 0, capabilities# ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderService [GOOD] Test command err: 2025-06-30T09:23:48.159677Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670693222994399:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:48.159709Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002d79/r3tmp/tmpXWx193/pdisk_1.dat 2025-06-30T09:23:48.209804Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:48.210877Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670693222994378:2080] 1751275428159503 != 1751275428159506 TClient is connected to server localhost:5489 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:23:48.263127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:23:48.263161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:23:48.266882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-30T09:23:48.294609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
waiting...
2025-06-30T09:23:48.298841Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [72d7fc52d930] Connect to grpc://localhost:28543
2025-06-30T09:23:48.301565Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [72d7fc52d930] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" }
2025-06-30T09:23:48.303670Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [72d7fc52d930] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:28543: Failed to connect to remote host: Connection refused
2025-06-30T09:23:48.304208Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [72d7fc52d930] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" }
2025-06-30T09:23:48.304425Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [72d7fc52d930] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:28543: Failed to connect to remote host: Connection refused
2025-06-30T09:23:49.162218Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2025-06-30T09:23:49.304678Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [72d7fc52d930] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" }
2025-06-30T09:23:49.305784Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [72d7fc52d930] Status 5 Not Found
2025-06-30T09:23:49.305930Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [72d7fc52d930] Request ResolveFoldersRequest { folder_ids: "i_am_exists" }
2025-06-30T09:23:49.306719Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [72d7fc52d930] Response ResolveFoldersResponse { resolved_folders { cloud_id: "response_cloud_id" } }
>> TCmsTest::PermissionDuration [GOOD]
>> TCmsTest::RacyStartCollecting
>> KqpWorkloadServiceActors::TestPoolFetcher [GOOD]
>> KqpWorkloadServiceActors::TestPoolFetcherAclValidation
>> Backup::ProposeBackup [GOOD]
>> EvWrite::AbortInTransaction
>> TMaintenanceApiTest::LastRefreshTime [GOOD]
>> TMaintenanceApiTest::ForceAvailabilityMode
>> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::DoubleDelete [GOOD]
Test command err: 2025-06-30T09:23:48.851086Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055]
2025-06-30T09:23:48.851109Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1
2025-06-30T09:23:49.057329Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle
NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-30T09:23:49.057348Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-30T09:23:49.057398Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-30T09:23:49.057405Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-30T09:23:49.058446Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-30T09:23:49.058485Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:8:2055] 2025-06-30T09:23:49.058510Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-30T09:23:49.058532Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] 2025-06-30T09:23:49.058548Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:7:2054] Unsubscribe: subscriber# [2:8:2055], path# path 2025-06-30T09:23:49.058555Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] 2025-06-30T09:23:49.283290Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-30T09:23:49.283315Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-30T09:23:49.283337Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2025-06-30T09:23:49.283344Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7:2054] Upsert description: path# path 2025-06-30T09:23:49.283363Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-06-30T09:23:49.283398Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-30T09:23:49.283405Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-30T09:23:49.283418Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-30T09:23:49.283449Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 
2025-06-30T09:23:49.283454Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true
2025-06-30T09:23:49.283458Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1]
2025-06-30T09:23:49.283472Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:10:2057]
2025-06-30T09:23:49.283478Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:10:2057], path# path, domainOwnerId# 0, capabilities#
2025-06-30T09:23:49.283489Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40
2025-06-30T09:23:49.283494Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true
|82.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_topic_reader/unittest
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Select [GOOD]
Test command err: Trying to start YDB, gRPC: 13092, MsgBus: 25973
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00448a/r3tmp/tmpZfnlEj/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 13092, node 1
TClient is connected to server localhost:25973
TClient is connected to server localhost:25973
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
waiting...
waiting...
waiting...
waiting...
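The TReplicaTest runs above all exercise the same scheme-board populator protocol: a populator handshakes with an (owner, generation) pair, commits that generation, and then pushes path updates that subscribers see; commits from unknown or stale populators are rejected, as the "Reject commit from unknown populator" and "Reject commit from stale populator" lines show. Below is a minimal, self-contained sketch of that generation bookkeeping; TReplicaModel and its methods are hypothetical names for illustration only (the real logic lives in ydb/core/tx/scheme_board/replica.cpp).

    // Hypothetical model of the handshake/commit rules visible in the
    // TReplicaTest logs above; names and output format are illustrative only.
    #include <cstdint>
    #include <iostream>
    #include <map>

    struct TReplicaModel {
        std::map<uint64_t, uint64_t> PendingGen;   // owner -> generation from last handshake
        std::map<uint64_t, uint64_t> CommittedGen; // owner -> committed generation

        void Handshake(uint64_t owner, uint64_t gen) {
            PendingGen[owner] = gen; // a newer handshake supersedes the old generation
            std::cout << "Successful handshake: owner# " << owner
                      << ", generation# " << gen << "\n";
        }

        void CommitGeneration(uint64_t owner, uint64_t gen) {
            auto it = PendingGen.find(owner);
            if (it == PendingGen.end()) {
                std::cout << "Reject commit from unknown populator: owner# " << owner << "\n";
            } else if (it->second != gen) {
                std::cout << "Reject commit from stale populator: generation# " << gen
                          << ", pending generation# " << it->second << "\n";
            } else {
                CommittedGen[owner] = gen;
                std::cout << "Commit generation: owner# " << owner
                          << ", generation# " << gen << "\n";
            }
        }
    };

    int main() {
        TReplicaModel replica;
        replica.CommitGeneration(1, 1); // rejected: no handshake yet
        replica.Handshake(1, 1);
        replica.CommitGeneration(1, 0); // rejected: stale generation
        replica.CommitGeneration(1, 1); // accepted
    }

Driven with the event order from TReplicaTest::CommitWithStaleGeneration, this model reproduces the same reject/commit sequence that the unittest output above logs.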
>> YdbTableSplit::MergeByNoLoadAfterSplit
>> YdbTableSplit::SplitByLoadWithUpdates
>> YdbTableSplit::SplitByLoadWithDeletes
>> TCmsTest::VDisksEvictionShouldFailWhileSentinelIsDisabled [GOOD]
>> TCmsTest::VDisksEvictionShouldFailOnUnsupportedAction
>> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink [GOOD]
>> TColumnShardTestReadWrite::WriteExoticTypes [GOOD]
>> EvWrite::AbortInTransaction [GOOD]
>> KqpWorkloadServiceActors::TestPoolFetcherAclValidation [GOOD]
>> KqpWorkloadServiceActors::TestPoolFetcherNotExistingPool
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink [GOOD]
Test command err: Trying to start YDB, gRPC: 13426, MsgBus: 19153
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004487/r3tmp/tmp1G0p0l/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 13426, node 1
TClient is connected to server localhost:19153
TClient is connected to server localhost:19153
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
waiting...
waiting...
waiting...
waiting...
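A detail worth noting in the GRPC_CLIENT entries earlier in this run: credentials are never logged verbatim. Requests are printed as iam_token: "**** (047D44F1)", i.e. a fixed mask plus a short fingerprint, so the two Authenticate retries with different tokens remain distinguishable in the log. The sketch below illustrates the idea only; MaskSecret is a hypothetical helper, and the choice of CRC32 as the fingerprint is an assumption, not necessarily what YDB actually uses.

    // Illustrative secret masking for DEBUG-level request logs.
    #include <cstdint>
    #include <cstdio>
    #include <string>

    static uint32_t Crc32(const std::string& data) {
        uint32_t crc = 0xFFFFFFFFu; // standard reflected CRC-32 (polynomial 0xEDB88320)
        for (unsigned char c : data) {
            crc ^= c;
            for (int i = 0; i < 8; ++i)
                crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
        }
        return ~crc;
    }

    static std::string MaskSecret(const std::string& secret) {
        char buf[32];
        std::snprintf(buf, sizeof(buf), "**** (%08X)",
                      static_cast<unsigned>(Crc32(secret)));
        return buf;
    }

    int main() {
        // Two different tokens get the same mask but different tags.
        std::printf("Request AuthenticateRequest { iam_token: \"%s\" }\n",
                    MaskSecret("token-A").c_str());
        std::printf("Request AuthenticateRequest { iam_token: \"%s\" }\n",
                    MaskSecret("token-B").c_str());
    }

Masking at the logging layer keeps these DEBUG-level request dumps safe to upload alongside the junit and report artifacts this run produces.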
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_rw/unittest >> EvWrite::AbortInTransaction [GOOD] Test command err: 2025-06-30T09:23:48.943321Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:98;event=initialize_shard;step=OnActivateExecutor; 2025-06-30T09:23:48.949068Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:116;event=initialize_shard;step=initialize_tiring_finished; 2025-06-30T09:23:48.949157Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-30T09:23:48.950074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:23:48.950148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:23:48.950197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:23:48.950226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:23:48.950247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:23:48.950269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:23:48.950296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:23:48.950318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:23:48.950340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:23:48.950365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:23:48.950385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:23:48.950410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:23:48.957537Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-30T09:23:48.957600Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-06-30T09:23:48.957612Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-30T09:23:48.957646Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-30T09:23:48.957702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:23:48.957716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:23:48.957723Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-30T09:23:48.957734Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-30T09:23:48.957745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:23:48.957752Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:23:48.957758Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-30T09:23:48.957780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-30T09:23:48.957789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:23:48.957797Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:23:48.957802Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-30T09:23:48.957815Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-30T09:23:48.957823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:23:48.957831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:23:48.957837Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-30T09:23:48.957847Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:23:48.957855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:23:48.957860Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-30T09:23:48.957890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:23:48.957900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:23:48.957905Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-30T09:23:48.957928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:23:48.957938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:23:48.957943Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-30T09:23:48.957958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:23:48.957967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:23:48.957972Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-30T09:23:48.957983Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:23:48.957993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:23:48.958000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:23:48.958005Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-30T09:23:48.958047Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=12; 2025-06-30T09:23:48.958057Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=6; 2025-06-30T09:23:48.958067Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=5; 2025-06-30T09:23:48.958080Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=8; 2025-06-30T09:23:48.958093Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-30T09:23:48.958106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025- ... ritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:65;memory_size=156;data_size=132;sum=624;count=8;size_of_meta=112; 2025-06-30T09:23:50.353747Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=228;data_size=204;sum=912;count=4;size_of_portion=184; 2025-06-30T09:23:50.354047Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 44 2025-06-30T09:23:50.354097Z node 2 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=2;operation_id=1; 2025-06-30T09:23:50.365185Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 44 2025-06-30T09:23:50.365418Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=222;problem=finished; 2025-06-30T09:23:50.365443Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=222;problem=finished; 2025-06-30T09:23:50.365479Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1751275430795 at tablet 9437184, mediator 0 2025-06-30T09:23:50.365486Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[5] execute at tablet 9437184 2025-06-30T09:23:50.365491Z node 2 :TX_COLUMNSHARD ERROR: ctor_logger.h:56: TxPlanStep[5] Ignore old txIds [112] for step 1751275430795 last planned step 1751275430795 at tablet 9437184 2025-06-30T09:23:50.365498Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[5] complete at tablet 9437184 2025-06-30T09:23:50.365551Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1751275430795:max} readable: {1751275430795:max} at tablet 9437184 2025-06-30T09:23:50.365571Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-30T09:23:50.365987Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1751275430795:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } } } ; 2025-06-30T09:23:50.366004Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1751275430795:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } } } ; 2025-06-30T09:23:50.366202Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1751275430795:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":4,"inputs":[{"from":5}]},{"owner_id":5,"inputs":[{"from":6}]},{"owner_id":6,"inputs":[]}],"nodes":{"2":{"p":{"i":"1","p":{"address":{"name":"key","id":1}},"o":"1","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"p":{"data":[{"name":"key","id":1},{"name":"field","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":6},"5":{"p":{"i":"0","p":{"data":[{"name":"key","id":1},{"name":"field","id":2}]},"o":"1,2","t":"FetchOriginalData"},"w":4,"id":5},"4":{"p":{"i":"2","p":{"address":{"name":"field","id":2}},"o":"2","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"1,2","t":"Projection"},"w":18,"id":0}}}; 2025-06-30T09:23:50.366225Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1751275430795:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-30T09:23:50.366334Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1751275430795:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:169;event=TTxScan started;actor_id=[2:181:2193];trace_detailed=; 2025-06-30T09:23:50.366440Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1,2;column_names=field,key;);; 2025-06-30T09:23:50.366460Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; 2025-06-30T09:23:50.366510Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:181:2193];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-30T09:23:50.366521Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:181:2193];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-30T09:23:50.366528Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:181:2193];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-30T09:23:50.366535Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [2:181:2193] finished for tablet 9437184 2025-06-30T09:23:50.366576Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:181:2193];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[2:180:2192];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0}],"full":{"a":1751275430366322,"name":"_full_task","f":1751275430366322,"d_finished":0,"c":0,"l":1751275430366544,"d":222},"events":[{"name":"bootstrap","f":1751275430366357,"d_finished":129,"c":1,"l":1751275430366486,"d":129},{"a":1751275430366505,"name":"ack","f":1751275430366505,"d_finished":0,"c":0,"l":1751275430366544,"d":39},{"a":1751275430366501,"name":"processing","f":1751275430366501,"d_finished":0,"c":0,"l":1751275430366544,"d":43},{"name":"ProduceResults","f":1751275430366481,"d_finished":24,"c":2,"l":1751275430366533,"d":24},{"a":1751275430366533,"name":"Finish","f":1751275430366533,"d_finished":0,"c":0,"l":1751275430366544,"d":11}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 
2025-06-30T09:23:50.366585Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:181:2193];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[2:180:2192];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-30T09:23:50.366607Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:181:2193];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[2:180:2192];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0}],"full":{"a":1751275430366322,"name":"_full_task","f":1751275430366322,"d_finished":0,"c":0,"l":1751275430366590,"d":268},"events":[{"name":"bootstrap","f":1751275430366357,"d_finished":129,"c":1,"l":1751275430366486,"d":129},{"a":1751275430366505,"name":"ack","f":1751275430366505,"d_finished":0,"c":0,"l":1751275430366590,"d":85},{"a":1751275430366501,"name":"processing","f":1751275430366501,"d_finished":0,"c":0,"l":1751275430366590,"d":89},{"name":"ProduceResults","f":1751275430366481,"d_finished":24,"c":2,"l":1751275430366533,"d":24},{"a":1751275430366533,"name":"Finish","f":1751275430366533,"d_finished":0,"c":0,"l":1751275430366590,"d":57}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-30T09:23:50.366621Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:181:2193];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-30T09:23:50.366219Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-30T09:23:50.366627Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:181:2193];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-30T09:23:50.366635Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:181:2193];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:49;event=insert_to_cache;key=string;records=0;size=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=string;records=0;count=0; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteExoticTypes [GOOD] Test command err: 2025-06-30T09:23:48.970286Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:98;event=initialize_shard;step=OnActivateExecutor; 2025-06-30T09:23:48.975801Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:116;event=initialize_shard;step=initialize_tiring_finished; 2025-06-30T09:23:48.975895Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-30T09:23:48.976807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:23:48.976879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:23:48.976922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:23:48.976945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:23:48.976965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:23:48.976991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:23:48.977012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:23:48.977033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:23:48.977054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:23:48.977077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:23:48.977097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:23:48.977120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:23:48.982973Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-30T09:23:48.983089Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-06-30T09:23:48.983099Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-30T09:23:48.983132Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-30T09:23:48.983177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:23:48.983192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:23:48.983199Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-30T09:23:48.983211Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-30T09:23:48.983219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:23:48.983224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:23:48.983227Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-30T09:23:48.983241Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-30T09:23:48.983247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:23:48.983253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:23:48.983256Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-30T09:23:48.983264Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-30T09:23:48.983269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:23:48.983274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 
2025-06-30T09:23:48.983277Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-30T09:23:48.983284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:23:48.983290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:23:48.983292Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-30T09:23:48.983313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:23:48.983321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:23:48.983324Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-30T09:23:48.983341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:23:48.983347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:23:48.983350Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-30T09:23:48.983360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:23:48.983366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:23:48.983369Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-30T09:23:48.983375Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:23:48.983381Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:23:48.983386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:23:48.983390Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-30T09:23:48.983437Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=15; 2025-06-30T09:23:48.983448Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=6; 2025-06-30T09:23:48.983457Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=5; 2025-06-30T09:23:48.983470Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=9; 2025-06-30T09:23:48.983484Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-30T09:23:48.983499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025- ... [{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"19,19,19,19,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"20,20,20,20,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{
"count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}]
,"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count
_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; >> TPersQueueTest::PreferredCluster_DisabledRemoteClusterAndWriteSessionsWithDifferentPreferredClusterAndLaterRemoteClusterEnabled_SessionWithMismatchedClusterDiesAfterPreferredClusterEnabledAndOtherSessionsAlive [GOOD] >> KqpWorkloadServiceActors::TestPoolFetcherNotExistingPool [GOOD] >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndCloseClientSessionWithEnabledRemotePreferredClusterDelaySec_SessionDiesOnlyAfterDelay >> KqpWorkloadServiceActors::TestDefaultPoolUsePermissions >> TCmsTest::ScheduledWalleRequestDuringRollingRestart [GOOD] >> TCmsTest::SamePriorityRequest >> RemoteTopicReader::ReadTopic [GOOD] |83.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TMaintenanceApiTest::ForceAvailabilityMode [GOOD] >> TCmsTest::RacyStartCollecting [GOOD] >> TCmsTest::PriorityRange >> KqpWorkloadServiceActors::TestDefaultPoolUsePermissions [GOOD] >> TCmsTest::VDisksEvictionShouldFailOnUnsupportedAction [GOOD] >> KqpWorkloadServiceActors::TestDefaultPoolAdminPermissions ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_topic_reader/unittest >> RemoteTopicReader::ReadTopic [GOOD] Test command err: 2025-06-30T09:23:49.284615Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670696159272050:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:49.284702Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a57/r3tmp/tmpyYNKZh/pdisk_1.dat 2025-06-30T09:23:49.326467Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:49.332000Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TClient is connected to server localhost:31674 TServer::EnableGrpc on GrpcPort 19906, node 1 2025-06-30T09:23:49.352916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:49.352929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:49.352931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:49.352973Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31674 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:23:49.386215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:49.386251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:23:49.387446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:49.412325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:50.286210Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:50.288232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:50.347167Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700454240185:2321], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.347167Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700454240196:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.347189Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.347669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:50.351819Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670700454240199:2325], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:23:50.450954Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670700454240239:2458] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:50.540821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:23:50.590704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:50.648344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:23:50.702900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:23:50.763611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:23:50.967369Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7521670700454240803:2706] Handshake: worker# [1:7521670696159272593:2288] 2025-06-30T09:23:50.967908Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7521670700454240803:2706] Create read session: session# [1:7521670700454240804:2287] 2025-06-30T09:23:50.968014Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7521670700454240803:2706] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-30T09:23:50.970139Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7521670700454240803:2706] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_1_249615530254981391_v1 } } 2025-06-30T09:23:50.971044Z node 1 
:REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7521670700454240803:2706] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 0 SeqNo: 1 CreateTime: 2025-06-30T09:23:50.866000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-30T09:23:50.971172Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7521670700454240803:2706] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-30T09:23:50.991959Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7521670700454240803:2706] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 1 SeqNo: 2 CreateTime: 2025-06-30T09:23:50.989000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-30T09:23:51.090202Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7521670704749208194:2738] Handshake: worker# [1:7521670696159272593:2288] 2025-06-30T09:23:51.090703Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7521670704749208194:2738] Create read session: session# [1:7521670704749208195:2287] 2025-06-30T09:23:51.090831Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7521670704749208194:2738] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-30T09:23:51.093238Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7521670704749208194:2738] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_2_12145482256981977815_v1 } } 2025-06-30T09:23:51.094170Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7521670704749208194:2738] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 1 SeqNo: 2 CreateTime: 2025-06-30T09:23:50.989000Z MessageGroupId: producer ProducerId: producer }] } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::ForceAvailabilityMode [GOOD] Test command err: 2025-06-30T09:23:46.215894Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:46.217352Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:46.219407Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:46.219455Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:46.219495Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-30T09:23:46.219582Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:46.220326Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:46.220404Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:46.220695Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:46.221070Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:46.222379Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:46.222408Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:46.222441Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:46.222466Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:46.250707Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:46.283091Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:46.283165Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:46.284296Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:46.284394Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:46.284399Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:46.284405Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:46.284408Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:46.284440Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:46.284462Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:46.284524Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:46.286085Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } 
GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } 
VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-30T09:23:46.328342Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:46.328395Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:46.370248Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:46.370293Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:46.370369Z node 1 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:46.370764Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 
120028000 } Timestamp: 120028000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "stor ... 0.176325Z node 17 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 24, with state: Up, with limit: 1, with ratio limit: 30, locked nodes: 7, down nodes: 0 2025-06-30T09:23:50.176393Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:50.176408Z node 17 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# test-user-p-1, requestId# test-user-r-1, owner# test-user 2025-06-30T09:23:50.176416Z node 17 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12001 (17) (permission test-user-p-1 until 1970-01-01T00:12:00Z) 2025-06-30T09:23:50.176422Z node 17 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# test-user-p-2, requestId# test-user-r-1, owner# test-user 2025-06-30T09:23:50.176428Z node 17 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12002 (18) (permission test-user-p-2 until 1970-01-01T00:12:00Z) 2025-06-30T09:23:50.176433Z node 17 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# test-user-p-3, requestId# test-user-r-1, owner# test-user 2025-06-30T09:23:50.176439Z node 17 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12003 (19) (permission test-user-p-3 until 1970-01-01T00:12:00Z) 2025-06-30T09:23:50.176444Z node 17 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# test-user-p-4, requestId# test-user-r-1, owner# test-user 2025-06-30T09:23:50.176450Z node 17 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12004 (20) (permission test-user-p-4 until 1970-01-01T00:12:00Z) 2025-06-30T09:23:50.176455Z node 17 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# test-user-p-5, requestId# test-user-r-1, owner# test-user 2025-06-30T09:23:50.176461Z node 17 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12005 (21) (permission test-user-p-5 until 1970-01-01T00:12:00Z) 2025-06-30T09:23:50.176465Z node 17 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# test-user-p-6, requestId# test-user-r-1, owner# test-user 2025-06-30T09:23:50.176471Z node 17 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12006 (22) (permission test-user-p-6 until 1970-01-01T00:12:00Z) 2025-06-30T09:23:50.176476Z node 17 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# test-user-p-7, requestId# test-user-r-1, owner# test-user 2025-06-30T09:23:50.176482Z node 17 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12007 (23) (permission test-user-p-7 until 1970-01-01T00:12:00Z) 2025-06-30T09:23:50.176488Z node 17 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# test-user-p-8, requestId# test-user-r-1, owner# test-user 2025-06-30T09:23:50.176493Z node 17 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12008 (24) (permission test-user-p-8 until 1970-01-01T00:12:00Z) 2025-06-30T09:23:50.176505Z node 17 :CMS DEBUG: 
cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:50.176546Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# test-user-p-1, validity# 1970-01-01T00:12:00.130000Z, action# Type: SHUTDOWN_HOST Host: "17" Duration: 600000000 2025-06-30T09:23:50.176557Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# test-user-p-2, validity# 1970-01-01T00:12:00.130000Z, action# Type: SHUTDOWN_HOST Host: "18" Duration: 600000000 2025-06-30T09:23:50.176567Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# test-user-p-3, validity# 1970-01-01T00:12:00.130000Z, action# Type: SHUTDOWN_HOST Host: "19" Duration: 600000000 2025-06-30T09:23:50.176576Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# test-user-p-4, validity# 1970-01-01T00:12:00.130000Z, action# Type: SHUTDOWN_HOST Host: "20" Duration: 600000000 2025-06-30T09:23:50.176586Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# test-user-p-5, validity# 1970-01-01T00:12:00.130000Z, action# Type: SHUTDOWN_HOST Host: "21" Duration: 600000000 2025-06-30T09:23:50.176595Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# test-user-p-6, validity# 1970-01-01T00:12:00.130000Z, action# Type: SHUTDOWN_HOST Host: "22" Duration: 600000000 2025-06-30T09:23:50.176603Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# test-user-p-7, validity# 1970-01-01T00:12:00.130000Z, action# Type: SHUTDOWN_HOST Host: "23" Duration: 600000000 2025-06-30T09:23:50.176611Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# test-user-p-8, validity# 1970-01-01T00:12:00.130000Z, action# Type: SHUTDOWN_HOST Host: "24" Duration: 600000000 2025-06-30T09:23:50.176620Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# test-user-r-1, owner# test-user 2025-06-30T09:23:50.176671Z node 17 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 0.100000s 2025-06-30T09:23:50.176681Z node 17 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-30T09:23:50.176703Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 17, wbId# [17:8388350642965737326:1634689637] 2025-06-30T09:23:50.176710Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 18, wbId# [18:8388350642965737326:1634689637] 2025-06-30T09:23:50.176716Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 19, wbId# [19:8388350642965737326:1634689637] 2025-06-30T09:23:50.176721Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 20, wbId# [20:8388350642965737326:1634689637] 2025-06-30T09:23:50.176726Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 21, wbId# [21:8388350642965737326:1634689637] 2025-06-30T09:23:50.176731Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 22, wbId# [22:8388350642965737326:1634689637] 2025-06-30T09:23:50.176736Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 23, wbId# [23:8388350642965737326:1634689637] 2025-06-30T09:23:50.176742Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 24, wbId# 
[24:8388350642965737326:1634689637] 2025-06-30T09:23:50.178103Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 17, response# PDiskStateInfo { PDiskId: 17 CreateTime: 0 ChangeTime: 0 Path: "/17/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120130 2025-06-30T09:23:50.178278Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 22, response# PDiskStateInfo { PDiskId: 22 CreateTime: 0 ChangeTime: 0 Path: "/22/pdisk-22.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120130 2025-06-30T09:23:50.178320Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 23, response# PDiskStateInfo { PDiskId: 23 CreateTime: 0 ChangeTime: 0 Path: "/23/pdisk-23.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120130 2025-06-30T09:23:50.178338Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 18, response# PDiskStateInfo { PDiskId: 18 CreateTime: 0 ChangeTime: 0 Path: "/18/pdisk-18.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120130 2025-06-30T09:23:50.178355Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 19, response# PDiskStateInfo { PDiskId: 19 CreateTime: 0 ChangeTime: 0 Path: "/19/pdisk-19.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120130 2025-06-30T09:23:50.178370Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 20, response# PDiskStateInfo { PDiskId: 20 CreateTime: 0 ChangeTime: 0 Path: "/20/pdisk-20.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120130 2025-06-30T09:23:50.178386Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 21, response# PDiskStateInfo { PDiskId: 21 CreateTime: 0 ChangeTime: 0 Path: "/21/pdisk-21.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120130 2025-06-30T09:23:50.178401Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 24, response# PDiskStateInfo { PDiskId: 24 CreateTime: 0 ChangeTime: 0 Path: "/24/pdisk-24.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120130 2025-06-30T09:23:50.178415Z node 17 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-30T09:23:50.189283Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:50.189445Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "test-user" Actions { Type: SHUTDOWN_HOST Host: "17" Duration: 600000000 } Actions { Type: SHUTDOWN_HOST Host: "18" Duration: 600000000 } Actions { Type: SHUTDOWN_HOST Host: "19" Duration: 600000000 } Actions { Type: SHUTDOWN_HOST Host: "20" Duration: 600000000 } Actions { Type: SHUTDOWN_HOST Host: "21" Duration: 600000000 } Actions { Type: SHUTDOWN_HOST Host: "22" Duration: 600000000 } Actions { Type: SHUTDOWN_HOST 
Host: "23" Duration: 600000000 } Actions { Type: SHUTDOWN_HOST Host: "24" Duration: 600000000 } PartialPermissionAllowed: true Schedule: true DryRun: false Reason: "" AvailabilityMode: MODE_FORCE_RESTART MaintenanceTaskId: "task-1" }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "test-user-r-1" Permissions { Id: "test-user-p-1" Action { Type: SHUTDOWN_HOST Host: "17" Duration: 600000000 } Deadline: 720130000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 17 InterconnectPort: 12001 } } } Permissions { Id: "test-user-p-2" Action { Type: SHUTDOWN_HOST Host: "18" Duration: 600000000 } Deadline: 720130000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 18 InterconnectPort: 12002 } } } Permissions { Id: "test-user-p-3" Action { Type: SHUTDOWN_HOST Host: "19" Duration: 600000000 } Deadline: 720130000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 19 InterconnectPort: 12003 } } } Permissions { Id: "test-user-p-4" Action { Type: SHUTDOWN_HOST Host: "20" Duration: 600000000 } Deadline: 720130000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 20 InterconnectPort: 12004 } } } Permissions { Id: "test-user-p-5" Action { Type: SHUTDOWN_HOST Host: "21" Duration: 600000000 } Deadline: 720130000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 21 InterconnectPort: 12005 } } } Permissions { Id: "test-user-p-6" Action { Type: SHUTDOWN_HOST Host: "22" Duration: 600000000 } Deadline: 720130000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 22 InterconnectPort: 12006 } } } Permissions { Id: "test-user-p-7" Action { Type: SHUTDOWN_HOST Host: "23" Duration: 600000000 } Deadline: 720130000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 23 InterconnectPort: 12007 } } } Permissions { Id: "test-user-p-8" Action { Type: SHUTDOWN_HOST Host: "24" Duration: 600000000 } Deadline: 720130000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 24 InterconnectPort: 12008 } } } } 2025-06-30T09:23:50.189463Z node 17 :CMS DEBUG: cms.cpp:1063: Schedule cleanup at 1970-01-01T00:32:00.130000Z ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::VDisksEvictionShouldFailOnUnsupportedAction [GOOD] Test command err: 2025-06-30T09:23:46.203120Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:46.203583Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:46.206557Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:46.206629Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:46.207096Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:46.207208Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:46.207716Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:46.207745Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:46.207786Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-30T09:23:46.207882Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:46.209042Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:46.209086Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:46.209115Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:46.209159Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:46.240864Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:46.262301Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:46.262380Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:46.263642Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:46.263824Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:46.263830Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:46.263837Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:46.263840Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:46.263853Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:46.263883Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:46.263905Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:46.265461Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { 
NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { 
NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-30T09:23:46.296781Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:46.296845Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:46.297122Z node 1 :CMS INFO: cms.cpp:104: OnTabletDead: 72057594037936128 2025-06-30T09:23:46.297133Z node 1 :CMS DEBUG: cms.cpp:1208: TCms::Cleanup 2025-06-30T09:23:46.298623Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:46.299241Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:46.299280Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:46.299612Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:46.299706Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:46.299841Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:46.299918Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:69: Loaded config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:46.299941Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:46.300003Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:46.300056Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 } 2025-06-30T09:23:46.417419Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:46.476232Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:46.476289Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:46.476375Z node 1 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:46.476612Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "Wall-E" Actions { Type: RESTART_SERVICES Host: "4" Services: "storage" Duration: 6000000000 } PartialPermissionAllowed: true Schedule: false DryRun: false 2025-06-30T09:23:46.476625Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "4" Services: "storage" Duration: 6000000000 2025-06-30T09:23:46.476643Z node 1 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 4, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:46.476691Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 
2025-06-30T09:23:46.476715Z node 1 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# Wall-E-p-1, requestId# Wall-E-r-1, owner# Wall-E 2025-06-30T09:23:46.476726Z node 1 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12004 (4) (permission Wa ... Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 23 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 24 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 PileId: 0 } Timestamp: 120029000 } } 2025-06-30T09:23:50.237619Z node 17 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-17-17" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 17 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-18-18" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 18 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 19 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: 
"vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-20-20" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 20 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 21 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 22 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 23 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 24 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 PileId: 0 } Timestamp: 120029000 } 2025-06-30T09:23:50.237662Z node 17 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.003000s 2025-06-30T09:23:50.237672Z node 17 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-30T09:23:50.237732Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: REPLACE_DEVICES Host: "17" Devices: "pdisk-17-17" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true 2025-06-30T09:23:50.237741Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: REPLACE_DEVICES Host: "17" Devices: "pdisk-17-17" Duration: 60000000 2025-06-30T09:23:50.237750Z node 17 :CMS DEBUG: cms.cpp:398: Result: WRONG_REQUEST (reason: Unable to evict vdisks to perform action: REPLACE_DEVICES) 2025-06-30T09:23:50.237772Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:50.237823Z node 17 :CMS 
NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-30T09:23:50.237878Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 17, wbId# [17:8388350642965737326:1634689637] 2025-06-30T09:23:50.237884Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 18, wbId# [18:8388350642965737326:1634689637] 2025-06-30T09:23:50.237889Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 19, wbId# [19:8388350642965737326:1634689637] 2025-06-30T09:23:50.237893Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 20, wbId# [20:8388350642965737326:1634689637] 2025-06-30T09:23:50.237898Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 21, wbId# [21:8388350642965737326:1634689637] 2025-06-30T09:23:50.237902Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 22, wbId# [22:8388350642965737326:1634689637] 2025-06-30T09:23:50.237908Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 23, wbId# [23:8388350642965737326:1634689637] 2025-06-30T09:23:50.237913Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 24, wbId# [24:8388350642965737326:1634689637] 2025-06-30T09:23:50.239157Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 17, response# PDiskStateInfo { PDiskId: 17 CreateTime: 0 ChangeTime: 0 Path: "/17/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-30T09:23:50.239337Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 21, response# PDiskStateInfo { PDiskId: 21 CreateTime: 0 ChangeTime: 0 Path: "/21/pdisk-21.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-30T09:23:50.239365Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 22, response# PDiskStateInfo { PDiskId: 22 CreateTime: 0 ChangeTime: 0 Path: "/22/pdisk-22.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-30T09:23:50.239381Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 23, response# PDiskStateInfo { PDiskId: 23 CreateTime: 0 ChangeTime: 0 Path: "/23/pdisk-23.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-30T09:23:50.239391Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 18, response# PDiskStateInfo { PDiskId: 18 CreateTime: 0 ChangeTime: 0 Path: "/18/pdisk-18.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-30T09:23:50.239402Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 19, response# PDiskStateInfo { PDiskId: 19 CreateTime: 0 ChangeTime: 0 Path: "/19/pdisk-19.data" 
Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-30T09:23:50.239412Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 20, response# PDiskStateInfo { PDiskId: 20 CreateTime: 0 ChangeTime: 0 Path: "/20/pdisk-20.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-30T09:23:50.239423Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 24, response# PDiskStateInfo { PDiskId: 24 CreateTime: 0 ChangeTime: 0 Path: "/24/pdisk-24.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-30T09:23:50.239434Z node 17 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-30T09:23:50.259958Z node 17 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:50.312314Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:50.312413Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: REPLACE_DEVICES Host: "17" Devices: "pdisk-17-17" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: WRONG_REQUEST Reason: "Unable to evict vdisks to perform action: REPLACE_DEVICES" } RequestId: "user-r-1" }
>> KqpWorkloadServiceActors::TestDefaultPoolAdminPermissions [GOOD]
>> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit
|83.0%| [TA] $(B)/ydb/core/persqueue/ut/slow/test-results/unittest/{meta.json ... results_accumulator.log}
|83.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build/test-results/unittest/{meta.json ... results_accumulator.log}
>> TCmsTest::SamePriorityRequest [GOOD]
>> TCmsTest::SamePriorityRequest2
>> TCmsTest::PriorityRange [GOOD]
>> TPersQueueTest::DirectReadBadCases [GOOD]
>> TColumnShardTestSchema::RebootHotTiers [GOOD]
>> TPersQueueTest::CheckACLForGrpcRead [GOOD]
>> TPersQueueTest::CheckKillBalancer
>> TPersQueueTest::DirectReadWrongGeneration
|83.0%| [TA] $(B)/ydb/core/kqp/ut/scheme/test-results/unittest/{meta.json ... results_accumulator.log}
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::PriorityRange [GOOD] Test command err:
2025-06-30T09:23:46.118891Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:46.120014Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:46.122084Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:46.122115Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:46.122163Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config.
2025-06-30T09:23:46.122247Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:46.123092Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:46.123167Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:46.123613Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:46.123822Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:46.124741Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:46.124774Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:46.124809Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:46.124871Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:46.155322Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:46.187992Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:46.188113Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:46.190022Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:46.190191Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:46.190201Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:46.190212Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:46.190217Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:46.190266Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:46.190298Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:46.190410Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:46.193147Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } 
GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } 
VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-30T09:23:46.235664Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:46.235756Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:46.274885Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:46.275046Z node 1 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:46.275351Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-1-1" State: DOWN Timestamp: 120028000 } Timestamp: 120028000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" 
Rack: "3" Unit: "3" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp ... state for unknown PDisk 24:24 2025-06-30T09:23:50.194040Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:50.194093Z node 17 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:50.194138Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-30T09:23:50.194145Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 2025-06-30T09:23:50.194157Z node 17 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 17, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:50.194166Z node 17 :CMS DEBUG: cms.cpp:398: Result: DISALLOW (reason: Affected group 0 has no parity parts) 2025-06-30T09:23:50.194187Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW Reason: "Affected group 0 has no parity parts" } } 2025-06-30T09:23:51.860326Z node 25 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:51.861370Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:51.863309Z node 25 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:51.863346Z node 25 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:51.863371Z node 25 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-30T09:23:51.863425Z node 25 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:51.864196Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:51.864266Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:51.864590Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:51.864634Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:51.865369Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:51.865388Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:51.865422Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:51.865438Z node 25 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:51.887069Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:51.919775Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:51.919879Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:51.919908Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:51.919999Z node 25 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:51.920007Z node 25 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:51.920017Z node 25 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:51.920024Z node 25 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:51.920077Z node 25 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:51.920103Z node 25 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:51.920131Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:51.920519Z node 25 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 25 PDiskId: 25 Path: "/25/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 26 PDiskId: 26 Path: "/26/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 27 PDiskId: 27 Path: "/27/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 28 PDiskId: 28 Path: "/28/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 29 PDiskId: 29 Path: "/29/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 30 PDiskId: 30 Path: "/30/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 31 PDiskId: 31 Path: "/31/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 32 PDiskId: 32 Path: "/32/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } 
VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1000 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1000 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1000 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1000 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1000 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1000 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1000 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1001 } VSlotId { 
NodeId: 26 PDiskId: 26 VSlotId: 1001 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1001 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1001 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1001 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1001 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1001 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1002 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1002 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1002 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1002 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1002 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1002 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1002 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1003 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1003 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1003 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1003 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1003 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1003 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1003 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1003 } } } } Success: true 2025-06-30T09:23:51.963017Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:51.963089Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:51.963320Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -101 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: WRONG_REQUEST Reason: "Priority value is out of range" } } 2025-06-30T09:23:51.963426Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: 101 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: WRONG_REQUEST Reason: "Priority value is out of range" } }
>> TCmsTest::SamePriorityRequest2 [GOOD]
|83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test
|83.0%| [TA] {RESULT} $(B)/ydb/core/persqueue/ut/slow/test-results/unittest/{meta.json ... results_accumulator.log}
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootHotTiers [GOOD] Test command err:
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=151275968.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=151275968.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=151275968.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=131275968.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=151275968.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=151275968.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=131274768.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=131275968.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=131275968.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=131274768.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=131274768.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=131274768.000000s;Name=;Codec=}; 2025-06-30T09:22:49.087889Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:98;event=initialize_shard;step=OnActivateExecutor; 2025-06-30T09:22:49.093137Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:116;event=initialize_shard;step=initialize_tiring_finished; 2025-06-30T09:22:49.093255Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-30T09:22:49.094203Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:22:49.094281Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:22:49.094343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:22:49.094367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:22:49.094389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:22:49.094412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:22:49.094435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:22:49.094457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:22:49.094484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:22:49.094505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:22:49.094531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:22:49.094553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:22:49.103213Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-30T09:22:49.103361Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-06-30T09:22:49.103378Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-30T09:22:49.103431Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-30T09:22:49.103494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:22:49.103513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:22:49.103521Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-30T09:22:49.103535Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-30T09:22:49.103549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:22:49.103560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:22:49.103567Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-30T09:22:49.103594Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-30T09:22:49.103606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:22:49.103617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:22:49.103623Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-30T09:22:49.103638Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-30T09:22:49.103648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:22:49.103659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:22:49.103665Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-30T09:22:49.103677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:22:49.103688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-30T09:22:49.103694Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-30T09:22:49.103732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:22:49.103745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:22:49.103752Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-30T09:22:49.103784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:22:49.103795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:22:49.103802Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-30T09:22:49.103837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:78 ... osite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=1072; 2025-06-30T09:23:53.497164Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=2; 2025-06-30T09:23:53.497205Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=33; 2025-06-30T09:23:53.497212Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=1156; 2025-06-30T09:23:53.497221Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=1173; 2025-06-30T09:23:53.497229Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=2; 2025-06-30T09:23:53.497246Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=10; 2025-06-30T09:23:53.497251Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=1273; 2025-06-30T09:23:53.497282Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=22; 2025-06-30T09:23:53.497302Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=12; 2025-06-30T09:23:53.497328Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=18; 2025-06-30T09:23:53.497347Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=9; 2025-06-30T09:23:53.498382Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=1018; 2025-06-30T09:23:53.499451Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=1034; 2025-06-30T09:23:53.499481Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=3; 2025-06-30T09:23:53.499490Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=1; 2025-06-30T09:23:53.499499Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=1; 2025-06-30T09:23:53.499519Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=13; 2025-06-30T09:23:53.499527Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=1; 2025-06-30T09:23:53.499545Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=12; 2025-06-30T09:23:53.499552Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=1; 2025-06-30T09:23:53.499574Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=9; 2025-06-30T09:23:53.499593Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=12; 2025-06-30T09:23:53.499718Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=116; 2025-06-30T09:23:53.499729Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=5159; 2025-06-30T09:23:53.499779Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=29251936;raw_bytes=43173354;count=6;records=480000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-30T09:23:53.499829Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];process=SwitchToWork;fline=columnshard.cpp:73;event=initialize_shard;step=SwitchToWork; 2025-06-30T09:23:53.499843Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];process=SwitchToWork;fline=columnshard.cpp:76;event=initialize_shard;step=SignalTabletActive; 2025-06-30T09:23:53.499865Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-30T09:23:53.502921Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-30T09:23:53.502986Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-30T09:23:53.503022Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-30T09:23:53.503043Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1751275118949;tx_id=18446744073709551615;;current_snapshot_ts=1751275370427; 2025-06-30T09:23:53.503054Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-30T09:23:53.503067Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-30T09:23:53.503075Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-30T09:23:53.503106Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-30T09:23:53.503792Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:248;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-30T09:23:53.503832Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:237;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-30T09:23:53.503846Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-30T09:23:53.503851Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-30T09:23:53.503859Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-30T09:23:53.503878Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-30T09:23:53.503891Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1751275118949;tx_id=18446744073709551615;;current_snapshot_ts=1751275370427; 2025-06-30T09:23:53.503899Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-30T09:23:53.503909Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-30T09:23:53.503916Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-30T09:23:53.503937Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2024:3833];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 160000/9752224 80000/4886744 0/0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::SamePriorityRequest2 [GOOD] Test command err: 2025-06-30T09:23:47.189925Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-30T09:23:47.190417Z node 1 :CMS DEBUG: 
console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-30T09:23:47.192941Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-30T09:23:47.193022Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-30T09:23:47.193446Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:23:47.193545Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-30T09:23:47.194128Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-30T09:23:47.194158Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-30T09:23:47.194199Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 2025-06-30T09:23:47.194295Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-30T09:23:47.195527Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-30T09:23:47.195573Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-30T09:23:47.195603Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-30T09:23:47.195649Z node 1 :CMS DEBUG: cms.cpp:1146: Running CleanupWalleTasks 2025-06-30T09:23:47.228209Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-30T09:23:47.250308Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-30T09:23:47.250433Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:47.252226Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:47.252518Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-30T09:23:47.252532Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-30T09:23:47.252544Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-30T09:23:47.252549Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-30T09:23:47.252567Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-30T09:23:47.252605Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-30T09:23:47.252633Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-30T09:23:47.255856Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 
PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 
PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-30T09:23:47.287606Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-30T09:23:47.287669Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-30T09:23:47.331637Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:47.331688Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:47.331770Z node 1 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:47.332172Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: 
"storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 PileId: 0 } Hosts { Name: "::1" State: UP Services { Name: "stor ... 09:23:53.387232Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:53.387355Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Deadline: 180026512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 25 InterconnectPort: 12001 } } } } 2025-06-30T09:23:53.387368Z node 25 :CMS DEBUG: cms.cpp:1063: Schedule cleanup at 1970-01-01T00:05:00.026512Z 2025-06-30T09:23:53.415764Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:53.415868Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:53.415893Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:53.415908Z node 25 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:53.416086Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-30T09:23:53.416097Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 2025-06-30T09:23:53.416108Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-30T09:23:53.416136Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. 
Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: ) 2025-06-30T09:23:53.416158Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:53.416234Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-2, owner# user, order# 2, priority# -80, body# User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-30T09:23:53.427218Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:53.427312Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } RequestId: "user-r-2" Deadline: 420129024 } 2025-06-30T09:23:53.427439Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-1 2025-06-30T09:23:53.427448Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-30T09:23:53.427459Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-30T09:23:53.427480Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-1, reason# explicit remove 2025-06-30T09:23:53.438347Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-30T09:23:53.438418Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-30T09:23:53.461182Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:53.461246Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:53.461405Z node 25 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:53.461591Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. 
Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-30T09:23:53.461608Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } 2025-06-30T09:23:53.461620Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:53.461661Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:53.461703Z node 25 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-30T09:23:53.461713Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12002 (26) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:53.461729Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:53.461770Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:03:00.232048Z, action# Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 2025-06-30T09:23:53.461781Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-2, owner# user 2025-06-30T09:23:53.475582Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:53.475700Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-2" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-2" Action { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } Deadline: 180232048 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } 2025-06-30T09:23:53.475860Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-2 2025-06-30T09:23:53.475872Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-30T09:23:53.475889Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-30T09:23:53.475915Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-06-30T09:23:53.486890Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-30T09:23:53.486974Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-30T09:23:53.570133Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-30T09:23:53.570168Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-30T09:23:53.570180Z node 25 :CMS DEBUG: cluster_info.cpp:974: Timestamp: 1970-01-01T00:02:00Z 2025-06-30T09:23:53.570285Z node 25 :CMS INFO: cms.cpp:347: Check 
request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has temporary lock, VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-30T09:23:53.570293Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has temporary lock, VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } 2025-06-30T09:23:53.570301Z node 25 :CMS DEBUG: node_checkers.cpp:99: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-30T09:23:53.570327Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-30T09:23:53.570343Z node 25 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-3, requestId# user-r-1, owner# user 2025-06-30T09:23:53.570348Z node 25 :CMS INFO: cluster_info.cpp:778: Adding lock for Host ::1:12002 (26) (permission user-p-3 until 1970-01-01T00:03:00Z) 2025-06-30T09:23:53.570356Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-30T09:23:53.570386Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-3, validity# 1970-01-01T00:03:00.335072Z, action# Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 2025-06-30T09:23:53.570394Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-30T09:23:53.581253Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-30T09:23:53.581346Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-3" Action { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } Deadline: 180335072 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_remove_queue_generates_event[tables_format_v0] >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit [GOOD] |83.0%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/scheme/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit [GOOD] Test command err: 2025-06-30T09:23:49.106545Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670697672427701:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:49.106571Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00338f/r3tmp/tmppXZwpI/pdisk_1.dat 2025-06-30T09:23:49.150456Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:49.150633Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670697672427682:2080] 1751275429106411 != 1751275429106414 TServer::EnableGrpc on GrpcPort 14236, node 1 2025-06-30T09:23:49.160870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:49.160881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:49.160882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:49.160921Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30222 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:49.209375Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:49.209422Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:49.210475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:49.234390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:23:49.422939Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-30T09:23:49.428599Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-30T09:23:49.428636Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670697672428287:2288], Start check tables existence, number paths: 2 2025-06-30T09:23:49.428739Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-30T09:23:49.428747Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-30T09:23:49.428963Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670697672428287:2288], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-30T09:23:49.428982Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670697672428287:2288], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-30T09:23:49.428987Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7521670697672428287:2288], Successfully finished 2025-06-30T09:23:49.429019Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-30T09:23:49.429334Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZGM1NDE5NGQtNDRiNGQwMWItMzU3NTBkY2QtYzI4ZGQ0MTc=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZGM1NDE5NGQtNDRiNGQwMWItMzU3NTBkY2QtYzI4ZGQ0MTc= 2025-06-30T09:23:49.429381Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZGM1NDE5NGQtNDRiNGQwMWItMzU3NTBkY2QtYzI4ZGQ0MTc=, ActorId: [1:7521670697672428303:2289], ActorState: unknown state, session actor bootstrapped 2025-06-30T09:23:49.431647Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670697672428305:2292], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-30T09:23:49.432592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:49.432996Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670697672428305:2292], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976715658 2025-06-30T09:23:49.433027Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670697672428305:2292], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-30T09:23:49.434784Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670697672428305:2292], 
DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:23:49.533770Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670697672428305:2292], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-30T09:23:49.534859Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670697672428356:2324] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:49.534896Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670697672428305:2292], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-30T09:23:49.535006Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670697672428363:2330], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-06-30T09:23:49.535238Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670697672428363:2330], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-30T09:23:49.536262Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2413: SessionId: ydb://session/3?node_id=1&id=ZGM1NDE5NGQtNDRiNGQwMWItMzU3NTBkY2QtYzI4ZGQ0MTc=, ActorId: [1:7521670697672428303:2289], ActorState: ReadyState, Session closed due to explicit close event 2025-06-30T09:23:49.536311Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=1&id=ZGM1NDE5NGQtNDRiNGQwMWItMzU3NTBkY2QtYzI4ZGQ0MTc=, ActorId: [1:7521670697672428303:2289], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:23:49.536319Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=1&id=ZGM1NDE5NGQtNDRiNGQwMWItMzU3NTBkY2QtYzI4ZGQ0MTc=, ActorId: [1:7521670697672428303:2289], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-30T09:23:49.536323Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2644: SessionId: ydb://session/3?node_id=1&id=ZGM1NDE5NGQtNDRiNGQwMWItMzU3NTBkY2QtYzI4ZGQ0MTc=, ActorId: [1:7521670697672428303:2289], ActorState: unknown state, Cleanup temp tables: 0 2025-06-30T09:23:49.536343Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2735: SessionId: ydb://session/3?node_id=1&id=ZGM1NDE5NGQtNDRiNGQwMWItMzU3NTBkY2QtYzI4ZGQ0MTc=, ActorId: [1:7521670697672428303:2289], ActorState: unknown state, Session actor destroyed 2025-06-30T09:23:49.760711Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670695076704196:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:49.760740Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00338f/r3tmp/tmpGfdHMf/pdisk_1.dat 2025-06-30T09:23:49.775112Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-30T09:23:49.775536Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670695076704176:2080] 1751275429760615 != 1751275429760618 TServer::EnableGrpc on GrpcPort 29675, node 2 2025-06-30T09:23:49.785821Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:49.785836Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:49.785838Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:49.785886Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2928 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ... d: ydb://session/3?node_id=6&id=MzE3MDU3MGUtZTI1Njk3ODAtNzdhNzU5ZTEtYzZhMjRiZGE=, ActorId: [6:7521670708821379276:2293], ActorState: unknown state, Cleanup temp tables: 0 2025-06-30T09:23:56.976258Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2735: SessionId: ydb://session/3?node_id=6&id=MzE3MDU3MGUtZTI1Njk3ODAtNzdhNzU5ZTEtYzZhMjRiZGE=, ActorId: [6:7521670708821379276:2293], ActorState: unknown state, Session actor destroyed 2025-06-30T09:23:56.986199Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1797: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxb4gsnbg33x2gtfjkh, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-30T09:23:56.986277Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2056: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxb4gsnbg33x2gtfjkh, txInfo Status: Committed Kind: ReadWrite TotalDuration: 13.101 ServerDuration: 12.966 QueriesCount: 2 2025-06-30T09:23:56.986316Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2211: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxb4gsnbg33x2gtfjkh, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-30T09:23:56.986337Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxb4gsnbg33x2gtfjkh, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:23:56.986341Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxb4gsnbg33x2gtfjkh, EndCleanup, isFinal: 0 2025-06-30T09:23:56.986352Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2368: SessionId: 
ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxb4gsnbg33x2gtfjkh, Sent query response back to proxy, proxyRequestId: 510, proxyId: [6:7521670708821378620:2159] 2025-06-30T09:23:56.986703Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, TxId: 2025-06-30T09:23:56.986757Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:197: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, RunDataQuery: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); 2025-06-30T09:23:56.986891Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ReadyState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, received request, proxyRequestId: 511 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); rpcActor: [6:7521670726001254631:4235] database: /Root databaseId: /Root pool id: default 2025-06-30T09:23:56.986902Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ReadyState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, request placed into pool from cache: default 2025-06-30T09:23:56.987752Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1395: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, ExecutePhyTx, tx: 0x000011D8A2403218 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-06-30T09:23:56.987777Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1546: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, Sending to Executer TraceId: 0 8 2025-06-30T09:23:56.987801Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1604: SessionId: 
ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, Created new KQP executer: [6:7521670726001254634:4230] isRollback: 0 2025-06-30T09:23:56.992527Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1797: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-06-30T09:23:56.992581Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1395: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, ExecutePhyTx, tx: 0x000011D8A2403198 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-30T09:23:56.992921Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1797: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-30T09:23:56.992995Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2056: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, txInfo Status: Committed Kind: ReadOnly TotalDuration: 5.317 ServerDuration: 5.251 QueriesCount: 2 2025-06-30T09:23:56.993048Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2211: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-30T09:23:56.993069Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:23:56.993072Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, EndCleanup, isFinal: 0 2025-06-30T09:23:56.993094Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2368: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ExecuteState, TraceId: 01jz02cyxt28h5kjf51c18qbmy, Sent query response back to proxy, proxyRequestId: 511, proxyId: [6:7521670708821378620:2159] 2025-06-30T09:23:56.993392Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, TxId: 2025-06-30T09:23:56.993421Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, 
RequestDatabase: /Root, RequestSessionId: , State: Describe pool, Finish with SUCCESS, SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, TxId: 2025-06-30T09:23:56.993500Z node 6 :KQP_WORKLOAD_SERVICE TRACE: pool_handlers_actors.cpp:746: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7521670713116346890:2302], DatabaseId: /Root, PoolId: sample_pool_id, succefully refreshed pool state, in flight: 0, delayed: 0 2025-06-30T09:23:56.993514Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2413: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ReadyState, Session closed due to explicit close event 2025-06-30T09:23:56.993521Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2571: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-30T09:23:56.993523Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2632: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-30T09:23:56.993527Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2644: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: unknown state, Cleanup temp tables: 0 2025-06-30T09:23:56.993545Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2735: SessionId: ydb://session/3?node_id=6&id=ZjNiNDMxYTQtODFlYjRkZjgtNGYxMzI0ZDUtYzUwYmQ1NGM=, ActorId: [6:7521670726001254613:4230], ActorState: unknown state, Session actor destroyed 2025-06-30T09:23:57.603921Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7521670710108594924:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:57.603990Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> SystemView::QueryStatsAllTables [GOOD] >> SystemView::QueryStatsRetries >> YdbSdkSessionsPool1Session::FailTest/0 >> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED] |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> YdbSdkSessionsPool::PeriodicTask/0 >> YdbSdkSessions::MultiThreadSync >> YdbSdkSessionsPool::WaitQueue/1 >> YdbSdkSessionsPool::WaitQueue/0 >> YdbSdkSessionsPool::StressTestSync/1 >> YdbSdkSessions::TestMultipleSessions >> YdbSdkSessionsPool1Session::RunSmallPlan/0 >> YdbSdkSessionsPool::StressTestAsync/0 >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient >> YdbSdkSessionsPool1Session::CustomPlan/0 >> YdbSdkSessionsPool1Session::GetSession/0 >> YdbSdkSessionsPool::StressTestSync/0 >> YdbSdkSessions::TestSessionPool >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED] >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient >> 
YdbSdkSessions::TestActiveSessionCountAfterBadSession >> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly >> YdbSdkSessionsPool1Session::RunSmallPlan/0 [GOOD] >> YdbSdkSessionsPool1Session::GetSession/0 [GOOD] |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> YdbSdkSessionsPool1Session::FailTest/0 [GOOD] >> YdbSdkSessions::TestActiveSessionCountAfterBadSession [GOOD] >> YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED] >> YdbSdkSessions::TestMultipleSessions [GOOD] >> YdbSdkSessions::TestActiveSessionCountAfterTransportError >> YdbSdkSessions::TestSessionPool [GOOD] >> YdbSdkSessions::MultiThreadSync [GOOD] >> YdbSdkSessions::SessionsServerLimit [SKIPPED] >> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit >> SystemView::QueryStatsRetries [GOOD] >> YdbSdkSessions::TestActiveSessionCountAfterTransportError [GOOD] >> YdbSdkSessionsPool::WaitQueue/0 [GOOD] >> YdbSdkSessionsPool::WaitQueue/1 [GOOD] >> TPersQueueTest::DirectReadWrongGeneration [GOOD] >> TPersQueueTest::DirectReadStop ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::QueryStatsRetries [GOOD] Test command err: 2025-06-30T09:22:15.670410Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670290721170709:2171];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:15.670498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0030cf/r3tmp/tmpoFEpem/pdisk_1.dat 2025-06-30T09:22:15.760380Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:15.769658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.769690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.771396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4226, node 1 2025-06-30T09:22:15.791780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:15.791794Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:15.791797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:15.791850Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14958 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:15.831547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:15.841611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-30T09:22:15.846078Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521670292069254564:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:15.846106Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:15.846688Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521670292412567560:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:15.846719Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:15.847777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:22:15.849321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.849341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.850325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:15.850344Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:15.850458Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-30T09:22:15.851205Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-30T09:22:16.063365Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:16.063706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:16.275718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-30T09:22:16.283224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-30T09:22:16.286303Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670298616758330:2247];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:16.288749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:16.293756Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; waiting... 
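The HIVE warnings in this test walk every tenant node through the same volatile-state sequence before the cluster is usable. A minimal sketch of the order as it appears in these log lines (an assumed enum, not the actual ydb/core/mind/hive types):

    // Sketch only: the node state order observed in the HIVE log lines,
    // Unknown -> Disconnected -> Connecting -> Connected.
    #include <cstddef>

    enum class ENodeVolatileState { Unknown, Disconnected, Connecting, Connected };

    // True when `next` immediately follows `prev` in the observed order.
    bool IsExpectedTransition(ENodeVolatileState prev, ENodeVolatileState next) {
        return static_cast<std::size_t>(next) == static_cast<std::size_t>(prev) + 1;
    }

Despite the WARN level, these lines trace normal startup: each node is first seen as Unknown, then registered as Disconnected, then Connecting, then Connected.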
2025-06-30T09:22:16.303616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:16.303652Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:16.308488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:16.308727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:16.308997Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:22:16.313187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:16.316401Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-30T09:22:16.317142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:16.407514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:16.469943Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670295016139128:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.469988Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.470153Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670295016139140:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:16.471439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715663:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:16.477321Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670295016139142:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715663 completed, doublechecking } 2025-06-30T09:22:16.561442Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670295016139222:2953] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:16.633900Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz029wrmcq39gaavaa6zjwgs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzY3NGQ1OTUtMjFmYTVmZDUtMTdhODQyYTMtODAzMjA5NmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:22:16.646789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but p ... WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [71:7521670728839671468:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:57.255249Z node 71 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:57.255441Z node 71 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [71:7521670728839671480:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:57.256498Z node 71 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:57.264136Z node 71 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [71:7521670728839671482:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:23:57.371500Z node 71 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [71:7521670728839671558:2662] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:57.396196Z node 71 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02cz66e5khccq5xwnjygnv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=71&id=YjUxOWI3OWItOWYxY2M2ZDEtODM4OWRkODctNDkxNWE3NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:57.423852Z node 71 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz02czaw9ezwe72pntc9c5my, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=71&id=YzJjN2ViNTUtNDdmZWRjYy1lMmQwMGU4My02YmY1YjQ2MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:57.424667Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [71:7521670728839671635:2319], owner: [71:7521670728839671632:2317], scan id: 0, sys view info: Type: ETopQueriesByRequestUnitsOneHour SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:57.425163Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [71:7521670728839671635:2319], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:23:57.425253Z node 71 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [71:7521670728839671635:2319], row count: 1, finished: 1 2025-06-30T09:23:57.425270Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [71:7521670728839671635:2319], owner: [71:7521670728839671632:2317], scan id: 0, sys view info: Type: ETopQueriesByRequestUnitsOneHour SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:57.426256Z node 71 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275437423, txId: 281474976715662] shutting down 2025-06-30T09:23:58.451109Z node 76 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[76:7521670732764889514:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:58.451133Z node 76 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0030cf/r3tmp/tmpNy0BeZ/pdisk_1.dat 2025-06-30T09:23:58.472312Z node 76 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64814, node 76 2025-06-30T09:23:58.494392Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:58.494405Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty 
maybe) 2025-06-30T09:23:58.494407Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:58.494464Z node 76 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25648 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:58.555846Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:58.555880Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:58.559452Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:58.559669Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:23:58.565445Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:23:58.845822Z node 76 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [76:7521670732764890400:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:58.845860Z node 76 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [76:7521670732764890408:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:58.845870Z node 76 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:58.846899Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:58.853860Z node 76 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [76:7521670732764890414:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:23:58.940680Z node 76 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [76:7521670732764890483:2651] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:58.961230Z node 76 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02d0qx4jvde35d2w9nyy74, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=76&id=MjUwYWQzNTktYmZhMmEwY2QtNjM4MGU3Ni05NDUzZmE1Mg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:58.978339Z node 76 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz02d0vk93csh2nahh92dsyg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=76&id=MmIzYzdjYTMtZjE4NjFhOTMtNmUzMzlhMzYtZTRiNDAwZGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:58.978959Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [76:7521670732764890561:2319], owner: [76:7521670732764890557:2317], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:58.979183Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [76:7521670732764890561:2319], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-30T09:23:58.979297Z node 76 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [76:7521670732764890561:2319], row count: 1, finished: 1 2025-06-30T09:23:58.979320Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [76:7521670732764890561:2319], owner: [76:7521670732764890557:2317], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-30T09:23:58.980187Z node 76 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275438977, txId: 281474976715662] shutting down >> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:248: Test is failing right now ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimit [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:548: Enable after accepting a pull request with merging configs |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::FailTest/0 [GOOD] >> 
TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndCloseClientSessionWithEnabledRemotePreferredClusterDelaySec_SessionDiesOnlyAfterDelay [GOOD] >> TPersQueueTest::PreferredCluster_NonExistentPreferredCluster_SessionDiesOnlyAfterDelay >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:200: Test is failing right now |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:588: Enable after accepting a pull request with merging configs |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::RunSmallPlan/0 [GOOD] |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::GetSession/0 [GOOD] |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_remove_queue_generates_event[tables_format_v0] [SKIPPED] |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSessionPool [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_remove_queue_generates_event[tables_format_v1] |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_remove_queue_generates_event[tables_format_v1] [SKIPPED] |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::WaitQueue/0 [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v0-fifo] |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::WaitQueue/1 [GOOD] |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestActiveSessionCountAfterTransportError [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD] |83.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |83.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |83.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient [GOOD] >> 
YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD] |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> SelfHeal::TestTwoMaintenanceRequestsOneFaulty4Plus2Block [GOOD] >> SelfHeal::TestOneInactiveMaintenanceRequestOneMaintenanceRequestMirror3dc |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:539: Enable after interactive tx support >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v0-fifo] [GOOD] >> ColumnBuildTest::CancelBuild |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TPersQueueTest::DirectReadStop [GOOD] >> TPersQueueTest::DirectReadCleanCache >> ColumnBuildTest::AlreadyExists >> ColumnBuildTest::BaseCase |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous [GOOD] |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> 
YdbSdkSessionsPool1Session::CustomPlan/0 [GOOD] >> YdbSdkSessionsPool::StressTestSync/0 [GOOD] >> YdbSdkSessionsPool::StressTestSync/1 [GOOD] |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient [GOOD] >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> ColumnBuildTest::ValidDefaultValue >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows >> ColumnBuildTest::CancelBuild [GOOD] >> ColumnBuildTest::AlreadyExists [GOOD] |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::CancelBuild [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:07.661948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:07.661979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:07.661986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:07.661991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:07.661998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:07.662003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:07.662014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:07.662029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:07.662160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:07.662245Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:07.678855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:07.678883Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:07.682719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:07.682816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:07.682856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:07.684556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:07.684647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:07.684777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:07.684869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:07.685587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:07.685639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:07.685981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:07.685994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:07.686023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:07.686033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:07.686039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:07.686062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:07.687546Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:07.725298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:07.725388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:07.725467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:07.725476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:07.725542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:07.725556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:07.731214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:07.731284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:07.731373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:07.731387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:07.731393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:07.731400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:07.732272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:07.732290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:07.732301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:07.732747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:07.732761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:07.732768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:07.732777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:07.733614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose 
send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:07.734134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:07.734178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:07.734390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:07.734421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:07.734429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:07.734498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:07.734507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:07.734546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:07.734559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:07.739091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:07.739112Z node 1 :FLAT_TX_SCHEMESHARD ... 
tID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:10.081895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710761:0 HandleReply TEvOperationPlan: step# 5000007 2025-06-30T09:24:10.081909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710761:0 128 -> 240 2025-06-30T09:24:10.082559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.082576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710761:0 ProgressState 2025-06-30T09:24:10.082596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-30T09:24:10.082602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:24:10.082609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-30T09:24:10.082614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:24:10.082620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: true 2025-06-30T09:24:10.082634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:127:2151] message: TxId: 281474976710761 2025-06-30T09:24:10.082644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:24:10.082650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-30T09:24:10.082656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710761:0 2025-06-30T09:24:10.082673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 12 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-30T09:24:10.083173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-30T09:24:10.083193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710761 2025-06-30T09:24:10.083210Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2019: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710761 2025-06-30T09:24:10.083234Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2022: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1180:3030], 
AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0}, txId# 281474976710761 2025-06-30T09:24:10.083701Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking 2025-06-30T09:24:10.083735Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1180:3030], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:24:10.083750Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Cancellation_Unlocking to Cancelled 2025-06-30T09:24:10.084422Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled 2025-06-30T09:24:10.084463Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancelled, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1180:3030], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:24:10.084470Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:336: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: 
TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-30T09:24:10.084545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:24:10.084557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:1204:3054] TestWaitNotification: OK eventTxId 102 2025-06-30T09:24:10.085066Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-30T09:24:10.085189Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:103: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } 2025-06-30T09:24:10.085759Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:10.085850Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 98us result status StatusSuccess 2025-06-30T09:24:10.086008Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "DefaultValue" Type: "Uint64" TypeId: 4 Id: 4 NotNull: false DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 10 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::AlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:08.321172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:08.321205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:08.321213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:08.321219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:08.321226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:08.321231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:08.321242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:08.321261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-30T09:24:08.321397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:08.321483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:08.335573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:08.335604Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:08.339907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:08.339991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:08.340040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:08.342149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:08.342246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:08.342411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:08.342541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:08.343123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:08.343183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:08.343511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:08.343528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:08.343559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:08.343569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:08.343576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:08.343602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.345155Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:08.375120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:08.375224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.375306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:08.375316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:08.375385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:08.375402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:08.376434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:08.376495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:08.376580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.376593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:08.376599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:08.376607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:08.377288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.377307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:08.377319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:08.377829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.377847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.377855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:08.377865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:08.378780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:08.379367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:08.379422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:08.379676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:08.379714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:08.379725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:08.379806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:08.379817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:08.379859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:08.379875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:08.380480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:08.380493Z node 1 :FLAT_TX_SCHEMESHARD ... 
artCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409551 SchemeShard: 72075186233409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SharedHive: 72057594037968897 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409549, at schemeshard: 72075186233409549 2025-06-30T09:24:10.134324Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__create.cpp:23: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: DoExecute TxId: 106 DatabaseName: "/MyRoot/ServerLessDB" Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } 2025-06-30T09:24:10.137232Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable 2025-06-30T09:24:10.137306Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1154:3022], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:24:10.137323Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.cpp:183: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: AllocateTxId 106 2025-06-30T09:24:10.137387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 106, at schemeshard: 72075186233409549 2025-06-30T09:24:10.137402Z node 1 
:BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2305: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, id# 106, txId# 281474976725757 2025-06-30T09:24:10.137418Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2308: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1154:3022], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0}, txId# 281474976725757 2025-06-30T09:24:10.141753Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable 2025-06-30T09:24:10.141825Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1154:3022], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0} 2025-06-30T09:24:10.141925Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:428: AlterMainTablePropose 106 AlterMainTable Transaction { WorkingDir: "/MyRoot/ServerLessDB" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table" Columns { Name: "value" Type: "Uint64" DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: true } } Internal: true } TxId: 281474976725757 TabletId: 72075186233409549 FailOnExist: true 2025-06-30T09:24:10.143946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/ServerLessDB" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table" Columns { Name: "value" Type: "Uint64" DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: true } } Internal: true } TxId: 281474976725757 TabletId: 72075186233409549 FailOnExist: true , at schemeshard: 72075186233409549 2025-06-30T09:24:10.144051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:506: 
TAlterTable Propose, path: /MyRoot/ServerLessDB/Table, pathId: , opId: 281474976725757:0, at schemeshard: 72075186233409549 2025-06-30T09:24:10.144154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976725757:1, propose status:StatusInvalidParameter, reason: Cannot alter type for column 'value', at schemeshard: 72075186233409549 2025-06-30T09:24:10.152725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976725757, response: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549, at schemeshard: 72075186233409549 2025-06-30T09:24:10.152825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976725757, database: /MyRoot/ServerLessDB, subject: , status: StatusInvalidParameter, reason: Cannot alter type for column 'value', operation: ALTER TABLE, path: /MyRoot/ServerLessDB/Table 2025-06-30T09:24:10.152905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6788: Handle: TEvModifySchemeTransactionResult: txId# 281474976725757, status# StatusInvalidParameter 2025-06-30T09:24:10.152918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6790: Message: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549 2025-06-30T09:24:10.152946Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2142: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, id# 106, cookie: 106, record: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549, status: StatusInvalidParameter 2025-06-30T09:24:10.152985Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2147: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1154:3022], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0}, cookie: 106, record: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549, status: StatusInvalidParameter 2025-06-30T09:24:10.153368Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:2116: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuilder::TTxReply: ReplyOnCreation, BuildIndexId: 106, status: BAD_REQUEST, error: At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column 'value', replyTo: [1:1154:3022], message: TxId: 106 Status: BAD_REQUEST Issues { message: "At AlterMainTable state got unsuccess 
propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } IndexBuild { Id: 106 Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } State: STATE_PREPARING Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } } BUILDCOLUMN RESPONSE CREATE: NKikimrIndexBuilder.TEvCreateResponse TxId: 106 Status: BAD_REQUEST Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } IndexBuild { Id: 106 Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } State: STATE_PREPARING Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } } >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] [GOOD] >> ColumnBuildTest::BaseCase [GOOD] >> TPersQueueTest::PreferredCluster_NonExistentPreferredCluster_SessionDiesOnlyAfterDelay [GOOD] >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndRemoteClusterEnabledDelaySec_SessionDiesOnlyAfterDelay >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected [GOOD] |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestSync/0 [GOOD] >> YdbTableSplit::SplitByLoadWithDeletes [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] [GOOD] >> YdbTableSplit::SplitByLoadWithUpdates [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::BaseCase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:08.407285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:08.407314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:08.407321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:08.407326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:08.407334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:08.407338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:08.407349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:08.407370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:08.407496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:08.407572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:08.443257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:08.443283Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:08.451171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:08.451235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:08.451280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:08.478181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:08.478294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:08.478431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:08.478530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:08.479378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:08.479429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:08.479745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:08.479759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:08.479785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:08.479797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:08.479804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: 
TTxServerlessStorageBilling.Complete 2025-06-30T09:24:08.479830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.481378Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:08.506636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:08.506728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.506882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:08.506893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:08.506962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:08.506978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:08.507779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:08.507832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:08.507910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.507923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:08.507929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:08.507936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:08.508490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.508505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 
2025-06-30T09:24:08.508511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:08.508919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.508931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:08.508938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:08.508947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:08.509707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:08.510225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:08.510275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:08.510496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:08.510525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:08.510534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:08.510601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:08.510609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:08.510644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:08.510659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:08.511156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:08.511167Z node 1 :FLAT_TX_SCHEMESHARD ... 2075186233409550 message:Transaction { AffectedSet { TabletId: 72075186233409549 Flags: 2 } ExecLevel: 0 TxId: 281474976725761 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409550 2025-06-30T09:24:11.713767Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-30T09:24:11.713835Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1154:3022], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424, Billed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424} 2025-06-30T09:24:11.713915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976725761:4294967295 from tablet: 72075186233409549 to tablet: 72075186233409550 cookie: 0:281474976725761 msg type: 269090816 2025-06-30T09:24:11.713956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976725761, partId: 4294967295, tablet: 72075186233409550 2025-06-30T09:24:11.714039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-30T09:24:11.714051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 0/1, is published: true 2025-06-30T09:24:11.714057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-30T09:24:11.727119Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [1:1826:3686], Recipient [1:762:2649]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186233409549 ClientId: [1:1826:3686] ServerId: [1:1828:3688] } 2025-06-30T09:24:11.727151Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-30T09:24:11.787557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 650, transactions count in step: 1, at schemeshard: 72075186233409549 2025-06-30T09:24:11.787616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976725761 AckTo { RawX1: 0 RawX2: 0 } } Step: 650 MediatorID: 
72075186233409551 TabletID: 72075186233409549, at schemeshard: 72075186233409549 2025-06-30T09:24:11.787631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72075186233409549] TDropLock TPropose opId# 281474976725761:0 HandleReply TEvOperationPlan: step# 650 2025-06-30T09:24:11.787642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976725761:0 128 -> 240 2025-06-30T09:24:11.788671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-06-30T09:24:11.788692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72075186233409549] TDone opId# 281474976725761:0 ProgressState 2025-06-30T09:24:11.788713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-30T09:24:11.788720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-30T09:24:11.788727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-30T09:24:11.788731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-30T09:24:11.788738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 1/1, is published: true 2025-06-30T09:24:11.788755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:571:2507] message: TxId: 281474976725761 2025-06-30T09:24:11.788765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-30T09:24:11.788771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976725761:0 2025-06-30T09:24:11.788777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976725761:0 2025-06-30T09:24:11.788798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-30T09:24:11.789775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976725761 2025-06-30T09:24:11.789799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976725761 2025-06-30T09:24:11.789818Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2019: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 106, txId# 281474976725761 2025-06-30T09:24:11.789851Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2022: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1154:3022], AlterMainTableTxId: 281474976725757, 
AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424, Billed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424}, txId# 281474976725761 2025-06-30T09:24:11.790386Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-30T09:24:11.790420Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1154:3022], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424, Billed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424} 2025-06-30T09:24:11.790433Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-30T09:24:11.791055Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done 2025-06-30T09:24:11.791092Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1154:3022], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424, Billed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424} 2025-06-30T09:24:11.791099Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:336: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 106, 
subscribers count# 1 2025-06-30T09:24:11.791133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-30T09:24:11.791142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:1174:3042] TestWaitNotification: OK eventTxId 106 2025-06-30T09:24:11.791578Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 106 2025-06-30T09:24:11.791679Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:103: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } |83.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |83.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |83.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestSync/1 [GOOD] |83.2%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[delete] |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::CustomPlan/0 [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[insert] |83.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |83.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |83.2%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithDeletes [GOOD] Test command err: 2025-06-30T09:23:50.078720Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670698254316080:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:50.078775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot 
detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004572/r3tmp/tmpRXZpbW/pdisk_1.dat 2025-06-30T09:23:50.138238Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:50.145716Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 6014, node 1 2025-06-30T09:23:50.151256Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:50.151270Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:50.151272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:50.151309Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8072 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:23:50.179251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:50.179275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:50.180679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:50.181058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:8072 2025-06-30T09:23:50.405304Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254316979:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.405328Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.432172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:50.497607Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317148:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.497635Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.501469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1751275430534 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo 2025-06-30T09:23:50.514257Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317254:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.514310Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } TClient::Ls response: 2025-06-30T09:23:50.514419Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317267:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.514454Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.514495Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317288:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.514498Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317278:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.514510Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317281:2362], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.514510Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317284:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.514521Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317285:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.514523Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317283:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.514530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317287:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.514570Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317296:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1751275430534 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-30T09:23:50.514934Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670698254317332:2385], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.514952Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.c ... 0. Ctx: { TraceId: 01jz02dc3p816zq4z2315ngn3r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmE1YWJhMC1kMWE0NzllMC01MTM3MTFlZS01YzQ3M2RkNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.490242Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742191. Ctx: { TraceId: 01jz02dc3p5jss1zatbeh9jwx8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjE3MzhiZjEtOGE5NGZjMDctYmI1ZDZhNjItZjNlZmVmNTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.490436Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742192. Ctx: { TraceId: 01jz02dc3qb9aaszdvqpx6jrxz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTdjYjk3MmEtMWVjMDE1NTItZjk1MTI2YzEtNzNiYjdlNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.490617Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742183. Ctx: { TraceId: 01jz02dc3q0yfs313qqaawgfdq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzQ4NDM1NWYtM2VmYjdiYjYtM2M2ZDNjNC02NDI0MDI1Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.490864Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742184. Ctx: { TraceId: 01jz02dc3q6x3jv31b4mgbv7b7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2JiNDA4MzEtZjY0ZmFkYzgtOTNmMDdlZWUtODU2MmY1ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.491050Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742185. Ctx: { TraceId: 01jz02dc3qbmcz0a6eqjbwxp44, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWZhNjEzYjctODk5ZDA0YjAtYzlhNzZhMmMtYjBjYjM0ODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.491222Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742186. Ctx: { TraceId: 01jz02dc3qbneh9j3w9dnck3hv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjFjYzhkYWEtOTE1Yzg3NDctOTAyOTdmNjUtNDhiNTVjOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.491412Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742187. Ctx: { TraceId: 01jz02dc3q4zhhdvng0s8sj3pm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTllNzE2ODUtYWRjMTQ4OWQtYWVmZjYwZGUtYTg2MWUxMzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.521919Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742193. Ctx: { TraceId: 01jz02dc4kct5mzaa2vmdgedap, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTllNzE2ODUtYWRjMTQ4OWQtYWVmZjYwZGUtYTg2MWUxMzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.522527Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742194. 
Ctx: { TraceId: 01jz02dc4ke1tch22z8hn7gvwd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDEzOTM0ZDYtNzBlMTY5ODctZmE5MDU3YWUtZGQwYjhkNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.522795Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742195. Ctx: { TraceId: 01jz02dc4kdhztz0rp9jhsk14c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjE3MzhiZjEtOGE5NGZjMDctYmI1ZDZhNjItZjNlZmVmNTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.523004Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742196. Ctx: { TraceId: 01jz02dc4kffk2n2z165zz2tvq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTdjYjk3MmEtMWVjMDE1NTItZjk1MTI2YzEtNzNiYjdlNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.523190Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742197. Ctx: { TraceId: 01jz02dc4k4nw5tq4fh4h6y4f4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzQ4NDM1NWYtM2VmYjdiYjYtM2M2ZDNjNC02NDI0MDI1Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.523378Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742198. Ctx: { TraceId: 01jz02dc4kcemdx6wk32m9243r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2JiNDA4MzEtZjY0ZmFkYzgtOTNmMDdlZWUtODU2MmY1ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.523553Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742199. Ctx: { TraceId: 01jz02dc4kf7z4nkyfjqq78k51, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWZhNjEzYjctODk5ZDA0YjAtYzlhNzZhMmMtYjBjYjM0ODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.523729Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742200. Ctx: { TraceId: 01jz02dc4kfxyzk7ybkp3xnhdm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGI3N2ZjZDAtYjk4ODRmMmQtNjA0YWJmMDEtZTZmZGNlMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.523903Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742201. Ctx: { TraceId: 01jz02dc4k19ghb9pwbdjfhpq1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmE1YWJhMC1kMWE0NzllMC01MTM3MTFlZS01YzQ3M2RkNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.524088Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742202. Ctx: { TraceId: 01jz02dc4k1mfqmm043dnaw429, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjFjYzhkYWEtOTE1Yzg3NDctOTAyOTdmNjUtNDhiNTVjOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.536732Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742203. Ctx: { TraceId: 01jz02dc58a8e82s0qmcn1a6wz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTdjYjk3MmEtMWVjMDE1NTItZjk1MTI2YzEtNzNiYjdlNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.537271Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742204. 
Ctx: { TraceId: 01jz02dc587ewepcjp759q8ayr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzQ4NDM1NWYtM2VmYjdiYjYtM2M2ZDNjNC02NDI0MDI1Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.538274Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742205. Ctx: { TraceId: 01jz02dc587mef4y7mrv9fyg3j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2JiNDA4MzEtZjY0ZmFkYzgtOTNmMDdlZWUtODU2MmY1ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-30T09:24:10.548532Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742206. Ctx: { TraceId: 01jz02dc5c79621jdprpx3h07v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmE1YWJhMC1kMWE0NzllMC01MTM3MTFlZS01YzQ3M2RkNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.550101Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742209. Ctx: { TraceId: 01jz02dc5b6bkp2zv01jc21h8t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjFjYzhkYWEtOTE1Yzg3NDctOTAyOTdmNjUtNDhiNTVjOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.550598Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742210. Ctx: { TraceId: 01jz02dc5b0snea2syxw8kdews, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTllNzE2ODUtYWRjMTQ4OWQtYWVmZjYwZGUtYTg2MWUxMzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.550866Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742211. Ctx: { TraceId: 01jz02dc5bcn9vk8fz4jsbgcp2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDEzOTM0ZDYtNzBlMTY5ODctZmE5MDU3YWUtZGQwYjhkNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1751275430534 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-30T09:24:10.551157Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742212. Ctx: { TraceId: 01jz02dc5c5yeh8gt7y5k5pas8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjE3MzhiZjEtOGE5NGZjMDctYmI1ZDZhNjItZjNlZmVmNTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.552036Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742207. 
Ctx: { TraceId: 01jz02dc5c7v1yf7t32qd8ew38, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGI3N2ZjZDAtYjk4ODRmMmQtNjA0YWJmMDEtZTZmZGNlMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.552179Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742208. Ctx: { TraceId: 01jz02dc5bcnvs2zd1jsh8xw16, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWZhNjEzYjctODk5ZDA0YjAtYzlhNzZhMmMtYjBjYjM0ODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1751275430534 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 2 shards |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithUpdates [GOOD] Test command err: 2025-06-30T09:23:50.080331Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670700309954132:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:50.080376Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004591/r3tmp/tmpCeaJpb/pdisk_1.dat 2025-06-30T09:23:50.137012Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11490, node 1 2025-06-30T09:23:50.151134Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:50.151156Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:50.151158Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:50.151190Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29173 WaitRootIsUp 'Root'... 
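Both YdbTableSplit cases in this suite (SplitByLoadWithDeletes above, and SplitByLoadWithUpdates whose output continues below) drive enough write traffic at the single-shard Foo table that the datashard splits, which is why each run moves from "Table has 1 shards" to "Table has 2 shards". As a rough client-side sketch of opting a table into this behavior with the YDB Python SDK: the endpoint, database, schema, and partition bounds below are illustrative assumptions, not values taken from the test harness.

    import ydb

    # Hedged sketch: create a table that is allowed to split under load.
    # AUTO_PARTITIONING_BY_LOAD and the partition-count bounds are documented
    # YDB table settings; endpoint, database, schema and values are placeholders.
    with ydb.Driver(endpoint="grpc://localhost:2136", database="/Root") as driver:
        driver.wait(timeout=5)
        with ydb.QuerySessionPool(driver) as pool:
            pool.execute_with_retries(
                """
                CREATE TABLE Foo (
                    NameHash Uint32,
                    Name Utf8,
                    PRIMARY KEY (NameHash, Name)
                ) WITH (
                    AUTO_PARTITIONING_BY_LOAD = ENABLED,
                    AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1,
                    AUTO_PARTITIONING_MAX_PARTITIONS_COUNT = 8
                );
                """
            )
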
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:50.180313Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:50.180340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:50.181966Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:50.212241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:29173 2025-06-30T09:23:50.406043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309954961:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.406069Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.432634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:50.499801Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955130:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.499827Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.502732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275430534 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275430534 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-30T09:23:50.515862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955237:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.515907Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.515947Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955268:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.515957Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955269:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.515965Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955271:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.515967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955266:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.515972Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955272:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.515974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955267:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.515976Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955265:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.515981Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955280:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.515981Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955273:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.516038Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.516072Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955286:2374], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.516249Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670700309955315:2385], DatabaseId: /Root, PoolId: default, Failed to fetch ... 6. Ctx: { TraceId: 01jz02dc3q6434bwchg824n9es, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTIxMWUxNTEtMWU0MzUyYzktNTgzYzY0NzAtNTQ2ZmY2ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.490169Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976741987. Ctx: { TraceId: 01jz02dc3pbc31bsn0c6n1922k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTY2ZjE4ZjAtNjM3N2I0OGEtYjY4ZGRjZDUtZjA1MDVlMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.490367Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976741988. Ctx: { TraceId: 01jz02dc3pc96t4bz9yxybz1za, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThhZWVmNzYtYmQ2YWU0MzQtNDgwNzkwYWQtNjExNmNkZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.492409Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976741993. Ctx: { TraceId: 01jz02dc3tec97py1nd597ke9m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDI3YjBlNDctMjI0ZGE1MjAtYWRhOThkNWItY2NkNTZjN2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.492750Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976741994. Ctx: { TraceId: 01jz02dc3tfcna8znbs715pknn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWQyOWExZGEtZGZjZGZiNmItNDdiOTQxYy05OTI4MTQ2Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.492968Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976741995. Ctx: { TraceId: 01jz02dc3t0kjh71q61fh0a77s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDljODQyYTQtYjU2MmU1NjctNzE1MGQwNzYtMTM0Yzc4MDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.493168Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976741991. Ctx: { TraceId: 01jz02dc3tapcjqypp6eww3jza, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWVmNDc0ODAtYTIxNDY1MDItNDYyOWU2MTYtMjU4MTg5Yzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.493441Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976741992. Ctx: { TraceId: 01jz02dc3teehpjxz9hs9t6y09, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGNiZmU4NWItMmFlZDQ0MGUtZGVhNDhjZWUtMzk2ZTAzMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.517113Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976741997. Ctx: { TraceId: 01jz02dc4f5vr8rsg98qwsm7fr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDI3YjBlNDctMjI0ZGE1MjAtYWRhOThkNWItY2NkNTZjN2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.517803Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976741998. 
Ctx: { TraceId: 01jz02dc4f3y79j0ybwsaab374, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWY5N2M2ZTgtNmRkNmIxMy01Y2Q0ZGIxMC0xYThlMjYyZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.518086Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976741999. Ctx: { TraceId: 01jz02dc4fbr4kt0f1b8zefemf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDU1NDg0YjQtNWFkNjkzM2YtODIwZTlmMWQtNzMzZTBlY2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.518295Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742000. Ctx: { TraceId: 01jz02dc4f33vcm0sd6z4ta290, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTIxMWUxNTEtMWU0MzUyYzktNTgzYzY0NzAtNTQ2ZmY2ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.518483Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742001. Ctx: { TraceId: 01jz02dc4faeqxke4wwmcwpw2v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThhZWVmNzYtYmQ2YWU0MzQtNDgwNzkwYWQtNjExNmNkZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.518673Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742002. Ctx: { TraceId: 01jz02dc4f5g2tzxdebw3z6vm8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWQyOWExZGEtZGZjZGZiNmItNDdiOTQxYy05OTI4MTQ2Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.518831Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742003. Ctx: { TraceId: 01jz02dc4f86t9frfk6k739kjn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDljODQyYTQtYjU2MmU1NjctNzE1MGQwNzYtMTM0Yzc4MDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.518896Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742004. Ctx: { TraceId: 01jz02dc4f9rymppeeqajf257f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWVmNDc0ODAtYTIxNDY1MDItNDYyOWU2MTYtMjU4MTg5Yzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.519099Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976741996. Ctx: { TraceId: 01jz02dc4e2tbhg9f9cs54byvy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTY2ZjE4ZjAtNjM3N2I0OGEtYjY4ZGRjZDUtZjA1MDVlMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.519109Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742005. Ctx: { TraceId: 01jz02dc4f49xx6zmms756trrs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGNiZmU4NWItMmFlZDQ0MGUtZGVhNDhjZWUtMzk2ZTAzMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-30T09:24:10.540517Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742008. Ctx: { TraceId: 01jz02dc56fnws10ypc6bext26, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWY5N2M2ZTgtNmRkNmIxMy01Y2Q0ZGIxMC0xYThlMjYyZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:24:10.541036Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742009. Ctx: { TraceId: 01jz02dc569kavw8trfewrdm7c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTIxMWUxNTEtMWU0MzUyYzktNTgzYzY0NzAtNTQ2ZmY2ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.541307Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742010. Ctx: { TraceId: 01jz02dc570dfeenxktp402b9b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDljODQyYTQtYjU2MmU1NjctNzE1MGQwNzYtMTM0Yzc4MDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.541550Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742011. Ctx: { TraceId: 01jz02dc5603qwf8wpwxtaf548, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThhZWVmNzYtYmQ2YWU0MzQtNDgwNzkwYWQtNjExNmNkZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.541802Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742012. Ctx: { TraceId: 01jz02dc586neg5rfrttkabmyc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGNiZmU4NWItMmFlZDQ0MGUtZGVhNDhjZWUtMzk2ZTAzMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.542044Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742013. Ctx: { TraceId: 01jz02dc575ar2ckznw137pbcm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWQyOWExZGEtZGZjZGZiNmItNDdiOTQxYy05OTI4MTQ2Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.542276Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742014. Ctx: { TraceId: 01jz02dc588995db5p7thpjcjq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTY2ZjE4ZjAtNjM3N2I0OGEtYjY4ZGRjZDUtZjA1MDVlMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.542492Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742015. Ctx: { TraceId: 01jz02dc58dxjy0hrh1pj6pfqy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWVmNDc0ODAtYTIxNDY1MDItNDYyOWU2MTYtMjU4MTg5Yzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.542721Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742006. Ctx: { TraceId: 01jz02dc57bprefw9c8hfhm1a3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDU1NDg0YjQtNWFkNjkzM2YtODIwZTlmMWQtNzMzZTBlY2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:10.542995Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976742007. Ctx: { TraceId: 01jz02dc564kwew2q0sgvp7fct, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDI3YjBlNDctMjI0ZGE1MjAtYWRhOThkNWItY2NkNTZjN2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275430534 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275430534 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 2 shards |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> ColumnBuildTest::ValidDefaultValue [GOOD] |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |83.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |83.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows [GOOD] |83.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |83.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |83.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |83.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |83.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |83.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots >> TPersQueueTest::CheckKillBalancer [GOOD] >> TPersQueueTest::CheckDeleteTopic ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::ValidDefaultValue [GOOD] Test command err: Leader 
for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:10.172297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:10.172328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:10.172336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:10.172342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:10.172350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:10.172355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:10.172366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:10.172382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:10.172517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:10.172594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:10.215183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:10.215215Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:10.225407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:10.225506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:10.225551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:10.255779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:10.255888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:10.256034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:10.256135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, 
LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:10.257044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:10.257094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:10.257405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:10.257420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:10.257446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:10.257459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:10.257466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:10.257491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.259076Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:10.311270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:10.311359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.311442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:10.311453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:10.311524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:10.311542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:10.312496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 
2025-06-30T09:24:10.312552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:10.312634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.312648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:10.312655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:10.312663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:10.313260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.313278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:10.313287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:10.313802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.313818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.313826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:10.313837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:10.314820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:10.315478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:10.315535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:10.315777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:10.315813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } 
Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:10.315823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:10.315896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:10.315906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:10.315948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:10.315964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:10.316535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:10.316547Z node 1 :FLAT_TX_SCHEMESHARD ... 75186233409550 message:Transaction { AffectedSet { TabletId: 72075186233409549 Flags: 2 } ExecLevel: 0 TxId: 281474976725761 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409550 2025-06-30T09:24:13.359706Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-30T09:24:13.359747Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1154:3022], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424, Billed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424} 2025-06-30T09:24:13.359797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976725761:4294967295 from tablet: 72075186233409549 to tablet: 72075186233409550 cookie: 0:281474976725761 msg type: 269090816 2025-06-30T09:24:13.359832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976725761, partId: 4294967295, tablet: 72075186233409550 2025-06-30T09:24:13.359887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976725761, at schemeshard: 72075186233409549 
2025-06-30T09:24:13.359897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 0/1, is published: true 2025-06-30T09:24:13.359903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-30T09:24:13.372156Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [1:1826:3686], Recipient [1:762:2649]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186233409549 ClientId: [1:1826:3686] ServerId: [1:1828:3688] } 2025-06-30T09:24:13.372187Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-30T09:24:13.431360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 650, transactions count in step: 1, at schemeshard: 72075186233409549 2025-06-30T09:24:13.431405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976725761 AckTo { RawX1: 0 RawX2: 0 } } Step: 650 MediatorID: 72075186233409551 TabletID: 72075186233409549, at schemeshard: 72075186233409549 2025-06-30T09:24:13.431421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72075186233409549] TDropLock TPropose opId# 281474976725761:0 HandleReply TEvOperationPlan: step# 650 2025-06-30T09:24:13.431431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976725761:0 128 -> 240 2025-06-30T09:24:13.435779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-06-30T09:24:13.435809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72075186233409549] TDone opId# 281474976725761:0 ProgressState 2025-06-30T09:24:13.435832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-30T09:24:13.435838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-30T09:24:13.435846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-30T09:24:13.435852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-30T09:24:13.435858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 1/1, is published: true 2025-06-30T09:24:13.435889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:571:2507] message: TxId: 281474976725761 2025-06-30T09:24:13.435902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-30T09:24:13.435908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976725761:0 2025-06-30T09:24:13.435914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976725761:0 2025-06-30T09:24:13.435939Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-30T09:24:13.439280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976725761 2025-06-30T09:24:13.439321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976725761 2025-06-30T09:24:13.439361Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2019: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 106, txId# 281474976725761 2025-06-30T09:24:13.439393Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2022: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1154:3022], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424, Billed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424}, txId# 281474976725761 2025-06-30T09:24:13.440925Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-30T09:24:13.440972Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1154:3022], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424, Billed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424} 2025-06-30T09:24:13.440998Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-30T09:24:13.447342Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1201: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done 2025-06-30T09:24:13.447405Z node 1 :BUILD_INDEX DEBUG: 
schemeshard_build_index__progress.cpp:1202: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1154:3022], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424, Billed: UploadRows: 101 UploadBytes: 2424 ReadRows: 101 ReadBytes: 2424} 2025-06-30T09:24:13.447412Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:336: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 106, subscribers count# 1 2025-06-30T09:24:13.447459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-30T09:24:13.447467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:1174:3042] TestWaitNotification: OK eventTxId 106 2025-06-30T09:24:13.447912Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 106 2025-06-30T09:24:13.448012Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:103: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "ColumnValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 1111 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "ColumnValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 1111 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:10.305958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:10.306017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:10.306025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:10.306030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:10.306038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:10.306043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:10.306054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:10.306070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:10.306201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:10.306271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:10.323622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:10.323644Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:10.327377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:10.327432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:10.327473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:10.329001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:10.329078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:10.329206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:10.329276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:10.329996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:10.330038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:10.330296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:10.330306Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:10.330331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:10.330339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:10.330346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:10.330365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.331813Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:10.355459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:10.355525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.355584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:10.355592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:10.355658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:10.355672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:10.356497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:10.356544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:10.356596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.356605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
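
Consistent with this test's name, further down in its output the harness checks key by key that rows deleted before the column build were not resurrected: it sends the datashard one local MKQL SelectRow probe per key (keys 28 through 50 appear below as TEvLocalMKQL events). Unescaped for readability, the Text field of each probe reads as follows; this is the program already shown in the log (only the Uint64 key literal varies), embedded here as a C++ raw string:

    // Verbatim program text from the TEvLocalMKQL entries below, unescaped;
    // only the key literal (Uint64 '28 ... '50) changes from probe to probe.
    const char* kSelectRowProbe = R"mkql(
    (
      (let key '(('key (Uint64 '28))))
      (let select '('key))
      (return (AsList
        (SetResult 'Result (SelectRow '__user__Table key select))
      ))
    )
    )mkql";
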
2025-06-30T09:24:10.356611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:10.356617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:10.357029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.357042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:10.357048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:10.357385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.357396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:10.357403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:10.357411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:10.358189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:10.358590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:10.358623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:10.358832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:10.358859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:10.358867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:10.358930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:10.358937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:10.358970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:10.358981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:10.359380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:10.359389Z node 1 :FLAT_TX_SCHEMESHARD ... lMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'28))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.028367Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2063:3923], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'29))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.029300Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2064:3924], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'30))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.030211Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2065:3925], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'31))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.031271Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2066:3926], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'32))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.032261Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2067:3927], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'33))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.033173Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2068:3928], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'34))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.034051Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2069:3929], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n 
(let key \'(\'(\'key (Uint64 \'35))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.035378Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2070:3930], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'36))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.036397Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2071:3931], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'37))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.037272Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2072:3932], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'38))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.038190Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2073:3933], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'39))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.039303Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2074:3934], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'40))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.040261Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2075:3935], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'41))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.041175Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2076:3936], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'42))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.042110Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2077:3937], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'43))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.043080Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2078:3938], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'44))))\n (let 
select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.043924Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2079:3939], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'45))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.044891Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2080:3940], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'46))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.046130Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2081:3941], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'47))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.047141Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2082:3942], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'48))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.048067Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2083:3943], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'49))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-30T09:24:14.048984Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2084:3944], Recipient [1:762:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'50))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads [GOOD] >> TPersQueueTest::DirectReadCleanCache [GOOD] >> TPersQueueTest::DirectReadRestartPQRB |83.2%| [TA] $(B)/ydb/core/tx/schemeshard/ut_column_build/test-results/unittest/{meta.json ... 
results_accumulator.log} |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads [GOOD] Test command err: 2025-06-30T09:23:49.988551Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670697559260026:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:49.988750Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00456d/r3tmp/tmpllhVQj/pdisk_1.dat 2025-06-30T09:23:50.039272Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:50.051156Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 4482, node 1 2025-06-30T09:23:50.056521Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:50.056537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:50.056540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:50.056589Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28495 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:23:50.089034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:50.089065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:50.090575Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:50.116980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:28495 2025-06-30T09:23:50.298395Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701854228223:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.298416Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.325466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:50.391601Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701854228394:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.391607Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701854228399:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.391625Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.392265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:23:50.395445Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670701854228401:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:23:50.458222Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670701854228471:2768] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:23:50.472357Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02crfqbkycg38w51a3n77v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.477474Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02crjcef0z10pfbb60dh4y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.480955Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz02crjg6p7n4ety2shkh2qb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.484312Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jz02crjk5mfhvppxwgw7nm77, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.487422Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jz02crjp8cb5gmrbswknr5nt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.490260Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jz02crjse2tm25vd3ce9jsqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.492877Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jz02crjwbcz14wznddmf7s7q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.495956Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jz02crjz0hk8dzr24v412arq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.498967Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. 
Ctx: { TraceId: 01jz02crk2dfb7jgtnprg6cq0c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.502271Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jz02crk5dmraxm5ft7y7xesk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.505284Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jz02crk82db8rt72dk7kj3nv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.508284Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jz02crkb4bqjcmmd2b5y5mr3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.511343Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jz02crke5ebb62pggsszsj9v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.514443Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jz02crkhd28ksdhjyq74dfqq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.517586Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz02crkn471snw0xs3yc594h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M3MzM0OTYtMjA2MTY5YTctZmRmM2YzYWYtNTg5ODFiODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:23:50.52025 ... 7. Ctx: { TraceId: 01jz02dek14wesy69dsnsqg0bw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWI1ZTU0ZjUtNGQ5ZmUwM2MtYWEwNzVlZDgtYjIzOTE2YWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.027463Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734080. Ctx: { TraceId: 01jz02dek1d25jdnmapq94wdkb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDUxMWRlNTEtZWM3N2IwODQtZTQ4MzdmODItYjM0NGIzZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.028009Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734083. Ctx: { TraceId: 01jz02dek242gv5tf2346r94nh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTczMDFiY2YtMzI4NTdjMjItYzlhNTJlZjktODQyMDcyNzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.028302Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734081. 
Ctx: { TraceId: 01jz02dek2dy0h0he6eh2235z3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWY0MTczZTktMWI4OTBiYmItNjhjOWRlODktZjYzZmNiMjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.028435Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734082. Ctx: { TraceId: 01jz02dek22bs6f993ye6cztp4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGY2ZjJhMmYtNzUyN2U3ZS1lZjUwNDdiYi1lNzM2ODc5Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.033524Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734088. Ctx: { TraceId: 01jz02dek88smsns06rfg2wh6g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzhiMjkxYWMtOWRiYmY3Ny1lZGNkNDg1ZS05YWZmMzUzNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.033524Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734087. Ctx: { TraceId: 01jz02dek83xtmbm848b2rgrxf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTA3NjBmOTItYjBiMjZkNGQtZTM4MTAxZjAtOWUzNmNkNTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.033941Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734084. Ctx: { TraceId: 01jz02dek82cgjjqm78t8bezby, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjAwZDJkNGUtN2U0MTU0My1hMDcwNWRhZi1lNWY2OTVkMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.033956Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734085. Ctx: { TraceId: 01jz02dek8724hpt548w05j1m7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGY2ZjJhMmYtNzUyN2U3ZS1lZjUwNDdiYi1lNzM2ODc5Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.034040Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734091. Ctx: { TraceId: 01jz02dek8393jxqf4345bdaz2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDUxMWRlNTEtZWM3N2IwODQtZTQ4MzdmODItYjM0NGIzZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.034180Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734086. Ctx: { TraceId: 01jz02dek8fn2teeknf1bf0y32, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGE1NGIzMjMtNGRkMDc4NWQtMzFmYTE3ZTMtZmVjZmU1MjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.034490Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734089. Ctx: { TraceId: 01jz02dek8827k4js3sz0dtqyj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWY0MTczZTktMWI4OTBiYmItNjhjOWRlODktZjYzZmNiMjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.034644Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734090. Ctx: { TraceId: 01jz02dek8692zwwjgdzb0rtx0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWI1ZTU0ZjUtNGQ5ZmUwM2MtYWEwNzVlZDgtYjIzOTE2YWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.034838Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734092. 
Ctx: { TraceId: 01jz02dek87sa68jvgmmjgke8d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2FlOTI3NDMtYWM3Mzc0YTUtMjc5YzFkYmEtYTg3MDdjOTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.034939Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734093. Ctx: { TraceId: 01jz02dek8933h5mpfamvz10g7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTczMDFiY2YtMzI4NTdjMjItYzlhNTJlZjktODQyMDcyNzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.038670Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734095. Ctx: { TraceId: 01jz02deke9pd4njaqtsdgkc7m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTA3NjBmOTItYjBiMjZkNGQtZTM4MTAxZjAtOWUzNmNkNTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.038677Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734094. Ctx: { TraceId: 01jz02deke1b17r6n6e5yac9se, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzhiMjkxYWMtOWRiYmY3Ny1lZGNkNDg1ZS05YWZmMzUzNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-30T09:24:13.039574Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734096. Ctx: { TraceId: 01jz02deked2xqp6rs6ndvar4g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGE1NGIzMjMtNGRkMDc4NWQtMzFmYTE3ZTMtZmVjZmU1MjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.041481Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734097. Ctx: { TraceId: 01jz02deke5gp56kb2561sd7vn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWI1ZTU0ZjUtNGQ5ZmUwM2MtYWEwNzVlZDgtYjIzOTE2YWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.041495Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734104. Ctx: { TraceId: 01jz02dekg4ax667rsh4q1kwjb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTA3NjBmOTItYjBiMjZkNGQtZTM4MTAxZjAtOWUzNmNkNTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.041621Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734101. Ctx: { TraceId: 01jz02deke1q97c129ct9gszd5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2FlOTI3NDMtYWM3Mzc0YTUtMjc5YzFkYmEtYTg3MDdjOTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.041725Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734098. Ctx: { TraceId: 01jz02dekeczy3j49g0gqpe9kt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTczMDFiY2YtMzI4NTdjMjItYzlhNTJlZjktODQyMDcyNzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.041833Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734105. Ctx: { TraceId: 01jz02dekgdem94n6kwyx56d8a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzhiMjkxYWMtOWRiYmY3Ny1lZGNkNDg1ZS05YWZmMzUzNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:24:13.041918Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734099. Ctx: { TraceId: 01jz02deke9hnbb396t6yyat9h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGY2ZjJhMmYtNzUyN2U3ZS1lZjUwNDdiYi1lNzM2ODc5Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.041995Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734102. Ctx: { TraceId: 01jz02deke0b5y00ahzt4ckbrx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWY0MTczZTktMWI4OTBiYmItNjhjOWRlODktZjYzZmNiMjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: 2025-06-30T09:24:13.042076Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734103. Ctx: { TraceId: 01jz02dekf9tydfhr6n27e9zmt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjAwZDJkNGUtN2U0MTU0My1hMDcwNWRhZi1lNWY2OTVkMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.042234Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734106. Ctx: { TraceId: 01jz02dekh5g9xnd61r7jh6sef, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGE1NGIzMjMtNGRkMDc4NWQtMzFmYTE3ZTMtZmVjZmU1MjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:13.042287Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734100. Ctx: { TraceId: 01jz02deke6c821vpb90p3c0kw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDUxMWRlNTEtZWM3N2IwODQtZTQ4MzdmODItYjM0NGIzZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275430429 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275430429 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) Table has 2 shards |83.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TSubscriberTest::SyncPartial |83.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |83.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.3%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build/test-results/unittest/{meta.json ... results_accumulator.log} >> TSubscriberSyncQuorumTest::TwoRingGroups |83.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TSubscriberTest::NotifyUpdate >> TSubscriberTest::SyncPartial [GOOD] >> TSubscriberTest::SyncWithOutdatedReplica >> TSubscriberSyncQuorumTest::TwoRingGroups [GOOD] >> TSubscriberTest::Boot |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TSubscriberTest::NotifyUpdate [GOOD] >> TSubscriberTest::ReconnectOnFailure |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TSubscriberTest::SyncWithOutdatedReplica [GOOD] >> TSubscriberSinglePathUpdateTest::OneRingGroup >> TSubscriberTest::NotifyDelete |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TSubscriberCombinationsTest::MigratedPathRecreation >> TSubscriberTest::Boot [GOOD] >> TSubscriberTest::StrongNotificationAfterCommit >> TSubscriberSinglePathUpdateTest::TwoRingGroups >> TSubscriberCombinationsTest::CombinationsRootDomain |83.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |83.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |83.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::Boot [GOOD] Test command err: ... 
waiting for initial path lookups 2025-06-30T09:24:16.381879Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:28:2075][TestPath] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:16.382514Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:2:2049] 2025-06-30T09:24:16.382550Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:5:2052] 2025-06-30T09:24:16.382559Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:37:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:8:2055] 2025-06-30T09:24:16.382568Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:11:2058] 2025-06-30T09:24:16.382576Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:14:2061] 2025-06-30T09:24:16.382585Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:17:2064] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... 
unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR Poisoning replica: [1:2199047594611:0] Poisoning replica: [1:5497582477939:0] 2025-06-30T09:24:16.382723Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][1:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:27:2074], cookie# 12345 2025-06-30T09:24:16.382777Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:29:2075] 2025-06-30T09:24:16.382820Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:30:2075] 2025-06-30T09:24:16.382830Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:31:2075] 2025-06-30T09:24:16.382842Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:32:2075] 2025-06-30T09:24:16.382851Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:33:2075] 2025-06-30T09:24:16.382870Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:28:2075][TestPath] Set up state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.382882Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:34:2075] 2025-06-30T09:24:16.382892Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:28:2075][TestPath] Ignore empty state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.382919Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:29:2075], cookie# 12345 2025-06-30T09:24:16.382932Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:30:2075], cookie# 12345 2025-06-30T09:24:16.382942Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:37:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:31:2075], cookie# 12345 2025-06-30T09:24:16.382952Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:32:2075], cookie# 12345 2025-06-30T09:24:16.382962Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:33:2075], cookie# 12345 2025-06-30T09:24:16.382971Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:40:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:34:2075], 
cookie# 12345 2025-06-30T09:24:16.382981Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:2:2049], cookie# 12345 2025-06-30T09:24:16.382988Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:5:2052], cookie# 12345 2025-06-30T09:24:16.382998Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:11:2058], cookie# 12345 2025-06-30T09:24:16.383006Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:14:2061], cookie# 12345 2025-06-30T09:24:16.383026Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:29:2075], cookie# 12345 2025-06-30T09:24:16.383037Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:24:16.383046Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:30:2075], cookie# 12345 2025-06-30T09:24:16.383052Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:24:16.383059Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:31:2075], cookie# 12345 2025-06-30T09:24:16.383066Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:24:16.383071Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 1, size# 3, half# 1, successes# 0, failures# 0 2025-06-30T09:24:16.383078Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:31:2075] 2025-06-30T09:24:16.383086Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:28:2075][TestPath] Ignore empty state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.383093Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:32:2075], cookie# 12345 2025-06-30T09:24:16.383098Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:24:16.383103Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:987: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 1, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:24:16.383110Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:33:2075], cookie# 12345 2025-06-30T09:24:16.383116Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:24:16.383121Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 1, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:24:16.383127Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:34:2075], cookie# 12345 2025-06-30T09:24:16.383133Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:24:16.383138Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 1, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:24:16.383148Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:34:2075] 2025-06-30T09:24:16.383157Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:28:2075][TestPath] Ignore empty state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Poisoning replica: [1:3298559222387:0] 2025-06-30T09:24:16.383182Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][1:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:27:2074], cookie# 12346 2025-06-30T09:24:16.383207Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:29:2075], cookie# 12346 2025-06-30T09:24:16.383216Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:30:2075], cookie# 12346 2025-06-30T09:24:16.383225Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:31:2075], cookie# 12346 2025-06-30T09:24:16.383231Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-30T09:24:16.383238Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:32:2075], cookie# 12346 2025-06-30T09:24:16.383246Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:39:2075][TestPath] Handle 
NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:33:2075], cookie# 12346 2025-06-30T09:24:16.383254Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:34:2075], cookie# 12346 2025-06-30T09:24:16.383260Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-30T09:24:16.383267Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:2:2049], cookie# 12346 2025-06-30T09:24:16.383273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:5:2052], cookie# 12346 2025-06-30T09:24:16.383281Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:14:2061], cookie# 12346 2025-06-30T09:24:16.383293Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:29:2075], cookie# 12346 2025-06-30T09:24:16.383299Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 1, failures# 1 2025-06-30T09:24:16.383305Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:30:2075], cookie# 12346 2025-06-30T09:24:16.383311Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:24:16.383317Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 1, size# 3, half# 1, successes# 0, failures# 1 2025-06-30T09:24:16.383324Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:32:2075], cookie# 12346 2025-06-30T09:24:16.383330Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:24:16.383335Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 1, size# 3, half# 1, successes# 0, failures# 2 2025-06-30T09:24:16.383341Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:32:2075] 2025-06-30T09:24:16.383349Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:28:2075][TestPath] Ignore empty state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 
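For reference: the quorum accounting these entries print (size# 3, half# 1, running successes/failures, partial flag) reduces to a strict-majority check per ring group. Below is a minimal sketch consistent with the numbers above — an inference from the log, not the actual subscriber.cpp code:

#include <cstdint>

// One ring group's reply accounting, mirroring the log phrases:
//   "Sync is in progress"         -> no verdict yet
//   "Sync is done ... partial# 0" -> strict majority of successes
//   "Sync is done ... partial# 1" -> all replicas answered, no majority
//                                    (followed by "Sync is incomplete")
struct TRingGroupSync {
    uint32_t Size = 3;        // replicas in the ring group
    uint32_t Half = 1;        // Size / 2, as printed in the log
    uint32_t Successes = 0;
    uint32_t Failures = 0;

    // Returns true once the ring group reaches a verdict; partial tells
    // whether the sync finished without a majority of successful replies.
    bool OnReply(bool ok, bool& partial) {
        if (ok) {
            ++Successes;
        } else {
            ++Failures;
        }
        if (Successes > Half) {
            partial = false;
            return true;
        }
        if (Successes + Failures == Size) {
            partial = true;
            return true;
        }
        return false;
    }
};

Tracing cookie# 12346, ring group# 1 through this sketch reproduces the sequence above: failures 1 and 2 leave it in progress, and the third reply ends it with partial# 1.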
2025-06-30T09:24:16.383356Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:33:2075], cookie# 12346 2025-06-30T09:24:16.383361Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:24:16.383367Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1002: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 1, size# 3, half# 1, successes# 1, failures# 2, partial# 1 2025-06-30T09:24:16.383372Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][1:28:2075][TestPath] Sync is incomplete in one of the ring groups: cookie# 12346 2025-06-30T09:24:16.624616Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][2:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:16.624789Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:3:2050] 2025-06-30T09:24:16.624804Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:6:2053] 2025-06-30T09:24:16.624813Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:9:2056] 2025-06-30T09:24:16.624828Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:37:2066] 2025-06-30T09:24:16.624847Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:38:2066] 2025-06-30T09:24:16.624859Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][2:36:2066][path] Set up state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.624907Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:39:2066] 2025-06-30T09:24:16.624916Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } >> TSubscriberSinglePathUpdateTest::TwoRingGroups [GOOD] >> TSubscriberSyncQuorumTest::OneDisconnectedRingGroup >> TSubscriberTest::ReconnectOnFailure [GOOD] >> TExternalTableTestReboots::ParallelCreateDrop >> TSubscriberSinglePathUpdateTest::OneRingGroup [GOOD] >> TSubscriberSinglePathUpdateTest::OneWriteOnlyRingGroup ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::SyncWithOutdatedReplica [GOOD] Test command err: 2025-06-30T09:24:16.143458Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:16.143967Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: 
[replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-30T09:24:16.144003Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-30T09:24:16.144013Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-30T09:24:16.144026Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-30T09:24:16.144055Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-30T09:24:16.144067Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.144079Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-30T09:24:16.144087Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.144131Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 1 2025-06-30T09:24:16.144160Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:37:2066], cookie# 1 2025-06-30T09:24:16.144169Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2066], cookie# 1 2025-06-30T09:24:16.144194Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2066], cookie# 1 2025-06-30T09:24:16.144206Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:6:2053], cookie# 1 2025-06-30T09:24:16.144212Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:9:2056], cookie# 1 2025-06-30T09:24:16.144224Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:37:2066], cookie# 1 2025-06-30T09:24:16.144232Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:36:2066][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-30T09:24:16.144239Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# 
[1:37:2066] 2025-06-30T09:24:16.144246Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.144255Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:38:2066], cookie# 1 2025-06-30T09:24:16.144260Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:36:2066][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 1 2025-06-30T09:24:16.144266Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:39:2066], cookie# 1 2025-06-30T09:24:16.144273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:36:2066][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:24:16.144290Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 2 2025-06-30T09:24:16.144306Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:37:2066], cookie# 2 2025-06-30T09:24:16.144312Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:36:2066][path] Sync is in progress: cookie# 2, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-30T09:24:16.144318Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2066], cookie# 2 2025-06-30T09:24:16.144324Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2066], cookie# 2 2025-06-30T09:24:16.144335Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:9:2056], cookie# 2 2025-06-30T09:24:16.144343Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:38:2066], cookie# 2 2025-06-30T09:24:16.144348Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:36:2066][path] Sync is in progress: cookie# 2, ring group# 0, size# 3, half# 1, successes# 0, failures# 2 2025-06-30T09:24:16.144353Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-30T09:24:16.144360Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.144366Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: 
[main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:39:2066], cookie# 2 2025-06-30T09:24:16.144371Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1002: [main][1:36:2066][path] Sync is done in the ring group: cookie# 2, ring group# 0, size# 3, half# 1, successes# 1, failures# 2, partial# 1 2025-06-30T09:24:16.144376Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][1:36:2066][path] Sync is incomplete in one of the ring groups: cookie# 2 2025-06-30T09:24:16.144390Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 3 2025-06-30T09:24:16.144406Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:37:2066], cookie# 3 2025-06-30T09:24:16.144411Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:36:2066][path] Sync is in progress: cookie# 3, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-30T09:24:16.144417Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:38:2066], cookie# 3 2025-06-30T09:24:16.144422Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:36:2066][path] Sync is in progress: cookie# 3, ring group# 0, size# 3, half# 1, successes# 0, failures# 2 2025-06-30T09:24:16.144428Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2066], cookie# 3 2025-06-30T09:24:16.144439Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:39:2066], cookie# 3 2025-06-30T09:24:16.144445Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1002: [main][1:36:2066][path] Sync is done in the ring group: cookie# 3, ring group# 0, size# 3, half# 1, successes# 0, failures# 3, partial# 1 2025-06-30T09:24:16.144449Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][1:36:2066][path] Sync is incomplete in one of the ring groups: cookie# 3 2025-06-30T09:24:16.144455Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-30T09:24:16.144462Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.552515Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][3:37:2067][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:16.552639Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [3:3:2050] 2025-06-30T09:24:16.552652Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# 
[3:6:2053] 2025-06-30T09:24:16.552659Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [3:9:2056] 2025-06-30T09:24:16.552670Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [3:38:2067] 2025-06-30T09:24:16.552680Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [3:39:2067] 2025-06-30T09:24:16.552693Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][3:37:2067][path] Set up state: owner# [3:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.552730Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [3:40:2067] 2025-06-30T09:24:16.552741Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][3:37:2067][path] Path was already updated: owner# [3:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.552759Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][3:37:2067][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:35:2065], cookie# 1 2025-06-30T09:24:16.552777Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:38:2067], cookie# 1 2025-06-30T09:24:16.552786Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:39:2067], cookie# 1 2025-06-30T09:24:16.552792Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:40:2067], cookie# 1 2025-06-30T09:24:16.552801Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [3:3:2050], cookie# 1 2025-06-30T09:24:16.552807Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [3:6:2053], cookie# 1 2025-06-30T09:24:16.552812Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [3:9:2056], cookie# 1 2025-06-30T09:24:16.552820Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [3:38:2067], cookie# 1 2025-06-30T09:24:16.552833Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:987: [main][3:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:24:16.552839Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [3:39:2067], cookie# 1 2025-06-30T09:24:16.552843Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][3:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:24:16.552848Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [3:40:2067], cookie# 1 2025-06-30T09:24:16.552854Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][3:37:2067][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 >> TSubscriberTest::NotifyDelete [GOOD] >> TSubscriberCombinationsTest::MigratedPathRecreation [GOOD] >> TSubscriberTest::InvalidNotification >> TSubscriberSinglePathUpdateTest::OneDisconnectedRingGroup >> TSubscriberTest::StrongNotificationAfterCommit [GOOD] >> TSubscriberTest::Sync |83.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |83.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |83.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost >> TSubscriberSyncQuorumTest::OneDisconnectedRingGroup [GOOD] |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::ReconnectOnFailure [GOOD] Test command err: 2025-06-30T09:24:16.558060Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:16.558530Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-30T09:24:16.558554Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-30T09:24:16.558562Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-30T09:24:16.558573Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-30T09:24:16.558597Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-30T09:24:16.558608Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.558619Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-30T09:24:16.558628Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.559223Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-06-30T09:24:16.559245Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:37:2066] 2025-06-30T09:24:16.559257Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:36:2066][path] Update to strong state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.970324Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][4:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:16.970557Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][4:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:3:2050] 2025-06-30T09:24:16.970571Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][4:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:6:2053] 2025-06-30T09:24:16.970582Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][4:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:9:2056] 2025-06-30T09:24:16.970612Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:37:2066] 2025-06-30T09:24:16.970632Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:38:2066] 2025-06-30T09:24:16.970643Z node 4 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][4:36:2066][path] Set up state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.970660Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:39:2066] 2025-06-30T09:24:16.970670Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.970803Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:37:2066] 2025-06-30T09:24:16.970815Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: 
[main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.970824Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:38:2066] 2025-06-30T09:24:16.970830Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.970855Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:39:2066] 2025-06-30T09:24:16.970861Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.981303Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][4:47:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:3:2050] 2025-06-30T09:24:16.981346Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:37:2066] 2025-06-30T09:24:16.981374Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.981401Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][4:48:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:6:2053] 2025-06-30T09:24:16.981415Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:38:2066] 2025-06-30T09:24:16.981419Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:16.981424Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][4:49:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:9:2056] 2025-06-30T09:24:16.981433Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:39:2066] 2025-06-30T09:24:16.981437Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 
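The "Ignore empty state" / "Update to strong state" / "Path was already updated" decisions recurring in these test logs follow one visible rule: after the initial "Set up state", a notify without a strong (versioned) payload is ignored, and a strong notify replaces the current state only when it is newer. A standalone sketch of that rule, inferred from the log lines rather than taken from subscriber.cpp — the real state also carries PathId, DomainId and AbandonedSchemeShards, collapsed here into one hypothetical version number:

#include <cstdint>

struct TState {
    bool Deleted = true;      // empty/deleted placeholder state
    bool Strong = false;      // true once a versioned description arrived
    uint64_t Version = 0;     // hypothetical stand-in for (PathId, Version)
};

// Returns true when the incoming notify replaces the current state.
bool ApplyNotify(TState& current, const TState& incoming) {
    if (!incoming.Strong) {
        return false;                 // "Ignore empty state"
    }
    if (!current.Strong || incoming.Version > current.Version) {
        current = incoming;           // "Update to strong state"
        return true;
    }
    return false;                     // "Path was already updated"
}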
2025-06-30T09:24:16.981545Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][4:47:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:3:2050] 2025-06-30T09:24:16.981557Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [4:37:2066] 2025-06-30T09:24:16.981567Z node 4 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][4:36:2066][path] Update to strong state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } >> TSubscriberTest::InvalidNotification [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberSyncQuorumTest::OneDisconnectedRingGroup [GOOD] Test command err: ... waiting for initial path lookups 2025-06-30T09:24:17.164468Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:28:2075][TestPath] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.165015Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:2:2049] 2025-06-30T09:24:17.165046Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:5:2052] 2025-06-30T09:24:17.165056Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:37:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:8:2055] 2025-06-30T09:24:17.165064Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:11:2058] 2025-06-30T09:24:17.165072Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:14:2061] 2025-06-30T09:24:17.165079Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:17:2064] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... 
unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR 2025-06-30T09:24:17.165190Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:29:2075] 2025-06-30T09:24:17.165236Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:30:2075] 2025-06-30T09:24:17.165245Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:31:2075] 2025-06-30T09:24:17.165252Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:32:2075] 2025-06-30T09:24:17.165263Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:33:2075] 2025-06-30T09:24:17.165275Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:28:2075][TestPath] Set up state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.165284Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:34:2075] 2025-06-30T09:24:17.165293Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:28:2075][TestPath] Ignore empty state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:24339059:0] 2025-06-30T09:24:17.165388Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:2:2049] 2025-06-30T09:24:17.165402Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:29:2075] 2025-06-30T09:24:17.165412Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:28:2075][TestPath] Update to strong state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 
1) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:1099535966835:0] 2025-06-30T09:24:17.165476Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [1:5:2052] 2025-06-30T09:24:17.165488Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [1:30:2075] 2025-06-30T09:24:17.165498Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:28:2075][TestPath] Path was updated to new version: owner# [1:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:2199047594611:0] 2025-06-30T09:24:17.165539Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:37:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [1:8:2055] 2025-06-30T09:24:17.165555Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [1:31:2075] 2025-06-30T09:24:17.165563Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:28:2075][TestPath] Path was updated to new version: owner# [1:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 3) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:3298559222387:0] 2025-06-30T09:24:17.165601Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 4 }: sender# [1:11:2058] 2025-06-30T09:24:17.165612Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 4 }: sender# [1:32:2075] 2025-06-30T09:24:17.165620Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:28:2075][TestPath] Path was updated to new version: owner# [1:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 3) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 4) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:4398070850163:0] 2025-06-30T09:24:17.165655Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 5 }: sender# [1:14:2061] 2025-06-30T09:24:17.165666Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] 
Version: 5 }: sender# [1:33:2075] 2025-06-30T09:24:17.165695Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:28:2075][TestPath] Path was updated to new version: owner# [1:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 4) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 5) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:5497582477939:0] 2025-06-30T09:24:17.165732Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 6 }: sender# [1:17:2064] 2025-06-30T09:24:17.165742Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 6 }: sender# [1:34:2075] 2025-06-30T09:24:17.165750Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:28:2075][TestPath] Path was updated to new version: owner# [1:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 5) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 6) DomainId: AbandonedSchemeShards: there are 0 elements } ... waiting for initial path lookups 2025-06-30T09:24:17.379407Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][2:28:2075][TestPath] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.379533Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:2:2049] 2025-06-30T09:24:17.379558Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:5:2052] 2025-06-30T09:24:17.379565Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:8:2055] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... 
unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR Poisoning replica: [2:2199047594611:0] Poisoning replica: [2:3298559222387:0] Poisoning replica: [2:4398070850163:0] Poisoning replica: [2:5497582477939:0] 2025-06-30T09:24:17.379651Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][2:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [2:27:2074], cookie# 12345 2025-06-30T09:24:17.379675Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:29:2075] 2025-06-30T09:24:17.379704Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:30:2075] 2025-06-30T09:24:17.379717Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][2:28:2075][TestPath] Set up state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.379726Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:31:2075] 2025-06-30T09:24:17.379734Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][2:28:2075][TestPath] Ignore empty state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.379751Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [2:29:2075], cookie# 12345 2025-06-30T09:24:17.379761Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [2:30:2075], cookie# 12345 2025-06-30T09:24:17.379768Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [2:31:2075], cookie# 12345 2025-06-30T09:24:17.379782Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:2:2049], cookie# 12345 2025-06-30T09:24:17.379788Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:5:2052], cookie# 12345 2025-06-30T09:24:17.379801Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:29:2075], cookie# 12345 2025-06-30T09:24:17.379809Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][2:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:24:17.379815Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:30:2075], cookie# 12345 2025-06-30T09:24:17.379819Z node 2 :SCHEME_BOARD_SUBSCRIBER 
DEBUG: subscriber.cpp:987: [main][2:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:24:17.379824Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [2:31:2075], cookie# 12345 2025-06-30T09:24:17.379830Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][2:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:24:17.379837Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:31:2075] 2025-06-30T09:24:17.379842Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][2:28:2075][TestPath] Ignore empty state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Poisoning replica: [2:1099535966835:0] 2025-06-30T09:24:17.379863Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][2:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [2:27:2074], cookie# 12346 2025-06-30T09:24:17.379877Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [2:29:2075], cookie# 12346 2025-06-30T09:24:17.379885Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [2:30:2075], cookie# 12346 2025-06-30T09:24:17.379891Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [2:31:2075], cookie# 12346 2025-06-30T09:24:17.379896Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][2:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-30T09:24:17.379900Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:2:2049], cookie# 12346 2025-06-30T09:24:17.379908Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:29:2075], cookie# 12346 2025-06-30T09:24:17.379914Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][2:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 1, failures# 1 2025-06-30T09:24:17.379918Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [2:30:2075], cookie# 12346 2025-06-30T09:24:17.379923Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1002: [main][2:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 1, failures# 2, partial# 1 2025-06-30T09:24:17.379927Z node 2 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][2:28:2075][TestPath] Sync is incomplete in 
one of the ring groups: cookie# 12346 2025-06-30T09:24:17.379933Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:30:2075] 2025-06-30T09:24:17.379939Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][2:28:2075][TestPath] Ignore empty state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } >> TSubscriberTest::Sync [GOOD] >> TBackupTests::BackupUuidColumn[Zstd] |83.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |83.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |83.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector >> TExternalTableTestReboots::SimpleDropExternalTableWithReboots2 |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004ca7/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk12/testing_out_stuff/test_auditlog.py.test_dml_requests_arent_logged_when_anonymous/audit.txt ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::Sync [GOOD] Test command err: 2025-06-30T09:24:17.076659Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.076973Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-30T09:24:17.076991Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-30T09:24:17.076996Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-30T09:24:17.077004Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-30T09:24:17.077022Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-30T09:24:17.077029Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.077037Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-30T09:24:17.077044Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: 
AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.077100Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-30T09:24:17.077110Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-30T09:24:17.077117Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:36:2066][path] Update to strong state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.077135Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-30T09:24:17.077145Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-30T09:24:17.077149Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.488557Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][3:37:2067][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.488683Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:3:2050] 2025-06-30T09:24:17.488698Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:6:2053] 2025-06-30T09:24:17.488706Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:9:2056] 2025-06-30T09:24:17.488717Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:38:2067] 2025-06-30T09:24:17.488729Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:39:2067] 2025-06-30T09:24:17.488741Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][3:37:2067][path] Set up state: owner# [3:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.488774Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:40:2067] 2025-06-30T09:24:17.488785Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: 
subscriber.cpp:886: [main][3:37:2067][path] Path was already updated: owner# [3:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.488800Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][3:37:2067][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:35:2065], cookie# 1 2025-06-30T09:24:17.488817Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:38:2067], cookie# 1 2025-06-30T09:24:17.488826Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:39:2067], cookie# 1 2025-06-30T09:24:17.488833Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:40:2067], cookie# 1 2025-06-30T09:24:17.488843Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [3:3:2050], cookie# 1 2025-06-30T09:24:17.488849Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [3:6:2053], cookie# 1 2025-06-30T09:24:17.488854Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [3:9:2056], cookie# 1 2025-06-30T09:24:17.488863Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [3:38:2067], cookie# 1 2025-06-30T09:24:17.488874Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][3:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:24:17.488880Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [3:39:2067], cookie# 1 2025-06-30T09:24:17.488885Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][3:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:24:17.488890Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [3:40:2067], cookie# 1 2025-06-30T09:24:17.488896Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][3:37:2067][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::InvalidNotification [GOOD] Test command err: 2025-06-30T09:24:17.085099Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:37:2067][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.085493Z node 1 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-06-30T09:24:17.085519Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:6:2053] 2025-06-30T09:24:17.085529Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:9:2056] 2025-06-30T09:24:17.085541Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:38:2067] 2025-06-30T09:24:17.085551Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:39:2067] 2025-06-30T09:24:17.085563Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:37:2067][path] Set up state: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.085594Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:40:2067] 2025-06-30T09:24:17.085604Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.085659Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:3:2050] 2025-06-30T09:24:17.085668Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:6:2053] 2025-06-30T09:24:17.085729Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:38:2067] 2025-06-30T09:24:17.085738Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:37:2067][path] Path was updated to new version: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.085750Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 
18446744073709551615 }: sender# [1:39:2067] 2025-06-30T09:24:17.085758Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.496628Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][3:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.496764Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:3:2050] 2025-06-30T09:24:17.496777Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:6:2053] 2025-06-30T09:24:17.496786Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:9:2056] 2025-06-30T09:24:17.496799Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:37:2066] 2025-06-30T09:24:17.496815Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:38:2066] 2025-06-30T09:24:17.496825Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][3:36:2066][path] Set up state: owner# [3:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.496836Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:39:2066] 2025-06-30T09:24:17.496846Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][3:36:2066][path] Ignore empty state: owner# [3:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.496875Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { PathId: [OwnerId: 1, LocalPathId: 1] Version: 0 }: sender# [3:35:2065] 2025-06-30T09:24:17.496882Z node 3 :SCHEME_BOARD_SUBSCRIBER ERROR: subscriber.cpp:837: [main][3:36:2066][path] Suspicious NKikimrSchemeBoard.TEvNotify { PathId: [OwnerId: 1, LocalPathId: 1] Version: 0 }: sender# [3:35:2065] |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |83.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |83.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TBackupTests::BackupUuidColumn[Zstd] [GOOD] |83.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |83.4%| [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::SimpleDropExternalTableWithReboots >> TestDataErasure::Run3CyclesForTables >> TestDataErasure::DataErasureWithSplit >> TestDataErasure::DataErasureManualLaunch >> TestDataErasure::SchemeShardCounterDoesNotConsistWithBscCounter >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] >> TestDataErasure::ManualLaunch3Cycles >> YdbSdkSessionsPool::PeriodicTask/0 [GOOD] >> YdbSdkSessionsPool::PeriodicTask/1 >> TestDataErasure::SimpleTestForTables >> TestDataErasure::Run3CyclesForAllSupportedObjects ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::BackupUuidColumn[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:18.078595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:18.078626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:18.078633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:18.078639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:18.078646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:18.078650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:18.078660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:18.078674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:18.078827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:18.078899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:18.094214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:18.094244Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:18.097977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:18.098061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 
2025-06-30T09:24:18.098108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:18.101209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:18.101308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:18.101444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:18.101553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:18.102414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:18.102480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:18.102829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:18.102846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:18.102877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:18.102887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:18.102895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:18.102919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.104551Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:18.121807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:18.121877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.121938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:18.121944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:18.121989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:18.122002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:18.122803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:18.122840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:18.122899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.122915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:18.122920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:18.122924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:18.123322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.123334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:18.123342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:18.123642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.123648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.123652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:18.123674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:18.124189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:18.124574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:18.124608Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:18.124787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:18.124808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:18.124815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:18.124871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:18.124877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:18.124905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:18.124915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:18.125348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:18.125358Z node 1 :FLAT_TX_SCHEMESHARD ... 
shard: 72057594046678944 2025-06-30T09:24:18.235590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-30T09:24:18.235613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 128 -> 129 2025-06-30T09:24:18.235643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:24:18.238706Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:421:2390], attempt# 0 2025-06-30T09:24:18.241656Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:421:2390], sender# [1:420:2387] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-30T09:24:18.242810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:18.242826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:24:18.242916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:18.242923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-30T09:24:18.242942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.242951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:18.243230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:24:18.243241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:24:18.243246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 REQUEST: 2025-06-30T09:24:18.243252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 PUT /metadata.json HTTP/1.1 HEADERS: 2025-06-30T09:24:18.243275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Host: localhost:27617 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: AD418D2D-B423-4D56-84DC-4E18825CE56D 
amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: 2025-06-30T09:24:18.243307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:24:18.243663Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:27617 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 04F023C3-555D-4A4D-9BDD-04B997EE91E2 amz-sdk-request: attempt=1 content-length: 357 content-md5: IxJB3qM/y2xlsv8qcwTF7g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-30T09:24:18.245063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:24:18.245332Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: 231241dea33fcb6c65b2ff2a7304c5ee } 2025-06-30T09:24:18.245353Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:420:2387] 2025-06-30T09:24:18.245424Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:421:2390], sender# [1:420:2387], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:27617 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 1AE369B7-CDD7-49EE-B81C-8D84A349650B amz-sdk-request: attempt=1 content-length: 40 content-md5: LXbLDYru8NmFsYXNSXjnpQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 40 2025-06-30T09:24:18.246246Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: 2d76cb0d8aeef0d985b185cd4978e7a5 } 2025-06-30T09:24:18.246262Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:421:2390], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-30T09:24:18.246298Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:420:2387], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-30T09:24:18.258834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" 
BytesProcessed: 20 RowsProcessed: 1 } 2025-06-30T09:24:18.258871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:24:18.258912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-30T09:24:18.258935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-30T09:24:18.258955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:18.258964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.258973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:24:18.258987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240 2025-06-30T09:24:18.259057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:18.259775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.259881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.259893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:24:18.259909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:24:18.259914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:24:18.259920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:24:18.259923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:24:18.259928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:24:18.259945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:342:2319] message: TxId: 102 2025-06-30T09:24:18.259964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:24:18.259974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:24:18.259978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:24:18.260012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:24:18.260573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:24:18.260587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:404:2374] TestWaitNotification: OK eventTxId 102 >> TestDataErasure::Run3CyclesForTopics >> TestDataErasure::SimpleTestForTopic >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable [GOOD] >> KqpStreamLookup::ReadTableWithIndexDuringSplit >> KqpStreamLookup::ReadTableDuringSplit >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:23:09.635559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:23:09.635608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:23:09.635615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:23:09.635621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:23:09.635639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:23:09.635644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:23:09.635655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:23:09.635678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:23:09.635835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:23:09.635920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:23:09.651774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:23:09.651810Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:09.656055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:23:09.656130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:23:09.656171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:23:09.657952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:23:09.658052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:23:09.658210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:09.658308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:23:09.659057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:09.659107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:23:09.659429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:09.659443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:09.659476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:23:09.659486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:23:09.659493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:23:09.659515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:23:09.661137Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:23:09.685174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:23:09.685298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:09.685387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:23:09.685400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:23:09.685486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:23:09.685505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:09.688478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:09.688557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:23:09.688652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:09.688671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:23:09.688679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:23:09.688687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:23:09.696230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:09.696271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:23:09.696287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:23:09.700526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:09.700560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:23:09.700571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:23:09.700585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:23:09.701556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:23:09.703043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:23:09.703111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:23:09.703406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:09.703463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:23:09.703475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:23:09.703566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:23:09.703578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:23:09.703623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:23:09.703640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:23:09.706549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:09.706572Z node 1 :FLAT_TX_SCHEMESHARD ... 
9:24:19.319107Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5020: StateWork, processing event TEvPrivate::TEvRunConditionalErase 2025-06-30T09:24:19.319114Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6723: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-30T09:24:19.319153Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-30T09:24:19.319182Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-30T09:24:19.436153Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:776:2660]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-30T09:24:19.436203Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-30T09:24:19.436249Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409552 outdated step 200 last cleanup 0 2025-06-30T09:24:19.436308Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409552 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:19.436319Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409552 2025-06-30T09:24:19.436327Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409552 has no attached operations 2025-06-30T09:24:19.436333Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409552 2025-06-30T09:24:19.436389Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:776:2660]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-30T09:24:19.436436Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409552, FollowerId 0, tableId 2 2025-06-30T09:24:19.436571Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269553162, Sender [3:776:2660], Recipient [3:906:2762]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409552 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 13 Memory: 119208 } ShardState: 2 UserTablePartOwners: 72075186233409552 NodeId: 3 StartTime: 122 TableOwnerId: 72075186233409549 FollowerId: 0 2025-06-30T09:24:19.436585Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4997: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-30T09:24:19.436606Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409552 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0013 2025-06-30T09:24:19.436629Z node 3 :FLAT_TX_SCHEMESHARD 
TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409552 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-30T09:24:19.436639Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-30T09:24:19.446890Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:780:2663]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-30T09:24:19.446932Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-30T09:24:19.446970Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409553 outdated step 200 last cleanup 0 2025-06-30T09:24:19.447002Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409553 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:19.447013Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409553 2025-06-30T09:24:19.447020Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409553 has no attached operations 2025-06-30T09:24:19.447025Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409553 2025-06-30T09:24:19.447069Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:780:2663]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-30T09:24:19.447116Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409553, FollowerId 0, tableId 2 2025-06-30T09:24:19.447234Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269553162, Sender [3:780:2663], Recipient [3:906:2762]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409553 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 12 Memory: 119208 } ShardState: 2 UserTablePartOwners: 72075186233409553 NodeId: 3 StartTime: 122 TableOwnerId: 72075186233409549 FollowerId: 0 2025-06-30T09:24:19.447242Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4997: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-30T09:24:19.447257Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409553 
followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0012 2025-06-30T09:24:19.447271Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409553 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-30T09:24:19.461847Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:906:2762]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:19.461881Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:19.461901Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [3:906:2762], Recipient [3:906:2762]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:19.461908Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:19.475038Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435096, Sender [0:0:0], Recipient [3:906:2762]: NKikimr::NSchemeShard::TEvPrivate::TEvSendBaseStatsToSA 2025-06-30T09:24:19.475072Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5147: StateWork, processing event TEvPrivate::TEvSendBaseStatsToSA 2025-06-30T09:24:19.475183Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435076, Sender [0:0:0], Recipient [3:906:2762]: NKikimr::NSchemeShard::TEvPrivate::TEvRunConditionalErase 2025-06-30T09:24:19.475190Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5020: StateWork, processing event TEvPrivate::TEvRunConditionalErase 2025-06-30T09:24:19.475196Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6723: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-06-30T09:24:19.475226Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-06-30T09:24:19.475248Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-06-30T09:24:19.475274Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269746180, Sender [3:2013:3829], Recipient [3:906:2762]: NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult 2025-06-30T09:24:19.475280Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5146: StateWork, processing event TEvTxProxySchemeCache::TEvNavigateKeySetResult 2025-06-30T09:24:19.499170Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:2016:3832], Recipient [3:776:2660]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:19.499199Z node 3 :TX_DATASHARD TRACE: 
datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:19.499211Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409552, clientId# [3:2015:3831], serverId# [3:2016:3832], sessionId# [0:0:0] 2025-06-30T09:24:19.499294Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553213, Sender [3:2014:3830], Recipient [3:776:2660]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72075186233409549 LocalId: 2 } 2025-06-30T09:24:19.499408Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:2019:3835], Recipient [3:780:2663]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:19.499412Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:19.499418Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409553, clientId# [3:2018:3834], serverId# [3:2019:3835], sessionId# [0:0:0] 2025-06-30T09:24:19.499436Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553213, Sender [3:2017:3833], Recipient [3:780:2663]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72075186233409549 LocalId: 2 } >> KqpCost::IndexLookup-useSink >> TestDataErasure::SimpleTestForAllSupportedObjects |83.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004c8e/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk7/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_root-_good_dynconfig/audit.txt 2025-06-30T09:24:11.471786Z: {"sanitized_token":"**** (B6C6F477)","subject":"root@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} |83.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004c86/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk13/testing_out_stuff/test_auditlog.py.test_dml_requests_arent_logged_when_sid_is_expected/audit.txt |83.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] |83.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup-useSink [GOOD] >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink [GOOD] >> AsyncIndexChangeCollector::UpsertToSameKey |83.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore_reboots/schemeshard-ut_incremental_restore_reboots |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/tx/schemeshard/ut_incremental_restore_reboots/schemeshard-ut_incremental_restore_reboots |83.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore_reboots/schemeshard-ut_incremental_restore_reboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004c6f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk5/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_other-_good_dynconfig/audit.txt 2025-06-30T09:24:12.402003Z: {"sanitized_token":"othe****ltin (27F910A9)","subject":"other-user@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> TestDataErasure::DataErasureManualLaunch [GOOD] >> TestDataErasure::DataErasureWithCopyTable >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndRemoteClusterEnabledDelaySec_SessionDiesOnlyAfterDelay [GOOD] >> TPersQueueTest::PreferredCluster_RemotePreferredClusterEnabledWhileSessionInitializing_SessionDiesOnlyAfterInitializationAndDelay ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 16796, MsgBus: 25664 2025-06-30T09:24:20.340925Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670829457116130:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:20.341194Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0035ee/r3tmp/tmpg62MTt/pdisk_1.dat 2025-06-30T09:24:20.398638Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670829457116105:2080] 1751275460340730 != 1751275460340733 2025-06-30T09:24:20.400446Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16796, node 1 2025-06-30T09:24:20.420122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:20.420135Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:20.420137Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:20.420179Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25664 TClient is connected to server localhost:25664 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:24:20.482273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:20.482303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:20.483414Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:20.507471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:20.522340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:20.546405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:20.576046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:20.589821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:20.735031Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670829457117699:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:20.735064Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:20.772372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.828323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.838881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.853612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.867734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.880976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.894859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.911337Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670829457118353:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:20.911372Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:20.911395Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670829457118358:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:20.912173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:20.914170Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670829457118360:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:20.995158Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670829457118411:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:21.202157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:21.343032Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 |83.4%| [TA] $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 25064, MsgBus: 15328 2025-06-30T09:24:20.460210Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670828188060784:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:20.460244Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0035eb/r3tmp/tmpCappaC/pdisk_1.dat 2025-06-30T09:24:20.507748Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:20.510266Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670828188060763:2080] 1751275460460097 != 1751275460460100 TServer::EnableGrpc on GrpcPort 25064, node 1 2025-06-30T09:24:20.524183Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:20.524196Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:20.524198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:20.524240Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15328 TClient is connected to server localhost:15328 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:24:20.587610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:20.587650Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:20.588649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:20.593998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:20.605575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:20.628770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:24:20.657380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:20.672308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.831696Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670828188062366:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:20.831720Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:20.875821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.883492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.938516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.950592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.958180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.971718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.985193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:21.001930Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670832483030315:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:21.001957Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:21.001958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670832483030320:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:21.002675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:21.005083Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670832483030322:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:21.061885Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670832483030373:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:21.226269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004c64/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk0/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_bad_auth-_bad_dynconfig/audit.txt 2025-06-30T09:24:12.721636Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"**** (C877DF61)","remote_address":"127.0.0.1","status":"ERROR","subject":"__bad__@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} |83.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr >> AsyncIndexChangeCollector::DeleteNothing >> TestDataErasure::SchemeShardCounterDoesNotConsistWithBscCounter [GOOD] |83.4%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... 
results_accumulator.log} |83.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr >> CdcStreamChangeCollector::InsertSingleRow |83.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |83.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt >> CdcStreamChangeCollector::UpsertManyRows |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt >> TestDataErasure::SimpleTestForTables [GOOD] >> TestDataErasure::SimpleTestForTopic [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SchemeShardCounterDoesNotConsistWithBscCounter [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:19.131632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:19.131660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.131666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:19.131672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:19.131686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:19.131690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:19.131700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.131715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:19.131861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:19.131945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:19.150414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:19.150434Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:19.152935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:19.152984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:19.153015Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:19.154130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:19.154182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:19.154277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.154333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:19.154796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.154832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:19.155057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.155065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.155087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:19.155092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:19.155096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:19.155109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.156315Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:19.180244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:19.180335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.180408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:19.180418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:19.180468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, 
propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:19.180482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:19.183155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.183222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:19.183292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.183305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:19.183312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:19.183319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:19.184426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.184464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:19.184475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:19.185168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.185184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.185192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.185201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:19.186059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:19.186799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:19.186855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: 
TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:19.187104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.187141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:19.187150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.187237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:19.187248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.187285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:19.187300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:19.193526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.193545Z node 1 :FLAT_TX_SCHEMESHARD ... 
HEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:21.333482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:646: TTxCompleteDataErasureBSC Unknown generation#47, Expected gen# 1 at schemeshard: 72057594046678944 2025-06-30T09:24:21.333494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 48 2025-06-30T09:24:21.333538Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:306:2284], Recipient [1:298:2278]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 48 Completed: false Progress10k: 0 2025-06-30T09:24:21.333542Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:21.333546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:21.333552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:21.333560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 0% 2025-06-30T09:24:21.333929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-30T09:24:21.333941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-30T09:24:21.333948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-30T09:24:21.786448Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:21.786482Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:21.786499Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:467:2416]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:21.786504Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:21.786515Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:845:2724]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:21.786519Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:21.786529Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:467:2416], Recipient [1:467:2416]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime
2025-06-30T09:24:21.786534Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:21.786549Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:845:2724], Recipient [1:845:2724]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:21.786553Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:21.786562Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:298:2278], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:21.786566Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:21.830894Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:21.830920Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:21.830926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 48 2025-06-30T09:24:21.831014Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:306:2284], Recipient [1:298:2278]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 48 Completed: false Progress10k: 5000 2025-06-30T09:24:21.831022Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:21.831028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:21.831048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:21.831058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-30T09:24:21.831071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-30T09:24:21.831078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-30T09:24:22.188056Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:467:2416]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.188088Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.188109Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:845:2724]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.188115Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.188127Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.188132Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.188145Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:467:2416], Recipient [1:467:2416]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.188150Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.188167Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:845:2724], Recipient [1:845:2724]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.188172Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.188183Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:298:2278], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.188187Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.228828Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:22.228861Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:22.228870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 48 2025-06-30T09:24:22.228988Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:306:2284], Recipient [1:298:2278]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 48 Completed: true Progress10k: 10000 2025-06-30T09:24:22.228996Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:22.229002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:22.229024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:22.229031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-30T09:24:22.229042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.933000s, Timestamp# 1970-01-01T00:00:05.113000Z 2025-06-30T09:24:22.229049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 48, duration# 2 s 2025-06-30T09:24:22.231214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-30T09:24:22.231375Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:1996:3661], Recipient [1:298:2278]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:22.231382Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:22.231387Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:24:22.231421Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:298:2278]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-30T09:24:22.231427Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5160: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-30T09:24:22.231432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7845: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> AsyncIndexChangeCollector::InsertSingleRow >> CdcStreamChangeCollector::UpsertIntoTwoStreams >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn >> KqpStreamLookup::ReadTableDuringSplit [GOOD] >> test_auditlog.py::test_single_dml_query_logged[delete] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleTestForTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:19.775975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:19.776007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.776013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:19.776018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:19.776031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:19.776036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:19.776046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.776061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:19.776191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:19.776272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:19.808615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:19.808649Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:19.819780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:19.819872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:19.819919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:19.821865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:19.821949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:19.822091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.822161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:19.822929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.822978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:19.823319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.823334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.823365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:19.823375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:19.823381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:19.823403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.832171Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:19.870837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:19.870913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.870976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:19.870985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:19.871036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:19.871049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:19.879148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.879198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:19.879248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.879257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:19.879261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:19.879265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:19.880404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.880434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:19.880446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:19.881029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.881049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:24:19.881057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.881065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:19.881774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:19.882235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:19.882272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:19.882465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.882489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:19.882497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.882566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:19.882576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.882608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:19.882621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:19.883056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.883065Z node 1 :FLAT_TX_SCHEMESHARD ... 
[RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-30T09:24:21.825703Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877760, Sender [1:1974:3640], Recipient [1:295:2277]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037932033 Status: OK ServerId: [1:1975:3641] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-30T09:24:21.825709Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5055: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-30T09:24:21.825714Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5794: Handle TEvClientConnected, tabletId: 72057594037932033, status: OK, at schemeshard: 72057594046678944 2025-06-30T09:24:21.825735Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: false Progress10k: 0 2025-06-30T09:24:21.825741Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:21.825746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:21.825755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:21.825760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 0% 2025-06-30T09:24:21.825770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-30T09:24:21.825778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-30T09:24:22.240418Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.240465Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.240489Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2717]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.240494Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.240508Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.240514Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.240527Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:295:2277], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 
2025-06-30T09:24:22.240534Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.240552Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:464:2414], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.240557Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.240570Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:838:2717], Recipient [1:838:2717]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.240575Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.282387Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:22.282428Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:22.282439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-30T09:24:22.282534Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: false Progress10k: 5000 2025-06-30T09:24:22.282543Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:22.282549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:22.282585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:22.282603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-30T09:24:22.282632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-30T09:24:22.282649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-30T09:24:22.628905Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.628939Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.628958Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2717]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.628962Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.628974Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.628978Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.628989Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:464:2414], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.628994Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.629011Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:838:2717], Recipient [1:838:2717]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.629016Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.629026Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:295:2277], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.629030Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.669752Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:22.669790Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:22.669800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-30T09:24:22.669919Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-30T09:24:22.669926Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:22.669930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:22.669956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:22.669961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-30T09:24:22.669971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.936000s, Timestamp# 1970-01-01T00:00:05.109000Z 2025-06-30T09:24:22.669977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-06-30T09:24:22.670869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-30T09:24:22.671052Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:1996:3662], Recipient [1:295:2277]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:22.671061Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:22.671068Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:24:22.671088Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:295:2277]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-30T09:24:22.671094Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5160: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-30T09:24:22.671100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7845: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> AsyncIndexChangeCollector::UpsertToSameKey [GOOD] >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleTestForTopic [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:20.003706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:20.003732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:20.003739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:20.003745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:20.003764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:20.003769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:20.003780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:20.003801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:20.003937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:20.004021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:20.019290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:20.019310Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:20.022728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:20.022807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:20.022848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:20.025091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:20.025191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:20.025318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:20.025392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:20.026242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:20.026296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:20.026590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:20.026599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:20.026630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:20.026640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:20.026648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:20.026667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.028225Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:20.051274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:20.051346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.051416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:20.051426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:20.051479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:20.051497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:20.052286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:20.052334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:20.052386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.052398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:20.052405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:20.052413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:20.052913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.052928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:20.052939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:20.053340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.053352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.053360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-30T09:24:20.053370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:20.054247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:20.054779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:20.054822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:20.055054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:20.055089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:20.055100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:20.055193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:20.055203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:20.055244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:20.055260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:20.055800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:20.055812Z node 1 :FLAT_TX_SCHEMESHARD ... 
657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-30T09:24:22.192617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-30T09:24:22.192628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-30T09:24:22.253943Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.253986Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.254025Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:464:2414], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.254031Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.254089Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [1:644:2558], Recipient [1:464:2414]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409546 2025-06-30T09:24:22.254094Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:24:22.254117Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:24:22.254172Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 2 took 41us result status StatusSuccess 2025-06-30T09:24:22.254326Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409550 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409549 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409550 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 
DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:24:22.305513Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:893:2765]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.305549Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.305588Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:893:2765], Recipient [1:893:2765]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.305594Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.305655Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [1:1076:2911], Recipient [1:893:2765]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409551 2025-06-30T09:24:22.305660Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:24:22.305695Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-30T09:24:22.305771Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409551 describe pathId 2 took 62us result status StatusSuccess 2025-06-30T09:24:22.305964Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409551 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409555 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409554 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409555 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409552 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409553 SchemeShard: 72075186233409551 
} DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-30T09:24:22.639963Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.639996Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:22.640014Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:295:2277], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.640020Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:22.660360Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:22.660392Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:22.660401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-30T09:24:22.660475Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-30T09:24:22.660482Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:22.660487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:22.660510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:22.660516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-30T09:24:22.660529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.982000s, Timestamp# 1970-01-01T00:00:05.063000Z 2025-06-30T09:24:22.660535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-06-30T09:24:22.661237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-30T09:24:22.661377Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:1358:3160], Recipient [1:295:2277]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:22.661396Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:22.661401Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:24:22.661431Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:295:2277]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-30T09:24:22.661436Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5160: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-30T09:24:22.661441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7845: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> KqpStreamLookup::ReadTableWithIndexDuringSplit [GOOD] >> KqpCost::IndexLookupJoin-StreamLookupJoin >> KqpCost::OlapWriteRow ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_kqp/unittest >> KqpStreamLookup::ReadTableDuringSplit [GOOD] Test command err: 2025-06-30T09:24:20.700014Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:24:20.700117Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:24:20.700136Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047d8/r3tmp/tmplijgPZ/pdisk_1.dat 2025-06-30T09:24:20.810022Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:24:20.810982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:20.827548Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:20.828432Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275460196047 != 1751275460196051 2025-06-30T09:24:20.870506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:20.870549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:20.881125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:20.954678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:21.158799Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:698:2580], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:21.158838Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:709:2585], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:21.158852Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:21.159778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:21.202694Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:21.317957Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:712:2588], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:24:21.357185Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:783:2628] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:22.723423Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz02dph68fav5rccav3y6193, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTk4MDdkY2ItODNlZTNhZTYtM2ZlZTNkNDAtYzQwNGM0NDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:22.794102Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02dr2t63z10r0amv75nw4x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjUwOTg3Ni01MzJkZTMyNC1iZGZkYzJhMi03YTdjYmNhNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_SOURCE_READ_ACTOR to TX_DATASHARD_ACTOR 2025-06-30T09:24:22.797264Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02dr2t63z10r0amv75nw4x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjUwOTg3Ni01MzJkZTMyNC1iZGZkYzJhMi03YTdjYmNhNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR --- split started --- --- split finished --- Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR >> AsyncIndexChangeCollector::DeleteNothing [GOOD] >> AsyncIndexChangeCollector::DeleteSingleRow >> AsyncIndexChangeCollector::UpsertSingleRow >> test_auditlog.py::test_single_dml_query_logged[insert] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_kqp/unittest >> KqpStreamLookup::ReadTableWithIndexDuringSplit [GOOD] Test command err: 2025-06-30T09:24:20.471719Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:24:20.471801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:24:20.471818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0047ec/r3tmp/tmpMhiCUX/pdisk_1.dat 2025-06-30T09:24:20.580887Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:24:20.581969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:20.600835Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:20.602208Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275460000287 != 1751275460000291 2025-06-30T09:24:20.644511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:20.644562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:20.655198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:20.731924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:20.945124Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:746:2617], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:20.945155Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:757:2622], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:20.945166Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:20.946205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:20.989509Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:21.095542Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:760:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:24:21.135941Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:831:2665] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:23.124452Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz02dpag0xcw4gchg7x4cfsb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2YxZDg3NjYtMTZlZWE3NmYtZjNjMzMxNTUtZWI5N2E0NDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:23.138387Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02dpag0xcw4gchg7x4cfsb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2YxZDg3NjYtMTZlZWE3NmYtZjNjMzMxNTUtZWI5N2E0NDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:23.209348Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02drg80293hdavpy99e5re, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGExOGMzNzgtODBhYmY1Ni1jYTA4YTNjYy1hYzAzYWM0Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_SOURCE_READ_ACTOR to TX_DATASHARD_ACTOR >> KqpCost::IndexLookup+useSink >> TestDataErasure::DataErasureWithSplit [GOOD] >> TestDataErasure::DataErasureWithMerge |83.4%| [TA] $(B)/ydb/core/tx/datashard/ut_kqp/test-results/unittest/{meta.json ... 
results_accumulator.log} >> CdcStreamChangeCollector::InsertSingleRow [GOOD] >> CdcStreamChangeCollector::InsertSingleUuidRow >> CdcStreamChangeCollector::UpsertManyRows [GOOD] >> CdcStreamChangeCollector::UpsertToSameKey >> AsyncIndexChangeCollector::InsertSingleRow [GOOD] >> AsyncIndexChangeCollector::InsertManyRows >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue [GOOD] >> CdcStreamChangeCollector::DeleteNothing >> TestDataErasure::SimpleTestForAllSupportedObjects [GOOD] >> KqpCost::OlapRangeFullScan >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn [GOOD] >> AsyncIndexChangeCollector::CoveredIndexUpsert |83.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> TestDataErasure::ManualLaunch3Cycles [GOOD] >> TestDataErasure::ManualLaunch3CyclesWithNotConsistentCountersInSchemeShardAndBSC >> CdcStreamChangeCollector::UpsertIntoTwoStreams [GOOD] >> CdcStreamChangeCollector::PageFaults >> TestDataErasure::Run3CyclesForTables [GOOD] >> KqpCost::IndexLookupJoin-StreamLookupJoin [GOOD] >> AsyncIndexChangeCollector::UpsertSingleRow [GOOD] >> AsyncIndexChangeCollector::UpsertManyRows >> AsyncIndexChangeCollector::DeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v0-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |83.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleTestForAllSupportedObjects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:20.631146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:20.631176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:20.631183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:20.631189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:20.631203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:20.631208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:20.631222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: 
BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:20.631239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:20.631389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:20.631465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:20.646047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:20.646072Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:20.649276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:20.649342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:20.649395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:20.651577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:20.651665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:20.651789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:20.651876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:20.652649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:20.652689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:20.652993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:20.653004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:20.653031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:20.653039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:20.653046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:20.653066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.654810Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for 
TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:20.679473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:20.679563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.679635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:20.679644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:20.679698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:20.679712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:20.680758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:20.680808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:20.680866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.680878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:20.680885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:20.680892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:20.681428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.681441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:20.681450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:20.681866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.681877Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:20.681884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:20.681893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:20.682696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:20.683197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:20.683238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:20.683454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:20.683481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:20.683489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:20.683562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:20.683570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:20.683618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:20.683644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:20.684114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:20.684124Z node 1 :FLAT_TX_SCHEMESHARD ... 
657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-30T09:24:23.976218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-30T09:24:23.976229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-30T09:24:24.006828Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.006858Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.006909Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:464:2414], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.006915Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.006977Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [1:803:2687], Recipient [1:464:2414]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-06-30T09:24:24.006984Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:24:24.007009Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:24:24.007072Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 3 took 50us result status StatusSuccess 2025-06-30T09:24:24.007243Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 
DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:24:24.078671Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:964:2820]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.078699Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.078725Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:964:2820], Recipient [1:964:2820]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.078730Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.078792Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [1:1388:3170], Recipient [1:964:2820]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-06-30T09:24:24.078797Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:24:24.078821Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-30T09:24:24.078885Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 49us result status StatusSuccess 2025-06-30T09:24:24.079004Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 350 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 72075186233409553 
} DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-30T09:24:24.526384Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.526427Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.526450Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:295:2277], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.526456Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.586996Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:24.587033Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:24.587044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-30T09:24:24.587155Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-30T09:24:24.587165Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:24.587172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:24.587203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:24.587211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-30T09:24:24.587226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.917000s, Timestamp# 1970-01-01T00:00:05.128000Z 2025-06-30T09:24:24.587235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-06-30T09:24:24.591736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-30T09:24:24.592003Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:2433:4034], Recipient [1:295:2277]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:24.592017Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:24.592025Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:24:24.592072Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:295:2277]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-30T09:24:24.592079Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5160: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-30T09:24:24.592086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7845: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> KqpCost::AAARangeFullScan >> KqpCost::IndexLookup+useSink [GOOD] >> TestDataErasure::Run3CyclesForTopics [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::Run3CyclesForTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:19.117992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:19.118024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.118030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:19.118037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:19.118052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:19.118057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:19.118068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.118085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:19.118229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:19.118312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:19.135916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:19.135941Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:19.139558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:19.139626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:19.139668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:19.141144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:19.141221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:19.141346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.141417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:19.142046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.142084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:19.142380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.142394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.142422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:19.142438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:19.142444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:19.142462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.143857Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:19.186143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:19.186220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.186282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:19.186290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:19.186338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:19.186353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:19.187137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.187190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:19.187240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.187251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:19.187258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:19.187264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:19.187796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.187816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:19.187831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:19.188388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.188402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.188411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.188419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:19.189255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:19.189758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:19.189803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:19.190032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.190074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:19.190085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.190187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:19.190202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.190252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:19.190270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:19.190807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.190817Z node 1 :FLAT_TX_SCHEMESHARD ... 594046678944, LocalPathId: 3] in# 64 ms, next wakeup# 593.936000s, rate# 0, in queue# 0 tenants, running# 0 tenants at schemeshard 72057594046678944 2025-06-30T09:24:24.007032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:327: [RootDataErasureManager] Data erasure in tenants is completed. 
Send request to BS controller 2025-06-30T09:24:24.007484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:615: TTxCompleteDataErasureTenant Complete at schemeshard: 72057594046678944, NeedSendRequestToBSC# true 2025-06-30T09:24:24.007505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-30T09:24:24.007563Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: false Progress10k: 0 2025-06-30T09:24:24.007570Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:24.007575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:24.007585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:24.007591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 0% 2025-06-30T09:24:24.007602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-30T09:24:24.007610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-30T09:24:24.530911Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.530950Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.530971Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.530977Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.530988Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2717]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.530994Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:24.531006Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:838:2717], Recipient [1:838:2717]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.531012Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.531031Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:295:2277], Recipient [1:295:2277]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.531036Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.531047Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:464:2414], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.531052Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:24.573211Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:24.573251Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:24.573260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-30T09:24:24.573365Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: false Progress10k: 5000 2025-06-30T09:24:24.573375Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:24.573381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:24.573409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:24.573422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-30T09:24:24.573440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-30T09:24:24.573452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-30T09:24:25.007037Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.007074Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.007093Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.007098Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.007107Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2717]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.007112Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.007123Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:295:2277], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.007128Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.007143Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:464:2414], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.007147Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.007158Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:838:2717], Recipient [1:838:2717]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.007162Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.046953Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:25.046990Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:25.046998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-30T09:24:25.047072Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-06-30T09:24:25.047081Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:25.047086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:25.047110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:25.047117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-30T09:24:25.047127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.935000s, Timestamp# 1970-01-01T00:00:11.110000Z 2025-06-30T09:24:25.047133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-06-30T09:24:25.047897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, 
NeedScheduleRequestToBSC# false 2025-06-30T09:24:25.048055Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:3583:4937], Recipient [1:295:2277]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:25.048063Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:25.048068Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:24:25.048089Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:295:2277]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-30T09:24:25.048095Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5160: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-30T09:24:25.048101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7845: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupJoin-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 12364, MsgBus: 3690 2025-06-30T09:24:23.462795Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670843257193347:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:23.462833Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0035a2/r3tmp/tmpfheWou/pdisk_1.dat 2025-06-30T09:24:23.520215Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:23.522837Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670843257193325:2080] 1751275463462594 != 1751275463462597 TServer::EnableGrpc on GrpcPort 12364, node 1 2025-06-30T09:24:23.537130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:23.537151Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:23.537154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:23.537216Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3690 2025-06-30T09:24:23.564520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:23.564548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:23.565686Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3690 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:23.606773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:23.609428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:23.621741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:23.693911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:23.721422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:23.735144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:23.923591Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670843257194928:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:23.923643Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:23.974020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:23.982994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:23.996059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.009652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.024019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.038123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.051554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.068127Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670847552162876:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.068149Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.068186Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670847552162881:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.069041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:24.071179Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670847552162883:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:24.133368Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670847552162934:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:24.398029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.422885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.446916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.470008Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; /Root/Join1_2 1 19 /Root/Join1_1 8 136 >> KqpCost::OlapWriteRow [GOOD] >> AsyncIndexChangeCollector::InsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow >> CdcStreamChangeCollector::UpsertToSameKey [GOOD] >> CdcStreamChangeCollector::UpsertToSameKeyWithImages ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::Run3CyclesForTopics [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:19.790472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:19.790506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.790514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:19.790521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using 
default configuration 2025-06-30T09:24:19.790541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:19.790547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:19.790560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.790581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:19.790780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:19.790887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:19.817660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:19.817714Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:19.823242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:19.823320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:19.823368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:19.827536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:19.827657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:19.827828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.827930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:19.829647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.829754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:19.830202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.830221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.830264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:19.830276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-30T09:24:19.830286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:19.830320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.832706Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:19.858517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:19.858608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.858688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:19.858697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:19.858765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:19.858783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:19.859785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.859848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:19.859917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.859931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:19.859938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:19.859947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:19.863737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.863762Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:19.863777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:19.867358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.867376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.867383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.867390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:19.868002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:19.871243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:19.871302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:19.871522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.871557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:19.871567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.871645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:19.871656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.871689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:19.871702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-06-30T09:24:19.874335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.874347Z node 1 :FLAT_TX_SCHEMESHARD ... 657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-30T09:24:24.923194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-30T09:24:24.923209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-30T09:24:25.034921Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.034956Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.034977Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:464:2414], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.034984Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.035049Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [1:644:2558], Recipient [1:464:2414]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409546 2025-06-30T09:24:25.035057Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:24:25.035084Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:24:25.035160Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 2 took 61us result status StatusSuccess 2025-06-30T09:24:25.035337Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409550 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409549 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409550 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 
72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:24:25.122440Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:893:2765]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.122477Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.122506Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:893:2765], Recipient [1:893:2765]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.122513Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.122572Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [1:1076:2911], Recipient [1:893:2765]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409551 2025-06-30T09:24:25.122578Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:24:25.122607Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-30T09:24:25.122683Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409551 describe pathId 2 took 64us result status StatusSuccess 2025-06-30T09:24:25.122875Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409551 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409555 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409554 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409555 NextPartitionId: 1 } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409552 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409553 SchemeShard: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-30T09:24:25.447479Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.447513Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:25.447532Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:295:2277], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.447537Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:25.457745Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:25.457776Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:25.457785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-30T09:24:25.457861Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-06-30T09:24:25.457868Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:25.457874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:25.457903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:25.457910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-30T09:24:25.457926Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.984000s, Timestamp# 1970-01-01T00:00:11.061000Z 2025-06-30T09:24:25.457932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-06-30T09:24:25.458845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-30T09:24:25.459014Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:1538:3324], Recipient [1:295:2277]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:25.459024Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:25.459030Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:24:25.459064Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:295:2277]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-30T09:24:25.459070Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5160: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-30T09:24:25.459076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7845: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 1214, MsgBus: 11032 2025-06-30T09:24:24.072108Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670846785573853:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:24.072293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00358b/r3tmp/tmpVpTUES/pdisk_1.dat 2025-06-30T09:24:24.150920Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670846785573829:2080] 1751275464071922 != 1751275464071925 2025-06-30T09:24:24.153146Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1214, node 1 2025-06-30T09:24:24.173138Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:24.173152Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:24.173154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:24.173209Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11032 2025-06-30T09:24:24.227536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:24.227583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-30T09:24:24.229243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11032 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:24.284811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:24.288509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:24.295181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:24.366697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:24.413367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:24.485746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:24.623304Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670846785575438:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.623339Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.733815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.750680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.764722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.787288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.809475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.837061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.862836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.895304Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670846785576093:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.895345Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.897104Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670846785576098:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.898473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:24.903213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:24.903333Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670846785576100:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:24.975401Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670846785576151:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:25.076274Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:25.227384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 >> AsyncIndexChangeCollector::CoveredIndexUpsert [GOOD] >> AsyncIndexChangeCollector::AllColumnsInPk |83.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapWriteRow [GOOD] Test command err: Trying to start YDB, gRPC: 12942, MsgBus: 14352 2025-06-30T09:24:23.574165Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670840115890175:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:23.574367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00359f/r3tmp/tmpfVAWEb/pdisk_1.dat TServer::EnableGrpc on GrpcPort 12942, node 1 2025-06-30T09:24:23.649621Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:23.649778Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670840115890152:2080] 1751275463573953 != 1751275463573956 2025-06-30T09:24:23.653310Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:23.653326Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:23.653329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:23.653375Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:23.676990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:23.677028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:14352 2025-06-30T09:24:23.678193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14352 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:23.722995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:23.728370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:23.796155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:23.818607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:23.830421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:23.973275Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670840115891769:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:23.973304Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.018051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.026510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.037825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.051978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.066103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.079611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.097950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.138474Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670844410859716:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.138514Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.138660Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670844410859721:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:24.139734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:24.144097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:24.144193Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670844410859723:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:24.218726Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670844410859774:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:24.480483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:24:24.522646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7521670844410860161:2480];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:24:24.522719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7521670844410860161:2480];tablet_id=7207518622403 ... hed_tx;tx_id=281474976715684; query_phases { duration_us: 18434 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 960 affected_shards: 1 } query_phases { duration_us: 19434 cpu_time_us: 90 affected_shards: 1 } compilation { duration_us: 35707 cpu_time_us: 30832 } process_cpu_time_us: 260 total_duration_us: 75402 total_cpu_time_us: 32142 2025-06-30T09:24:25.038486Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:1914: ActorId: [1:7521670848705828125:2469] TxId: 281474976715685. Ctx: { TraceId: 01jz02dt9rdes6922pytxbw85r, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzRmNDBkYmMtOGM5ZGE1NjYtZDExYmE2OWEtMmMyYTQyNjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-30T09:24:25.045841Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:1914: ActorId: [1:7521670848705828141:2469] TxId: 281474976715686. Ctx: { TraceId: 01jz02dt9rdes6922pytxbw85r, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzRmNDBkYmMtOGM5ZGE1NjYtZDExYmE2OWEtMmMyYTQyNjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Can not find default state storage group for database /Root 2025-06-30T09:24:25.094053Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=281474976715687;tx_id=281474976715687;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715687; 2025-06-30T09:24:25.094418Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976715687;tx_id=281474976715687;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715687; query_phases { duration_us: 7040 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 951 affected_shards: 1 } query_phases { duration_us: 10388 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 967 affected_shards: 2 } query_phases { duration_us: 43329 cpu_time_us: 93 affected_shards: 2 } compilation { duration_us: 17604 cpu_time_us: 16839 } process_cpu_time_us: 341 total_duration_us: 80522 total_cpu_time_us: 19191 2025-06-30T09:24:25.123123Z node 1 :TX_COLUMNSHARD_RESTORE WARN: log.cpp:784: tablet_id=72075186224037930;tablet_actor_id=[1:7521670844410860172:2481];this=88835981292208;activity=1;task_id=3874a86-559411f0-b6c97a57-4fcdd2ad::4;fline=restore.cpp:28;event=merge_data_problems;write_id=4;tablet_id=72075186224037930;message=Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}; 2025-06-30T09:24:25.123257Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7521670844410860172:2481];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteBlobsResult;tablet_id=72075186224037930;event=TEvWriteBlobsResult;fline=events.h:105;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]};tx_id=281474976715688; 2025-06-30T09:24:25.125492Z node 1 :TX_COLUMNSHARD_SCAN WARN: actor.cpp:133: Scan [1:7521670848705828193:2683] got AbortExecution txId: 281474976715688 scanId: 1 gen: 1 tablet: 72075186224037930 code: ABORTED reason: {
: Error: task finished: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]} } 2025-06-30T09:24:25.125744Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:7521670848705828190:2681], Table: `/Root/TestTable` ([72057594046644480:17:1]), SessionActorId: [0:0:0]Got CONSTRAINT VIOLATION for table `/Root/TestTable`. ShardID=72075186224037930, Sink=[1:7521670848705828190:2681].{
: Error: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}, code: 2012 } 2025-06-30T09:24:25.125763Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1566: SelfId: [1:7521670848705828187:2681], TxId: 281474976715688, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=YzRmNDBkYmMtOGM5ZGE1NjYtZDExYmE2OWEtMmMyYTQyNjM=. TraceId : 01jz02dtcg3nvrt95945eft99y. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Sink[0] fatal error: {
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012 subissue: {
: Error: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}, code: 2012 } } 2025-06-30T09:24:25.125784Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7521670848705828187:2681], TxId: 281474976715688, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=YzRmNDBkYmMtOGM5ZGE1NjYtZDExYmE2OWEtMmMyYTQyNjM=. TraceId : 01jz02dtcg3nvrt95945eft99y. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012 subissue: {
: Error: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}, code: 2012 } }. 2025-06-30T09:24:25.126325Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=YzRmNDBkYmMtOGM5ZGE1NjYtZDExYmE2OWEtMmMyYTQyNjM=, ActorId: [1:7521670844410860037:2469], ActorState: ExecuteState, TraceId: 01jz02dtcg3nvrt95945eft99y, Create QueryResponse for error on request, msg: query_phases { duration_us: 7304 cpu_time_us: 841 } compilation { duration_us: 12978 cpu_time_us: 11927 } process_cpu_time_us: 183 total_duration_us: 22097 total_cpu_time_us: 12951 2025-06-30T09:24:25.179549Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976715690;tx_id=281474976715690;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715690; query_phases { duration_us: 4618 cpu_time_us: 972 affected_shards: 1 } query_phases { duration_us: 18071 cpu_time_us: 88 affected_shards: 1 } compilation { duration_us: 22756 cpu_time_us: 21613 } process_cpu_time_us: 240 total_duration_us: 54803 total_cpu_time_us: 22913 2025-06-30T09:24:25.230989Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976715692;tx_id=281474976715692;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715692; query_phases { duration_us: 8994 table_access { name: "/Root/TestTable" deletes { rows: 1 } } cpu_time_us: 935 affected_shards: 1 } query_phases { duration_us: 13648 cpu_time_us: 83 affected_shards: 1 } compilation { duration_us: 15937 cpu_time_us: 14939 } process_cpu_time_us: 250 total_duration_us: 45030 total_cpu_time_us: 16207 2025-06-30T09:24:25.257645Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976715694;tx_id=281474976715694;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715694; query_phases { duration_us: 4296 table_access { name: "/Root/TestTable" deletes { rows: 1 } } cpu_time_us: 1091 affected_shards: 1 } query_phases { duration_us: 4997 cpu_time_us: 134 affected_shards: 1 } compilation { duration_us: 12925 cpu_time_us: 11925 } process_cpu_time_us: 246 total_duration_us: 23158 total_cpu_time_us: 13396 2025-06-30T09:24:25.293262Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976715697;tx_id=281474976715697;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715697; 2025-06-30T09:24:25.293384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976715697;tx_id=281474976715697;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715697; query_phases { duration_us: 3029 table_access { name: "/Root/TestTable" deletes { rows: 1 } } cpu_time_us: 808 affected_shards: 1 } query_phases { duration_us: 3929 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 889 affected_shards: 2 } query_phases { duration_us: 5466 cpu_time_us: 102 affected_shards: 2 } compilation { duration_us: 16546 cpu_time_us: 15579 } process_cpu_time_us: 303 total_duration_us: 29931 total_cpu_time_us: 17681 2025-06-30T09:24:25.371591Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976715699;tx_id=281474976715699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715699; 2025-06-30T09:24:25.372689Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=281474976715699;tx_id=281474976715699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715699; 2025-06-30T09:24:25.372748Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=281474976715699;tx_id=281474976715699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715699; 2025-06-30T09:24:25.372793Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715699;tx_id=281474976715699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715699; 2025-06-30T09:24:25.372809Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976715699;tx_id=281474976715699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715699; 2025-06-30T09:24:25.372877Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976715699;tx_id=281474976715699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715699; 2025-06-30T09:24:25.372914Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=281474976715699;tx_id=281474976715699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715699; 2025-06-30T09:24:25.372920Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976715699;tx_id=281474976715699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715699; 2025-06-30T09:24:25.372961Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=281474976715699;tx_id=281474976715699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715699; 2025-06-30T09:24:25.372983Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=281474976715699;tx_id=281474976715699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715699; query_phases { duration_us: 260 cpu_time_us: 260 } query_phases { duration_us: 16051 table_access { name: "/Root/TestTable" reads { rows: 2 bytes: 40 } deletes { rows: 2 } } cpu_time_us: 5583 affected_shards: 10 } query_phases { duration_us: 6763 cpu_time_us: 176 affected_shards: 10 } compilation { duration_us: 53771 cpu_time_us: 52015 } process_cpu_time_us: 564 total_duration_us: 78263 total_cpu_time_us: 58598 >> CdcStreamChangeCollector::InsertSingleUuidRow [GOOD] >> CdcStreamChangeCollector::IndexAndStreamUpsert >> KqpCost::OlapRangeFullScan [GOOD] >> AsyncIndexChangeCollector::UpsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn >> CdcStreamChangeCollector::DeleteNothing [GOOD] >> CdcStreamChangeCollector::DeleteSingleRow |83.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |83.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp/test-results/unittest/{meta.json ... results_accumulator.log} |83.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut >> TIncrementalRestoreWithRebootsTests::OperationIdempotencyAfterReboots >> KqpCost::PointLookup >> KqpCost::AAARangeFullScan [GOOD] >> KqpCost::OlapRange |83.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |83.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |83.5%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapRangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 7883, MsgBus: 22948 2025-06-30T09:24:24.673300Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670845801056852:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:24.673434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003585/r3tmp/tmpLZyFNx/pdisk_1.dat 2025-06-30T09:24:24.782729Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:24.786845Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670845801056681:2080] 1751275464668903 != 1751275464668906 TServer::EnableGrpc on GrpcPort 7883, node 1 2025-06-30T09:24:24.836802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:24.836817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:24.836819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:24.836877Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:24.848885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:24.848918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:24.849486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22948 TClient is connected to server localhost:22948 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:25.013104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:25.016499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:25.020902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:24:25.101275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:25.163151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:25.188535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:25.367458Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670850096025607:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:25.367490Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:25.415301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:25.423632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:25.437903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:25.493645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:25.508094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:25.522392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:25.536038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:25.556409Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670850096026263:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:25.556444Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:25.556497Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670850096026268:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:25.557395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:25.563159Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670850096026270:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:25.626757Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670850096026321:3405] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:25.671221Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:25.845653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:24:25.887841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7521670850096026657:2473];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved ... nished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:24:25.961622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:24:25.961681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:24:25.961690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:24:25.961705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:24:25.961710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:24:25.962684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7521670850096026651:2472];ev=NActors::IEventHandle;tablet_id=72075186224037929;tx_id=281474976715672;this=19700892866176;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275465962;max=18446744073709551615;plan=0;src=[1:7521670845801057004:2143];cookie=422:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.962904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037928;self_id=[1:7521670850096026725:2477];ev=NActors::IEventHandle;tablet_id=72075186224037928;tx_id=281474976715672;this=19700892737344;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275465962;max=18446744073709551615;plan=0;src=[1:7521670845801057004:2143];cookie=412:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.963008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;self_id=[1:7521670850096026745:2479];ev=NActors::IEventHandle;tablet_id=72075186224037924;tx_id=281474976715672;this=19700892863936;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275465962;max=18446744073709551615;plan=0;src=[1:7521670845801057004:2143];cookie=372:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.963094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7521670850096026731:2478];ev=NActors::IEventHandle;tablet_id=72075186224037927;tx_id=281474976715672;this=19700892674560;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275465963;max=18446744073709551615;plan=0;src=[1:7521670845801057004:2143];cookie=402:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.963854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[1:7521670850096026637:2470];ev=NActors::IEventHandle;tablet_id=72075186224037923;tx_id=281474976715672;this=19700892864096;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275465963;max=18446744073709551615;plan=0;src=[1:7521670845801057004:2143];cookie=362:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.963962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7521670850096026709:2475];ev=NActors::IEventHandle;tablet_id=72075186224037931;tx_id=281474976715672;this=19700892676960;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275465963;max=18446744073709551615;plan=0;src=[1:7521670845801057004:2143];cookie=442:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.964059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[1:7521670850096026707:2474];ev=NActors::IEventHandle;tablet_id=72075186224037926;tx_id=281474976715672;this=19700892678560;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275465964;max=18446744073709551615;plan=0;src=[1:7521670845801057004:2143];cookie=392:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.964613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7521670850096026717:2476];ev=NActors::IEventHandle;tablet_id=72075186224037930;tx_id=281474976715672;this=19700892381248;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275465964;max=18446744073709551615;plan=0;src=[1:7521670845801057004:2143];cookie=432:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.964817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037925;self_id=[1:7521670850096026638:2471];ev=NActors::IEventHandle;tablet_id=72075186224037925;tx_id=281474976715672;this=19700892737344;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275465964;max=18446744073709551615;plan=0;src=[1:7521670845801057004:2143];cookie=382:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.965944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.967000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.968339Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:24:25.968517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.968713Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:24:25.968885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.969778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:24:25.969811Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:24:25.969933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.969934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.970899Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:24:25.971049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.971326Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:24:25.971479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.972395Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:24:25.972545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.972622Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:24:25.972779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:25.973503Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:24:25.974093Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:24:26.017405Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715674; 2025-06-30T09:24:26.017506Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715674; 2025-06-30T09:24:26.017700Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[1:7521670850096026707:2474];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037926;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037931;receive=72075186224037930; 2025-06-30T09:24:26.017763Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715674; query_phases { duration_us: 44575 table_access { name: "/Root/TestTable" reads { rows: 2 bytes: 72 } } cpu_time_us: 27086 } compilation { duration_us: 49488 cpu_time_us: 48330 } process_cpu_time_us: 158 total_duration_us: 96760 total_cpu_time_us: 75574 >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestorationWithMultipleCollections >> TIncrementalRestoreWithRebootsTests::IncrementalBackupTablePathStatesWithReboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::AAARangeFullScan [GOOD] Test command err: Trying to 
start YDB, gRPC: 15088, MsgBus: 9681 2025-06-30T09:24:25.426425Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670852022012045:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:25.426451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003553/r3tmp/tmp9MdPzl/pdisk_1.dat TServer::EnableGrpc on GrpcPort 15088, node 1 2025-06-30T09:24:25.481791Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:25.482174Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670852022012025:2080] 1751275465426294 != 1751275465426297 2025-06-30T09:24:25.485694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:25.485706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:25.485708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:25.485748Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9681 2025-06-30T09:24:25.529204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:25.529245Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:25.530406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9681 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:25.568650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:24:25.572976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:25.579804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:25.617513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:25.672371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:24:25.693650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:25.948108Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670852022013630:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:25.948160Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:25.995546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:26.015644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:26.032220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:26.053129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:26.112151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:26.160008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:26.180049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:26.196585Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670856316981590:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:26.196618Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:26.196672Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670856316981595:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:26.197983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:26.201366Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670856316981597:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:26.284321Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670856316981648:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:26.430198Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; {"Plan":{"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Test"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Group (-∞, +∞)","Name (-∞, +∞)"],"Reverse":false,"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Test","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Test","ReadColumns":["Amount","Comment","Group","Name"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Limit","Limit":"1"},{"Inputs":[{"ExternalPlanNodeId":1}],"E-Rows":"0","Predicate":"item.Amount \u003C 5000","Name":"Filter","E-Size":"0","E ... ,"Group","Name"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Operators":[{"E-Rows":"0","Predicate":"item.Amount \u003C 5000","Name":"Filter","E-Size":"0","E-Cost":"0"}],"Node Type":"Filter"}],"Operators":[{"A-Rows":1,"A-SelfCpu":0.47,"A-Cpu":0.47,"A-Size":19,"Name":"Limit","Limit":"1"}],"Node Type":"Limit"}],"Operators":[{"A-Rows":1,"A-SelfCpu":0.257,"A-Cpu":0.727,"A-Size":19,"Name":"Limit","Limit":"1"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","PlanNodeType":"Query"}} query_phases { duration_us: 2178 table_access { name: "/Root/Test" reads { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 2145 affected_shards: 1 } compilation { duration_us: 33247 cpu_time_us: 32158 } process_cpu_time_us: 128 query_plan: "{\"Plan\":{\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":3,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"Tables\":[\"Test\"],\"PlanNodeId\":1,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"ReadRanges\":[\"Group (-\342\210\236, +\342\210\236)\",\"Name (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/Test\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"0\",\"Table\":\"Test\",\"ReadColumns\":[\"Amount\",\"Comment\",\"Group\",\"Name\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Operators\":[{\"Inputs\":[{\"InternalOperatorId\":1}],\"Name\":\"Limit\",\"Limit\":\"1\"},{\"Inputs\":[{\"ExternalPlanNodeId\":1}],\"E-Rows\":\"0\",\"Predicate\":\"item.Amount \\u003C 5000\",\"Name\":\"Filter\",\"E-Size\":\"0\",\"E-Cost\":\"0\"}],\"Node 
Type\":\"Limit-Filter\",\"Stats\":{\"UseLlvm\":\"undefined\",\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19}},\"Name\":\"4\",\"Push\":{\"WaitTimeUs\":{\"Count\":1,\"Sum\":685,\"Max\":685,\"Min\":685},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1}}}],\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[1,1048576]},\"Tasks\":1,\"OutputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FinishedTasks\":1,\"IngressRows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"PhysicalStageId\":0,\"StageDurationUs\":0,\"Table\":[{\"Path\":\"\\/Root\\/Test\",\"ReadRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ReadBytes\":{\"Count\":1,\"Sum\":20,\"Max\":20,\"Min\":20}}],\"BaseTimeMs\":1751275466550,\"OutputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"CpuTimeUs\":{\"Count\":1,\"Sum\":470,\"Max\":470,\"Min\":470,\"History\":[1,470]},\"Ingress\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":192,\"Max\":192,\"Min\":192,\"History\":[1,192]}},\"External\":{},\"Name\":\"KqpReadRangesSource\",\"Ingress\":{},\"Push\":{\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":192,\"Max\":192,\"Min\":192,\"History\":[1,192]},\"WaitTimeUs\":{\"Count\":1,\"Sum\":677,\"Max\":677,\"Min\":677,\"History\":[1,677]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1}}}],\"UpdateTimeMs\":1}}],\"Node Type\":\"Merge\",\"SortColumns\":[\"Group (Asc)\"],\"PlanNodeType\":\"Connection\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":3}],\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node 
Type\":\"Limit\",\"Stats\":{\"UseLlvm\":\"undefined\",\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[1,19]}},\"Name\":\"RESULT\",\"Push\":{\"WaitTimeUs\":{\"Count\":1,\"Sum\":1020,\"Max\":1020,\"Min\":1020,\"History\":[1,1020]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1}}}],\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[1,1048576]},\"InputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"ResultRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Tasks\":1,\"ResultBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"OutputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FinishedTasks\":1,\"InputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"PhysicalStageId\":1,\"StageDurationUs\":0,\"BaseTimeMs\":1751275466550,\"OutputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"CpuTimeUs\":{\"Count\":1,\"Sum\":257,\"Max\":257,\"Min\":257,\"History\":[1,257]},\"UpdateTimeMs\":1,\"Input\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19}},\"Name\":\"2\",\"Push\":{\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"WaitTimeUs\":{\"Count\":1,\"Sum\":961,\"Max\":961,\"Min\":961},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1}}}]}}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"Compilation\":{\"FromCache\":false,\"DurationUs\":33247,\"CpuTimeUs\":32158},\"ProcessCpuTimeUs\":128,\"TotalDurationUs\":36868,\"ResourcePoolId\":\"default\",\"QueuedTimeUs\":253},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":6,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"ReadRanges\":[\"Group (-\342\210\236, +\342\210\236)\",\"Name (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Path\":\"\\/Root\\/Test\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"0\",\"Table\":\"Test\",\"ReadColumns\":[\"Amount\",\"Comment\",\"Group\",\"Name\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Operators\":[{\"E-Rows\":\"0\",\"Predicate\":\"item.Amount \\u003C 5000\",\"Name\":\"Filter\",\"E-Size\":\"0\",\"E-Cost\":\"0\"}],\"Node 
Type\":\"Filter\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":0.47,\"A-Cpu\":0.47,\"A-Size\":19,\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":0.257,\"A-Cpu\":0.727,\"A-Size\":19,\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"PlanNodeType\":\"Query\"}}" query_ast: "(\n(let $1 (KqpTable \'\"/Root/Test\" \'\"72057594046644480:9\" \'\"\" \'1))\n(let $2 \'(\'\"Amount\" \'\"Comment\" \'\"Group\" \'\"Name\"))\n(let $3 (KqpRowsSourceSettings $1 $2 \'(\'(\'\"Sorted\")) (Void) \'()))\n(let $4 (Uint64 \'1))\n(let $5 (OptionalType (DataType \'String)))\n(let $6 (StructType \'(\'\"Amount\" (OptionalType (DataType \'Uint64))) \'(\'\"Comment\" $5) \'(\'\"Group\" (OptionalType (DataType \'Uint32))) \'(\'\"Name\" $5)))\n(let $7 \'(\'(\'\"_logical_id\" \'559) \'(\'\"_id\" \'\"d365a198-b5510fc5-1298eb5f-a83ab6ba\") \'(\'\"_wide_channels\" $6)))\n(let $8 (DqPhyStage \'((DqSource (DataSource \'\"KqpReadRangesSource\") $3)) (lambda \'($12) (block \'(\n (let $13 (lambda \'($16) (block \'(\n (let $17 (Member $16 \'\"Amount\"))\n (return $17 (Member $16 \'\"Comment\") (Member $16 \'\"Group\") (Member $16 \'\"Name\") (Coalesce (< $17 (Uint64 \'\"5000\")) (Bool \'false)))\n ))))\n (let $14 (WideFilter (ExpandMap (ToFlow $12) $13) (lambda \'($18 $19 $20 $21 $22) $22) $4))\n (let $15 (lambda \'($23 $24 $25 $26 $27) $23 $24 $25 $26))\n (return (FromFlow (WideMap $14 $15)))\n))) $7))\n(let $9 (DqCnMerge (TDqOutput $8 \'0) \'(\'(\'\"2\" \'\"Asc\"))))\n(let $10 (DqPhyStage \'($9) (lambda \'($28) (FromFlow (NarrowMap (Take (ToFlow $28) $4) (lambda \'($29 $30 $31 $32) (AsStruct \'(\'\"Amount\" $29) \'(\'\"Comment\" $30) \'(\'\"Group\" $31) \'(\'\"Name\" $32)))))) \'(\'(\'\"_logical_id\" \'572) \'(\'\"_id\" \'\"42ce19b0-70add6db-ae87a233-80761384\"))))\n(let $11 (DqCnResult (TDqOutput $10 \'0) \'()))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($8 $10) \'($11) \'() \'(\'(\'\"type\" \'\"data\")))) \'((KqpTxResultBinding (ListType $6) \'0 \'0)) \'(\'(\'\"type\" \'\"data_query\"))))\n)\n" total_duration_us: 36868 total_cpu_time_us: 34431 query_meta: 
"{\"query_database\":\"/Root\",\"query_parameter_types\":{},\"table_metadata\":[\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/Test\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":9},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"Amount\\\",\\\"Id\\\":3,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Comment\\\",\\\"Id\\\":4,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Group\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"Uint32\\\",\\\"TypeId\\\":2,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Name\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"Group\\\",\\\"Name\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\"],\"table_meta_serialization_type\":2,\"created_at\":\"1751275466\",\"query_type\":\"QUERY_TYPE_SQL_DML\",\"query_syntax\":\"1\",\"query_cluster\":\"db\",\"query_id\":\"7b19fc85-6a1e684b-b9e7274c-5100d39e\",\"version\":\"1.0\"}" >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow >> TIncrementalRestoreWithRebootsTests::MultipleIncrementalBackupSnapshots >> KqpNotNullColumns::ReplaceNotNull >> KqpNewEngine::BatchUpload >> AsyncIndexChangeCollector::AllColumnsInPk [GOOD] >> AsyncIndexChangeCollector::CoverIndexedColumn >> CdcStreamChangeCollector::UpsertToSameKeyWithImages [GOOD] >> CdcStreamChangeCollector::UpsertModifyDelete >> KqpNewEngine::LocksSingleShard >> CdcStreamChangeCollector::PageFaults [GOOD] >> CdcStreamChangeCollector::OldImage >> KqpCost::PointLookup [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD] >> CdcStreamChangeCollector::DeleteSingleRow [GOOD] >> KqpCost::OlapRange [GOOD] >> CdcStreamChangeCollector::IndexAndStreamUpsert [GOOD] >> CdcStreamChangeCollector::NewImage >> TestDataErasure::Run3CyclesForAllSupportedObjects [GOOD] >> KqpNewEngine::PkSelect1 >> TestDataErasure::DataErasureWithCopyTable [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::PointLookup [GOOD] Test command err: Trying to start YDB, gRPC: 24089, MsgBus: 8876 2025-06-30T09:24:26.651461Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670853226915787:2188];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:26.651536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00350b/r3tmp/tmpcHldGa/pdisk_1.dat 2025-06-30T09:24:26.700250Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:26.700385Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670853226915635:2080] 1751275466649322 != 1751275466649325 TServer::EnableGrpc on GrpcPort 24089, node 1 2025-06-30T09:24:26.711869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:26.711890Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:26.711892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:26.711975Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8876 TClient is connected to server localhost:8876 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:26.778612Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:26.778641Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:26.779399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:26.779658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:24:26.792464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:26.858576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:26.919360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:26.932247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:27.128223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670857521884548:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.128258Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.190899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.202269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.213841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.275060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.331891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.347440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.407012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.423584Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670857521885212:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.423610Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.423645Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670857521885217:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.424658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:27.427717Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670857521885219:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:27.488449Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670857521885270:3405] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:27.652527Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::DeleteSingleRow [GOOD] Test command err: 2025-06-30T09:24:22.184110Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:24:22.184177Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:24:22.184190Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00332c/r3tmp/tmpwHE4zk/pdisk_1.dat 2025-06-30T09:24:22.296374Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:24:22.297494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:22.322584Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:22.324344Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275461736967 != 1751275461736971 2025-06-30T09:24:22.371543Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:22.371592Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:22.382776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:22.461576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:22.482097Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:641:2542] 2025-06-30T09:24:22.482251Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:22.492308Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:22.492379Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:22.492590Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:24:22.492602Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:24:22.492610Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:24:22.492698Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:22.492752Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:22.492766Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:667:2542] in generation 1 2025-06-30T09:24:22.493175Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:644:2544] 2025-06-30T09:24:22.493216Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:22.494762Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:22.494805Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:22.494977Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:24:22.494986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:24:22.494993Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:24:22.495041Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:22.495058Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:22.495072Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:675:2544] in generation 1 2025-06-30T09:24:22.505472Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:22.509412Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:24:22.509511Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:22.509539Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:678:2563] 2025-06-30T09:24:22.509544Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:22.509548Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:24:22.509552Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:22.509659Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:22.509667Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:24:22.509697Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:22.509703Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:679:2564] 2025-06-30T09:24:22.509705Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:24:22.509707Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:24:22.509709Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:22.509800Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 
2025-06-30T09:24:22.509825Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:24:22.509851Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:22.509857Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:22.509866Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:24:22.509870Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:22.509874Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-30T09:24:22.509880Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-30T09:24:22.509907Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:634:2538], serverId# [1:652:2548], sessionId# [0:0:0] 2025-06-30T09:24:22.509914Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:24:22.509918Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:22.509923Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-30T09:24:22.509928Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:24:22.509952Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:22.510007Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:24:22.510034Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:24:22.510132Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:635:2539], serverId# [1:660:2554], sessionId# [0:0:0] 2025-06-30T09:24:22.510236Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-30T09:24:22.510257Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:24:22.510265Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-30T09:24:22.510587Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:22.510606Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:22.521030Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:22.521094Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:22.521125Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-30T09:24:22.521135Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:22.665543Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:696:2575], serverId# [1:699:2578], sessionId# [0:0:0] 2025-06-30T09:24:22.665601Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2576], serverId# [1:700:2579], sessionId# [0:0:0] 2025-06-30T09:24:22.666655Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... ace = 0 at datashard 72075186224037888 2025-06-30T09:24:27.412879Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:27.412886Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:27.412894Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:24:27.412914Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:27.412926Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:27.412937Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:27.413281Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:27.413322Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:24:27.413332Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:27.417546Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:27.417591Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:24:27.417603Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-30T09:24:27.417608Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-30T09:24:27.417822Z node 4 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:27.443074Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:27.498586Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:27.617774Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:24:27.617817Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:27.617897Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:27.617910Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:24:27.617922Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-06-30T09:24:27.618000Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-30T09:24:27.618048Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:24:27.618151Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:27.618371Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:27.655204Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-30T09:24:27.655244Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:27.655254Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:27.655268Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:27.655294Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:27.655311Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-30T09:24:27.655329Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:27.655974Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-30T09:24:27.655990Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 
72075186224037888 2025-06-30T09:24:27.660656Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:842:2679], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.660687Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:853:2684], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.660702Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.661914Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:27.668182Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:27.842326Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:27.843306Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:856:2687], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:24:27.879497Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:912:2724] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:27.895055Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02dwwc10y12e9xbk68639a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OWFiNGZlOGQtNzRjYThkNjktN2QxMzcwNzEtZGJhM2QxNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:27.895790Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:943:2741], serverId# [4:944:2742], sessionId# [0:0:0] 2025-06-30T09:24:27.895976Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:3] at 72075186224037888 2025-06-30T09:24:27.896073Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1751275467896030 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-30T09:24:27.896114Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-30T09:24:27.907154Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-30T09:24:27.907195Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:27.927761Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02dx442xbj0cxyv2km25qe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZGQ2NzZjMDMtNzRjYWIzZDEtNWVlZmRkNTAtNDZlY2E5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:24:27.928736Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:4] at 72075186224037888 2025-06-30T09:24:27.928856Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1751275467928812 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-30T09:24:27.928901Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-30T09:24:27.940124Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-30T09:24:27.940164Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:27.940980Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:971:2760], serverId# [4:972:2761], sessionId# [0:0:0] 2025-06-30T09:24:27.942273Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:973:2762], serverId# [4:974:2763], sessionId# [0:0:0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[delete] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004c21/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk17/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.delete/audit.txt 2025-06-30T09:24:23.102651Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:24:23.102616Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-30T09:24:23.069120Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapRange [GOOD] Test command err: Trying to start YDB, gRPC: 13832, MsgBus: 17955 2025-06-30T09:24:26.790686Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670853418297219:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:26.790713Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00351b/r3tmp/tmpNYXlkR/pdisk_1.dat 2025-06-30T09:24:26.848428Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670853418297197:2080] 1751275466790525 != 1751275466790528 2025-06-30T09:24:26.851147Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13832, node 1 
2025-06-30T09:24:26.864736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:26.864754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:26.864756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:26.864805Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17955 2025-06-30T09:24:26.893980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:26.894019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:26.895305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17955 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:26.924524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:26.939609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:26.957834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:26.980026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:26.991983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:27.172236Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670857713266090:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.172267Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.222262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.235446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.246450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.279504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.294455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.309191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.322993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.340402Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670857713266742:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.340434Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.340463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670857713266747:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.341317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:27.348270Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670857713266749:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:24:27.422194Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670857713266800:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:27.664483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:24:27.697089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7521670857713267154:2469];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:24:27.697159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7521670857713267154:2469];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:24:27.697221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: ta ... 2;id=CopyBlobIdsToV2; 2025-06-30T09:24:27.739368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:24:27.739377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:24:27.739505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7521670857713267165:2471];ev=NActors::IEventHandle;tablet_id=72075186224037931;tx_id=281474976710672;this=18025847892320;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1751275467739;max=18446744073709551615;plan=0;src=[1:7521670853418297511:2136];cookie=442:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.739617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7521670857713267155:2470];ev=NActors::IEventHandle;tablet_id=72075186224037929;tx_id=281474976710672;this=18025847893280;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1751275467739;max=18446744073709551615;plan=0;src=[1:7521670853418297511:2136];cookie=422:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.739723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[1:7521670857713267172:2475];ev=NActors::IEventHandle;tablet_id=72075186224037923;tx_id=281474976710672;this=18025847894560;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1751275467739;max=18446744073709551615;plan=0;src=[1:7521670853418297511:2136];cookie=362:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.739812Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7521670857713267171:2474];ev=NActors::IEventHandle;tablet_id=72075186224037930;tx_id=281474976710672;this=18025847895520;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1751275467739;max=18446744073709551615;plan=0;src=[1:7521670853418297511:2136];cookie=432:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.739955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7521670857713267154:2469];ev=NActors::IEventHandle;tablet_id=72075186224037925;tx_id=281474976710672;this=18025847896640;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1751275467739;max=18446744073709551615;plan=0;src=[1:7521670853418297511:2136];cookie=382:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.740970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7521670857713267167:2473];ev=NActors::IEventHandle;tablet_id=72075186224037922;tx_id=281474976710672;this=18025847897760;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1751275467740;max=18446744073709551615;plan=0;src=[1:7521670853418297511:2136];cookie=352:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.741263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;self_id=[1:7521670857713267177:2476];ev=NActors::IEventHandle;tablet_id=72075186224037928;tx_id=281474976710672;this=18025847895520;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1751275467741;max=18446744073709551615;plan=0;src=[1:7521670853418297511:2136];cookie=412:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.741592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7521670857713267178:2477];ev=NActors::IEventHandle;tablet_id=72075186224037927;tx_id=281474976710672;this=18025847802048;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1751275467737;max=18446744073709551615;plan=0;src=[1:7521670853418297511:2136];cookie=402:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.742057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[1:7521670857713267261:2478];ev=NActors::IEventHandle;tablet_id=72075186224037926;tx_id=281474976710672;this=18025847893280;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1751275467741;max=18446744073709551615;plan=0;src=[1:7521670853418297511:2136];cookie=392:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.745439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.748092Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-30T09:24:27.748252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.749337Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-30T09:24:27.749496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.750499Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-30T09:24:27.750624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.751906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.752931Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-30T09:24:27.753068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.754352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-30T09:24:27.754488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.756003Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-30T09:24:27.756214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.757187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-30T09:24:27.757313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.758292Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-30T09:24:27.758428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:24:27.760039Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-30T09:24:27.762767Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-30T09:24:27.791805Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:1914: ActorId: [1:7521670857713267553:2460] TxId: 281474976710673. Ctx: { TraceId: 01jz02dwzq3dmx4k36xadt7y02, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTQ1YTc1NzgtZmJjMzI1Y2EtYzU3Njg0YmEtYmRlYTVlOTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-30T09:24:27.795098Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:27.824696Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-30T09:24:27.824837Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-30T09:24:27.825203Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7521670857713267171:2474];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037930;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037926;receive=72075186224037931; 2025-06-30T09:24:27.825272Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-30T09:24:27.884690Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:1914: ActorId: [1:7521670857713267634:2460] TxId: 281474976710675. Ctx: { TraceId: 01jz02dx1q9bamb7c9pjztbwzh, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTQ1YTc1NzgtZmJjMzI1Y2EtYzU3Njg0YmEtYmRlYTVlOTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Can not find default state storage group for database /Root ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::Run3CyclesForAllSupportedObjects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:19.792324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:19.792360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.792367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:19.792374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:19.792389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:19.792394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:19.792405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.792422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:19.792563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:19.792652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:19.835018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:19.835048Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:19.843231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:19.843318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:19.843361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:19.887108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:19.887197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:19.887327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS 
hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.887388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:19.903154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.903233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:19.903609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.903625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.903656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:19.903666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:19.903673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:19.903697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.919274Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:19.950278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:19.950361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.950430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:19.950438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:19.950488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:19.950501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:19.955744Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.955803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:19.955868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.955881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:19.955887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:19.955896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:19.959221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.959251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:19.959260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:19.959880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.959895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.959902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.959911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:19.960701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:19.961274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:19.961317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:19.961534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 
2025-06-30T09:24:19.961565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:19.961573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.961656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:19.961666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.961719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:19.961734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:19.962242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.962253Z node 1 :FLAT_TX_SCHEMESHARD ... 657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-30T09:24:27.434939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-30T09:24:27.434948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-30T09:24:27.511496Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:27.511537Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:27.511620Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:464:2414], Recipient [1:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:27.511629Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:27.511682Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [1:803:2687], Recipient [1:464:2414]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-06-30T09:24:27.511689Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:24:27.511725Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:24:27.511813Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 3 took 68us result status StatusSuccess 2025-06-30T09:24:27.512005Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:24:27.644186Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:964:2820]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:27.644233Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:27.644263Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:964:2820], Recipient [1:964:2820]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:27.644271Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:27.644336Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [1:1388:3170], Recipient [1:964:2820]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-06-30T09:24:27.644342Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:24:27.644379Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-30T09:24:27.644465Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 64us result status StatusSuccess 2025-06-30T09:24:27.644662Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 350 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 72075186233409553 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-30T09:24:28.044176Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.044220Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.044245Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [1:295:2277], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.044251Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.085454Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:295:2277]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:28.085497Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:28.085508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-30T09:24:28.085629Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-06-30T09:24:28.085638Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:28.085643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:28.085698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:28.085705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-30T09:24:28.085717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.918000s, Timestamp# 1970-01-01T00:00:11.127000Z 2025-06-30T09:24:28.085724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-06-30T09:24:28.091383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-30T09:24:28.091669Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:4104:5377], Recipient [1:295:2277]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:28.091685Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:28.091692Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:24:28.091739Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:295:2277]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-30T09:24:28.091746Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5160: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-30T09:24:28.091752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7845: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD] Test command err: 2025-06-30T09:24:22.748092Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:24:22.748167Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:24:22.748179Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003324/r3tmp/tmpMjtwqN/pdisk_1.dat 2025-06-30T09:24:22.839876Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:24:22.840936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:22.859483Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:22.860897Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275462367848 != 1751275462367852 2025-06-30T09:24:22.904551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:22.904600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:22.915439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:22.989490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:23.010880Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:641:2542] 2025-06-30T09:24:23.010996Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:23.021933Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:23.022002Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:23.022234Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:24:23.022246Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:24:23.022256Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:24:23.022338Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:23.022403Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:23.022417Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:667:2542] in generation 1 2025-06-30T09:24:23.022846Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:644:2544] 2025-06-30T09:24:23.022895Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:23.024487Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:23.024531Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:23.024718Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:24:23.024728Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:24:23.024736Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:24:23.024790Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:23.024810Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:23.024823Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:675:2544] in generation 1 2025-06-30T09:24:23.039035Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:23.044167Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:24:23.044284Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:23.044314Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:678:2563] 2025-06-30T09:24:23.044320Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:23.044326Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:24:23.044333Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.044467Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:23.044477Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:24:23.044490Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:23.044498Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:679:2564] 2025-06-30T09:24:23.044502Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:24:23.044505Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:24:23.044509Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:23.044624Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 
2025-06-30T09:24:23.044657Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:24:23.044689Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.044697Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.044709Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:24:23.044715Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.044721Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-30T09:24:23.044731Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-30T09:24:23.044765Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:634:2538], serverId# [1:652:2548], sessionId# [0:0:0] 2025-06-30T09:24:23.044773Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:24:23.044777Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.044782Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-30T09:24:23.044788Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:24:23.044820Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:23.044892Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:24:23.044934Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:24:23.045078Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:635:2539], serverId# [1:660:2554], sessionId# [0:0:0] 2025-06-30T09:24:23.045243Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-30T09:24:23.045284Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:24:23.045297Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-30T09:24:23.045724Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:23.045744Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:23.059069Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:23.059131Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:23.059161Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-30T09:24:23.059170Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:23.203502Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:696:2575], serverId# [1:699:2578], sessionId# [0:0:0] 2025-06-30T09:24:23.203557Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2576], serverId# [1:700:2579], sessionId# [0:0:0] 2025-06-30T09:24:23.204574Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... ] schema version# 1 2025-06-30T09:24:27.474668Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:24:27.474781Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:27.475084Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1000} 2025-06-30T09:24:27.475106Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:24:27.475805Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:24:27.475828Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:27.475848Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:24:27.475859Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:24:27.475865Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-06-30T09:24:27.475887Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:27.475900Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:27.475919Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:27.476140Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:24:27.476152Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:27.476416Z node 4 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:27.476429Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:27.476435Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:24:27.476450Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:27.476460Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:27.476474Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:27.476867Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:27.476893Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:27.477541Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:27.480969Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-30T09:24:27.481007Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-30T09:24:27.481358Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:27.481455Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:24:27.481465Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:27.485780Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:744:2615], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.485812Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:755:2620], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.485824Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:27.487433Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:27.488816Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:27.488855Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:27.537273Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:27.646536Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:27.646593Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:27.647261Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:758:2623], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:24:27.684283Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:829:2663] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:27.724412Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz02dwpxcfkdg0b8pnf9r11d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NTU0MzU3MjEtOGZjOTMxMWMtODRlZTBhNzgtZjE0ZjA4NWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:27.725106Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:898:2694], serverId# [4:899:2695], sessionId# [0:0:0] 2025-06-30T09:24:27.725264Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037889 2025-06-30T09:24:27.725363Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1751275467725318 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:27.725404Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-30T09:24:27.738892Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-30T09:24:27.738928Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:27.758193Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02dwyw5x4jyvhcn9dr5xe6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=N2EyMmQwNWItYzA2NjgyODItZDBkODMwOGEtYTA4MDkzOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:24:27.764017Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:3] at 72075186224037889 2025-06-30T09:24:27.764150Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1751275467764106 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:27.764197Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1751275467764106 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:27.764223Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:3] at 72075186224037889, row count=1 2025-06-30T09:24:27.774924Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-30T09:24:27.774963Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:27.778484Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:939:2726], serverId# [4:940:2727], sessionId# [0:0:0] 2025-06-30T09:24:27.792162Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:941:2728], serverId# [4:942:2729], sessionId# [0:0:0] >> KqpNotNullColumns::ReplaceNotNull [GOOD] >> KqpNotNullColumns::JoinLeftTableWithNotNullPk+StreamLookup >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] >> KqpSqlIn::CantRewrite ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureWithCopyTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:19.351377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:19.351409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.351416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:19.351423Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:19.351443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:19.351448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:19.351462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.351482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:19.351643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:19.351749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:19.371295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:19.371321Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:19.375470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:19.375533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:19.375588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:19.377293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:19.377373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:19.377495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.377552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:19.378140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.378182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:19.378468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.378478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.378502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:19.378509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:19.378514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:19.378530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.380067Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:19.418208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:19.418311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.418400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:19.418412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:19.418474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:19.418494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:19.423533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.423621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:19.423712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.423732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:19.423740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:19.423748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:19.427380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-30T09:24:19.427432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:19.427444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:19.431324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.431362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.431374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.431388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:19.432376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:19.437713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:19.437814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:19.438160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.438225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:19.438238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.438375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:19.438395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.438456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:19.438477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 
1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:19.443747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.443778Z node 1 :FLAT_TX_SCHEMESHARD ... d(TabletID)=72075186233409552 maps to shardIdx: 72075186233409546:7 followerId=0, pathId: [OwnerId: 72075186233409546, LocalPathId: 3], pathId map=SimpleCopy, is column=0, is olap=0, RowCount 50, DataSize 5121950 2025-06-30T09:24:27.977971Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409552, followerId 0 2025-06-30T09:24:27.977979Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72075186233409546:7 with partCount# 1, rowCount# 50, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:50.000000Z at schemeshard 72075186233409546 2025-06-30T09:24:27.977986Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409552 2025-06-30T09:24:27.978010Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72075186233409546 2025-06-30T09:24:27.988373Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-30T09:24:27.988413Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5137: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-30T09:24:27.988420Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72075186233409546, queue size# 0 2025-06-30T09:24:28.014945Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.014984Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.015010Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:190:2181], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.015016Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.026909Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.026943Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.026968Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:284:2242], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.026973Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.062665Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: 
StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.062703Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.062753Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:190:2181], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.062759Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.073346Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.073382Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.073412Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:284:2242], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.073418Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.109319Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.109355Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.109389Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:190:2181], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.109395Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.119952Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.119992Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.120017Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:284:2242], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.120023Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.157612Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.157645Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event 
TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.157686Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:190:2181], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.157691Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.167943Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.167980Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.168004Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:284:2242], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.168009Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.199095Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.199133Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:28.199160Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:190:2181], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.199168Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:28.209406Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:28.209445Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:28.209455Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-30T09:24:28.209550Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [2:188:2180], Recipient [2:190:2181]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-30T09:24:28.209559Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:28.209565Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:28.209595Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:28.209602Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: 
Data shred in BSC is completed 2025-06-30T09:24:28.209618Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 14.999500s, Timestamp# 1970-01-01T00:01:25.000500Z 2025-06-30T09:24:28.209628Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 35 s 2025-06-30T09:24:28.209925Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-30T09:24:28.210660Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [2:1739:3444], Recipient [2:190:2181]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:28.210675Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:28.210682Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:24:28.210723Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125519, Sender [2:173:2172], Recipient [2:190:2181]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-30T09:24:28.210729Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5160: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-30T09:24:28.210758Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7845: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] >> KqpNewEngine::BatchUpload [GOOD] >> KqpNewEngine::Aggregate ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[insert] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004c1c/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk18/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.insert/audit.txt 2025-06-30T09:24:23.726337Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:24:23.726319Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-30T09:24:23.709169Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} >> KqpNewEngine::LocksSingleShard [GOOD] >> KqpNewEngine::LocksMultiShardOk ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] Test command err: 2025-06-30T09:24:23.398840Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:24:23.398948Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:24:23.398969Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003305/r3tmp/tmpRBhlbZ/pdisk_1.dat 2025-06-30T09:24:23.508479Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:24:23.509528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:23.530577Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:23.532606Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275462880002 != 1751275462880006 2025-06-30T09:24:23.575069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:23.575150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:23.585804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:23.663336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:23.686116Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:641:2542] 2025-06-30T09:24:23.686201Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:23.698088Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:23.698149Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:23.698358Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:24:23.698368Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:24:23.698376Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:24:23.698449Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:23.698499Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:23.698511Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:667:2542] in generation 1 2025-06-30T09:24:23.698910Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:644:2544] 2025-06-30T09:24:23.698954Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:23.700614Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:23.700653Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:23.700828Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:24:23.700840Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:24:23.700848Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:24:23.700897Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:23.700918Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:23.700931Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:675:2544] in generation 1 2025-06-30T09:24:23.711250Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:23.715887Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:24:23.715987Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:23.716016Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:678:2563] 2025-06-30T09:24:23.716021Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:23.716024Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:24:23.716029Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.716131Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:23.716140Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:24:23.716152Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:23.716161Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:679:2564] 2025-06-30T09:24:23.716166Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:24:23.716171Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:24:23.716176Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:23.716260Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 
2025-06-30T09:24:23.716281Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:24:23.716308Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.716316Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.716327Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:24:23.716334Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.716342Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-30T09:24:23.716351Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-30T09:24:23.716383Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:634:2538], serverId# [1:652:2548], sessionId# [0:0:0] 2025-06-30T09:24:23.716390Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:24:23.716394Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.716398Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-30T09:24:23.716403Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:24:23.716432Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:23.716491Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:24:23.716526Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:24:23.716666Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:635:2539], serverId# [1:660:2554], sessionId# [0:0:0] 2025-06-30T09:24:23.716817Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-30T09:24:23.716856Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:24:23.716870Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-30T09:24:23.717187Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:23.717200Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:23.727566Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:23.727619Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:23.727646Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-30T09:24:23.727656Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:23.871534Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:696:2575], serverId# [1:699:2578], sessionId# [0:0:0] 2025-06-30T09:24:23.871587Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2576], serverId# [1:700:2579], sessionId# [0:0:0] 2025-06-30T09:24:23.872528Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... ARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:28.403872Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:24:28.403883Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:28.403920Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 time 0 2025-06-30T09:24:28.403926Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:28.404342Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:28.404366Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:24:28.404378Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:24:28.404453Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:24:28.404497Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:24:28.404715Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:28.404738Z node 4 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 4] schema version# 1 2025-06-30T09:24:28.404839Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:24:28.404942Z node 4 :TX_DATASHARD DEBUG: 
datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:28.405231Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1000} 2025-06-30T09:24:28.405249Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:24:28.405884Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:24:28.405908Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:28.405927Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:24:28.405938Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:24:28.405945Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-06-30T09:24:28.405969Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:28.405984Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:28.406004Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:28.406237Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:24:28.406248Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:28.406509Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:28.406520Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:28.406526Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:24:28.406541Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:28.406551Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:28.406564Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:28.407059Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:28.407087Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:28.407695Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 
72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:28.407750Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-30T09:24:28.407761Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-30T09:24:28.407926Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:28.408004Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:24:28.408013Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:28.412831Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:744:2615], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.412866Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:755:2620], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.412880Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.414285Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:28.415835Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:28.415874Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:28.458143Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:28.563555Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:28.563612Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:28.564295Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:758:2623], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:24:28.596483Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:829:2663] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:28.610666Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz02dxkwdk3pm9k8fbe3h2ce, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MTRkOWIxZGEtODEzZjUyNmQtYjI0ZmZkYTgtYzY3YWZjNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:28.611506Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:898:2694], serverId# [4:899:2695], sessionId# [0:0:0] 2025-06-30T09:24:28.611683Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037889 2025-06-30T09:24:28.611784Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1751275468611744 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:28.611825Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-30T09:24:28.622368Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-30T09:24:28.622417Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:28.626545Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:905:2700], serverId# [4:906:2701], sessionId# [0:0:0] 2025-06-30T09:24:28.627924Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:907:2702], serverId# [4:908:2703], sessionId# [0:0:0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] Test command err: 2025-06-30T09:24:23.569070Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:24:23.569166Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:24:23.569183Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0032f4/r3tmp/tmpj07YVp/pdisk_1.dat 2025-06-30T09:24:23.676219Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:24:23.677159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:23.694887Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:23.696049Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275463089166 != 1751275463089170 2025-06-30T09:24:23.738264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:23.738312Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:23.749047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:23.822403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:23.840684Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:641:2542] 2025-06-30T09:24:23.840772Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:23.848637Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:23.848677Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:23.848801Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:24:23.848808Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:24:23.848813Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:24:23.848860Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:23.848896Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:23.848904Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:667:2542] in generation 1 2025-06-30T09:24:23.849185Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:644:2544] 2025-06-30T09:24:23.849216Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:23.850333Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:23.850361Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:23.850446Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:24:23.850452Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:24:23.850457Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:24:23.850479Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:23.850491Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:23.850498Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:675:2544] in generation 1 2025-06-30T09:24:23.860783Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:23.864543Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:24:23.864637Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:23.864663Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:678:2563] 2025-06-30T09:24:23.864667Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:23.864672Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:24:23.864676Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.864773Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:23.864780Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:24:23.864789Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:23.864796Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:679:2564] 2025-06-30T09:24:23.864800Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:24:23.864803Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:24:23.864807Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:23.864901Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 
2025-06-30T09:24:23.864925Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:24:23.864948Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.864954Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.864963Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:24:23.864966Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.864970Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-30T09:24:23.864977Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-30T09:24:23.864999Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:634:2538], serverId# [1:652:2548], sessionId# [0:0:0] 2025-06-30T09:24:23.865003Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:24:23.865006Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.865008Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-30T09:24:23.865011Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:24:23.865033Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:23.865084Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:24:23.865114Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:24:23.865233Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:635:2539], serverId# [1:660:2554], sessionId# [0:0:0] 2025-06-30T09:24:23.865389Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-30T09:24:23.865426Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:24:23.865440Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-30T09:24:23.865784Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:23.865800Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:23.876242Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:23.876311Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:23.876339Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-30T09:24:23.876347Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:24.020367Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:696:2575], serverId# [1:699:2578], sessionId# [0:0:0] 2025-06-30T09:24:24.020418Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2576], serverId# [1:700:2579], sessionId# [0:0:0] 2025-06-30T09:24:24.021508Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... ] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:28.542536Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:24:28.542546Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:24:28.542551Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-06-30T09:24:28.542571Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:28.542582Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:28.542595Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:28.542913Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037890 time 0 2025-06-30T09:24:28.542926Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:24:28.543196Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:28.543207Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:28.543211Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:24:28.543222Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:28.543228Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:28.543236Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:28.543255Z node 4 
:TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 1000} 2025-06-30T09:24:28.543263Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-30T09:24:28.543381Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:28.543395Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:28.543403Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-30T09:24:28.543572Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-30T09:24:28.543577Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-30T09:24:28.543581Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037890 2025-06-30T09:24:28.543590Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:28.543595Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:28.543601Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:24:28.544172Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:28.544195Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-30T09:24:28.544200Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-30T09:24:28.544289Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:28.544315Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037890 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:28.544345Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:24:28.544349Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:28.544411Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2025-06-30T09:24:28.544415Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-06-30T09:24:28.547210Z node 4 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:794:2652], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.547239Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:804:2657], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.547315Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.548052Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:28.549069Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:28.549092Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:28.549102Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-30T09:24:28.591029Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:28.695386Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:28.695432Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:28.695449Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-30T09:24:28.696167Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:808:2660], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:24:28.728143Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:880:2701] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:28.743593Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz02dxr20zhj8aspfcvbd1we, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OTlhZTMwNmEtNjIzYmIyMmItMTRlN2VjNzYtNGJkNTdkOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:28.744446Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:985:2744], serverId# [4:986:2745], sessionId# [0:0:0] 2025-06-30T09:24:28.744606Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037889 2025-06-30T09:24:28.744707Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1751275468744668 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 38b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:28.744770Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1751275468744668 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:28.744793Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-30T09:24:28.755319Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 38 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-30T09:24:28.755360Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:28.760317Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:992:2750], serverId# [4:993:2751], sessionId# [0:0:0] 2025-06-30T09:24:28.761744Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:994:2752], serverId# [4:995:2753], sessionId# [0:0:0] >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD] >> KqpNewEngine::PkSelect1 [GOOD] >> KqpNewEngine::PkSelect2 |83.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |83.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |83.5%| [LD] {RESULT} 
$(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export >> CdcStreamChangeCollector::OldImage [GOOD] >> CdcStreamChangeCollector::SchemaChanges >> TestDataErasure::DataErasureWithMerge [GOOD] >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] >> KqpRanges::WhereInSubquery >> CdcStreamChangeCollector::NewImage [GOOD] >> KqpRanges::IsNullInValue ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureWithMerge [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:68:2058] recipient: [1:61:2102] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:68:2058] recipient: [1:61:2102] Leader for TabletID 72057594046678944 is [1:72:2106] sender: [1:76:2058] recipient: [1:61:2102] 2025-06-30T09:24:19.003014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:19.003049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.003055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:19.003062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:19.003080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:19.003085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:19.003097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.003121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:19.003284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:19.003397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:19.018450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:19.018481Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:19.021153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:19.021216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:19.021259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:19.021619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:19.021658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:19.021806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.021866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:19.022060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.022099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:19.022406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.022423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.022443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:19.022451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:19.022458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:19.022511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.023121Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:72:2106] sender: [1:152:2058] recipient: [1:16:2063] 2025-06-30T09:24:19.045261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:19.045355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.045427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:19.045436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:19.045494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:19.045513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but 
propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:19.045794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.045852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:19.045908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.045919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:19.045924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:19.045930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:19.046021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.046028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:19.046037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:19.046113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.046119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.046125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.046132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:19.046820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:19.046953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:19.046999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 
State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:19.047275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.047308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 77 RawX2: 4294969406 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:19.047317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.047426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:19.047440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.047478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:19.047502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:19.047661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.047668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: s ... 
0: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.145124Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.145164Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:190:2181], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.145172Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.155461Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.155508Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.155540Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:284:2242], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.155547Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.191110Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.191158Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.191189Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:190:2181], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.191196Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.202767Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.202825Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.202859Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:284:2242], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.202866Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.238996Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.239040Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event 
TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.239070Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:190:2181], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.239077Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.250944Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.250990Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.251022Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:284:2242], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.251028Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.289814Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.289859Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.289954Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:190:2181], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.289962Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.303169Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269553162, Sender [2:1209:3021], Recipient [2:284:2242]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409551 TableLocalId: 2 Generation: 2 Round: 1 TableStats { DataSize: 10141461 RowCount: 99 IndexSize: 4463 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 50 HasLoanedParts: false Channels { Channel: 1 DataSize: 10141461 IndexSize: 4463 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 1054 Memory: 90173 Storage: 10149823 } ShardState: 2 UserTablePartOwners: 72075186233409551 NodeId: 2 StartTime: 50000 TableOwnerId: 72075186233409546 IsDstSplit: true FollowerId: 0 2025-06-30T09:24:29.303210Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4997: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-30T09:24:29.303234Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409551 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] state 'Ready' dataSize 10141461 rowCount 99 cpuUsage 
0.1054 2025-06-30T09:24:29.303261Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409551 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] raw table stats: DataSize: 10141461 RowCount: 99 IndexSize: 4463 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 50 HasLoanedParts: false Channels { Channel: 1 DataSize: 10141461 IndexSize: 4463 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-30T09:24:29.303272Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-30T09:24:29.314957Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.315003Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.315033Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:284:2242], Recipient [2:284:2242]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.315040Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.325318Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [2:190:2181]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:29.325364Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:29.325376Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-30T09:24:29.325471Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [2:188:2180], Recipient [2:190:2181]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-30T09:24:29.325480Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:29.325486Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:29.325518Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:29.325526Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-30T09:24:29.325542Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 29.948500s, Timestamp# 1970-01-01T00:01:10.051500Z 2025-06-30T09:24:29.325550Z node 2 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 20 s 2025-06-30T09:24:29.325838Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-30T09:24:29.326693Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [2:1527:3279], Recipient [2:190:2181]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:29.326711Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:29.326717Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:24:29.326784Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125519, Sender [2:173:2172], Recipient [2:190:2181]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-30T09:24:29.326792Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5160: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-30T09:24:29.326798Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7845: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD] Test command err: 2025-06-30T09:24:24.036016Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:24:24.036087Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:24:24.036100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0032e4/r3tmp/tmp1Eggvd/pdisk_1.dat 2025-06-30T09:24:24.136761Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:24:24.137831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:24.164540Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:24.165908Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275463675451 != 1751275463675455 2025-06-30T09:24:24.211175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:24.211221Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:24.222094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:24.309428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:24.334410Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:641:2542] 2025-06-30T09:24:24.334513Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:24.346630Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:24.346701Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:24.347108Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:24:24.347125Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:24:24.347134Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:24:24.347226Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:24.347310Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:24.347323Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:667:2542] in generation 1 2025-06-30T09:24:24.347764Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:644:2544] 2025-06-30T09:24:24.347814Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:24.349479Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:24.349519Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:24.349719Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-30T09:24:24.349728Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-30T09:24:24.349735Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-30T09:24:24.349786Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:24.349805Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:24.349818Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:675:2544] in generation 1 2025-06-30T09:24:24.361304Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:24.367287Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:24:24.367400Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:24.367432Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:678:2563] 2025-06-30T09:24:24.367439Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:24.367445Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:24:24.367452Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:24.367571Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:24.367579Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-30T09:24:24.367592Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:24.367600Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:679:2564] 2025-06-30T09:24:24.367604Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-30T09:24:24.367608Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-30T09:24:24.367611Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:24.367739Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 
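
The entries above repeat a per-datashard bootstrap sequence (TxInitSchema.Execute/Complete, TTxInit, TTxInitRestored, "persisting started state", then the switch to WaitScheme) for each tablet. A minimal sketch of slicing fused text like this into a per-tablet timeline, assuming only the entry shape visible here ("<ts> node <n> :<component> <level>: <msg>"); the helper names and regexes are hypothetical, not part of ya or YDB tooling:

import re
import sys
from collections import defaultdict

# Entry shape copied from the log above: "<ts> node <n> :<component> <level>: <msg>".
# The lookahead stops each message at the next timestamped entry.
ENTRY = re.compile(
    r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z) node (?P<node>\d+) "
    r":(?P<component>\S+) (?P<level>\w+): (?P<msg>.*?)"
    r"(?=\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z node |\Z)",
    re.S,
)
# Datashard tablet ids as they appear above (72075186224037888, 72075186224037889, ...).
TABLET = re.compile(r"\b(720751862240\d{5})\b")

def tablet_timelines(text):
    """Group (timestamp, message) pairs by tablet id for TX_DATASHARD entries."""
    out = defaultdict(list)
    for m in ENTRY.finditer(text):
        if m["component"] != "TX_DATASHARD":
            continue
        for tablet in sorted(set(TABLET.findall(m["msg"]))):
            out[tablet].append((m["ts"], m["msg"].strip()))
    return out

if __name__ == "__main__":
    for tablet, events in tablet_timelines(sys.stdin.read()).items():
        print(tablet, "first event:", events[0][0], events[0][1][:60])
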
2025-06-30T09:24:24.367772Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:24:24.367808Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:24.367816Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:24.367828Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:24:24.367834Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:24.367841Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-30T09:24:24.367851Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-30T09:24:24.367882Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:634:2538], serverId# [1:652:2548], sessionId# [0:0:0] 2025-06-30T09:24:24.367888Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-30T09:24:24.367893Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:24.367897Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-30T09:24:24.367902Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-30T09:24:24.367946Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:24.368024Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:24:24.368059Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:24:24.368189Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:635:2539], serverId# [1:660:2554], sessionId# [0:0:0] 2025-06-30T09:24:24.368324Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-30T09:24:24.368353Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:24:24.368366Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-30T09:24:24.368732Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:24.368750Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:24.379140Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:24.379196Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:24.379221Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-30T09:24:24.379231Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:24.563813Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:696:2575], serverId# [1:699:2578], sessionId# [0:0:0] 2025-06-30T09:24:24.563892Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2576], serverId# [1:700:2579], sessionId# [0:0:0] 2025-06-30T09:24:24.565006Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... rId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-30T09:24:28.910378Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-30T09:24:28.910384Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-30T09:24:28.910387Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037890 2025-06-30T09:24:28.910395Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:28.910400Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:28.910406Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-30T09:24:28.911071Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:28.911106Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-30T09:24:28.911115Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-30T09:24:28.911243Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:28.911272Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037890 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:28.911293Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:24:28.911297Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 
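
Each schema change in this log is bracketed by a "Propose scheme transaction at tablet <t> txId <id>" entry and a later "Handle TEvSchemaChangedResult <id> datashard <t>" entry. A hedged sketch, assuming exactly those two message shapes (a one-off helper, not a YDB tool; elided spans marked "..." in the log will simply produce unmatched pairs), that pairs them to estimate per-tablet schema-tx latency:

import re
from datetime import datetime

# Split the fused text into (timestamp, body) entries; shapes taken from the log above.
SPLIT = re.compile(r"(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z)")
PROPOSE = re.compile(r"Propose scheme transaction at tablet (\d+) txId (\d+)")
DONE = re.compile(r"Handle TEvSchemaChangedResult (\d+) datashard (\d+)")

def entries(text):
    parts = SPLIT.split(text)  # [pre, ts1, body1, ts2, body2, ...]
    for ts, body in zip(parts[1::2], parts[2::2]):
        yield datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S.%fZ"), body

def schema_tx_latencies(text):
    started = {}  # (tablet, txid) -> time of the Propose entry
    for ts, body in entries(text):
        m = PROPOSE.search(body)
        if m:
            started.setdefault((m.group(1), m.group(2)), ts)
            continue
        m = DONE.search(body)
        if m:
            key = (m.group(2), m.group(1))  # DONE prints txId first, tablet second
            if key in started:
                yield key, (ts - started[key]).total_seconds()
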
2025-06-30T09:24:28.911384Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2025-06-30T09:24:28.911392Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-06-30T09:24:28.914448Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:794:2652], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:24:28.914474Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:804:2657], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:24:28.914568Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.915474Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:28.916748Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:28.916782Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:28.916797Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-30T09:24:28.959377Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:29.073331Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:29.073393Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-30T09:24:29.073414Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-30T09:24:29.074286Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:808:2660], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:24:29.111187Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:880:2701] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:29.139903Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jz02dy3j97n3kkjt4ybh34fv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MzlhMzFmMWQtOWY5NzcyNGItNWQ5ZTFjMzQtYzY1OTQzMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:29.140830Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:985:2744], serverId# [4:986:2745], sessionId# [0:0:0] 2025-06-30T09:24:29.140993Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:2] at 72075186224037889 2025-06-30T09:24:29.141104Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1751275469141060 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:29.141140Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1751275469141060 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:29.141163Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-30T09:24:29.154258Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-30T09:24:29.154287Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:29.178492Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02dyb33mgw2y69gahxktqm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MzQ2YzRjMzAtNjEzNTAwZjItZWZjMzg3MDQtNTQ3MTM2OTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:24:29.179551Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:3] at 72075186224037889 2025-06-30T09:24:29.179691Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1751275469179644 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:29.179732Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 4 Group: 1751275469179644 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:29.179748Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 5 Group: 1751275469179644 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:29.179762Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 6 Group: 1751275469179644 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 24b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-30T09:24:29.179787Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:3] at 72075186224037889, row count=1 2025-06-30T09:24:29.191285Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 5 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 24 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-30T09:24:29.191332Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-30T09:24:29.195632Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1034:2784], serverId# [4:1035:2785], sessionId# [0:0:0] 2025-06-30T09:24:29.197267Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1036:2786], serverId# [4:1037:2787], sessionId# [0:0:0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] Test command err: 2025-06-30T09:24:23.131695Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled 
retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:24:23.131787Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:24:23.131801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003309/r3tmp/tmpaGmoxx/pdisk_1.dat 2025-06-30T09:24:23.237328Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:24:23.238316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:23.260195Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:23.261566Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-30T09:24:23.261829Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275462653320 != 1751275462653324 2025-06-30T09:24:23.304493Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:23.304545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:23.315319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:23.391746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:23.412714Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:24:23.412806Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:23.422167Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:23.422218Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:23.422375Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:24:23.422381Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:24:23.422387Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:24:23.422453Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:23.422467Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:23.422480Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:24:23.432872Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:23.438077Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:24:23.438185Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:23.438221Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:24:23.438227Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:23.438232Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:24:23.438236Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.438460Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:24:23.438493Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:24:23.438507Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.438515Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.438528Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:24:23.438534Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.438559Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:24:23.438730Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:23.438890Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:24:23.438919Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:24:23.439351Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:23.449784Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:23.449848Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:23.599734Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], 
sessionId# [0:0:0] 2025-06-30T09:24:23.601435Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:24:23.601466Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.601777Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.601791Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:24:23.601802Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:24:23.601875Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:24:23.601921Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:24:23.602067Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.602083Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:24:23.602603Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:24:23.602712Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.603101Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:24:23.603111Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.603243Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:24:23.603368Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.603820Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.603833Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:23.603841Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:24:23.603861Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:23.603874Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:23.603910Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.604886Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:23.605289Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:24:23.605304Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:23.605489Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:23.610878Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:23.610929Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:24:2 ... 7888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:28.676658Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:28.676727Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:24:28.676746Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-30T09:24:28.676753Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-30T09:24:28.676986Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:28.698276Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:28.759283Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:28.886775Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:24:28.886818Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:28.886879Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:28.886892Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:24:28.886903Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-06-30T09:24:28.886968Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 
72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-30T09:24:28.887010Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:24:28.887088Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:28.887281Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:28.928609Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-30T09:24:28.928652Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:28.928664Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:28.928679Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:28.928706Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:28.928725Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-30T09:24:28.928742Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:28.929365Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-30T09:24:28.929385Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:28.934193Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:842:2679], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:24:28.934229Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:853:2684], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:24:28.934244Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.935481Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:28.936962Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:29.117105Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:29.117753Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:856:2687], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:24:29.154843Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:912:2724] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:29.171754Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02dy458kywkgdv9am7kx4k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NjliZmY0ZGItNGNiNmI3ZmQtYzMxY2U0M2EtNjJlNDc1MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:29.172479Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:943:2741], serverId# [4:944:2742], sessionId# [0:0:0] 2025-06-30T09:24:29.172631Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:3] at 72075186224037888 2025-06-30T09:24:29.172728Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1751275469172686 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-30T09:24:29.172765Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-30T09:24:29.186139Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-30T09:24:29.186173Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:29.202906Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02dyc39yve208yzn1neqn9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ODljNGJlYjMtOThjN2M1MmEtZWE4YTY0YWYtZmNhOTY4NGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:24:29.203674Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:4] at 72075186224037888 2025-06-30T09:24:29.203784Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1751275469203745 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 50b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-30T09:24:29.203826Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-30T09:24:29.223606Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 50 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-30T09:24:29.223630Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:29.254094Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jz02dyd836d9azndn7vfxnx9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MjQzNTIyMmMtNjc5N2VlOWUtZTc0ODVmNGMtMzY3OGIxNWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:29.259306Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:5] at 72075186224037888 2025-06-30T09:24:29.259435Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1751275469259391 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-30T09:24:29.259477Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:5] at 72075186224037888, row count=1 2025-06-30T09:24:29.271042Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-30T09:24:29.271070Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:29.271720Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:990:2771], serverId# [4:991:2772], sessionId# [0:0:0] 2025-06-30T09:24:29.273165Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:992:2773], serverId# [4:993:2774], sessionId# [0:0:0] >> KqpNotNullColumns::ReplaceNotNullPk >> KqpNamedExpressions::NamedExpressionRandomInsert+UseSink >> TestDataErasure::ManualLaunch3CyclesWithNotConsistentCountersInSchemeShardAndBSC [GOOD] >> KqpNotNullColumns::JoinLeftTableWithNotNullPk+StreamLookup [GOOD] >> KqpNotNullColumns::JoinLeftTableWithNotNullPk-StreamLookup >> KqpAgg::AggWithLookup ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::NewImage [GOOD] 
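
Interleaved with the stderr dumps, the runner prints ">> Suite::Case [STATUS]" progress markers like the ones just above (bare ">> Suite::Case" entries are tests still queued or running, and result headers re-print the marker, so counts are approximate). A small sketch, assuming only that marker shape, for tallying statuses out of such a log:

import re
import sys
from collections import Counter

# Marker shape as printed between the stderr dumps above: ">> Suite::Case [STATUS]".
# Test names here contain word chars, "::", "+" and "-" (e.g. ...NotNullPk+StreamLookup).
MARKER = re.compile(r">> ([\w:+\-]+) \[(\w+)\]")

def tally(text):
    return Counter(status for _, status in MARKER.findall(text))

if __name__ == "__main__":
    for status, n in tally(sys.stdin.read()).most_common():
        print(status, n)
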
Test command err: 2025-06-30T09:24:23.117511Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:24:23.117641Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:24:23.117665Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00331d/r3tmp/tmp0qTYX8/pdisk_1.dat 2025-06-30T09:24:23.233707Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:24:23.234719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:23.253164Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:23.254394Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-30T09:24:23.254567Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275462570820 != 1751275462570824 2025-06-30T09:24:23.300699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:23.300737Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:23.311368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:23.384420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:23.402117Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:24:23.402188Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:23.412013Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:23.412051Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:23.412202Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:24:23.412211Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:24:23.412217Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:24:23.412272Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:23.412288Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:23.412299Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:24:23.422623Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:23.428097Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:24:23.428198Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:23.428232Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:24:23.428239Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:23.428245Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:24:23.428252Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.428437Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:24:23.428466Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:24:23.428482Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.428490Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.428500Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:24:23.428506Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.428529Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:24:23.428663Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:23.428726Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:24:23.428746Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:24:23.429137Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:23.440114Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:23.440165Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:23.584888Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], 
sessionId# [0:0:0] 2025-06-30T09:24:23.585696Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:24:23.585715Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.585920Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.585929Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:24:23.585939Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:24:23.585994Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:24:23.586021Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:24:23.586126Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.586152Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:24:23.586507Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:24:23.586593Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.586894Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:24:23.586902Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.587021Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:24:23.587036Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.587288Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.587297Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:23.587304Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:24:23.587318Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:23.587326Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:23.587349Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.587957Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:23.588314Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:24:23.588331Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:23.588520Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:23.592010Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:23.592048Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:24:2 ... ace = 0 at datashard 72075186224037888 2025-06-30T09:24:29.001729Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:29.001740Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:29.001748Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:24:29.001769Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:29.001782Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:29.001797Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:29.002291Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:29.002340Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:24:29.002350Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:29.007235Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:29.007287Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:24:29.007302Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-30T09:24:29.007308Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-30T09:24:29.007486Z node 4 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:29.029416Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:29.086802Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:29.223261Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:24:29.223291Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:29.223340Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:29.223349Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:24:29.223360Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-06-30T09:24:29.223419Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-30T09:24:29.223454Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:24:29.223519Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:29.223691Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:29.263080Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-30T09:24:29.263120Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:29.263128Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:29.263141Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:29.263165Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:29.263182Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-30T09:24:29.263200Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:29.263798Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-30T09:24:29.263849Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 
72075186224037888 2025-06-30T09:24:29.267677Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:842:2679], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:29.267709Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:853:2684], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:29.267723Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:29.268753Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:29.270121Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:29.473554Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:29.474149Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:856:2687], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:24:29.519271Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:912:2724] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:29.545943Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02dyekd4mtxkhwe43n33ss, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZjZmZDdjZDUtNDlhOWM4YmEtMTQ0ODk1YmEtODlmNGVkZjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:29.546648Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:943:2741], serverId# [4:944:2742], sessionId# [0:0:0] 2025-06-30T09:24:29.546825Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:3] at 72075186224037888 2025-06-30T09:24:29.546920Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1751275469546879 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 40b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-30T09:24:29.546958Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-30T09:24:29.557432Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 40 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-30T09:24:29.557464Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:29.572564Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jz02dyqpb8fjxzpqr837fyss, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=N2VlZDFjNWItN2ZkZDE3OWYtNGFmMDUzMTQtNWQ3MDE5N2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-30T09:24:29.573283Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:4] at 72075186224037888 2025-06-30T09:24:29.573388Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1751275469573349 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-30T09:24:29.573424Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-30T09:24:29.583867Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 18 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-30T09:24:29.583899Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:29.584570Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:971:2760], serverId# [4:972:2761], sessionId# [0:0:0] 2025-06-30T09:24:29.591427Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:973:2762], serverId# [4:974:2763], sessionId# [0:0:0] >> KqpSqlIn::KeySuffix_OnlyTail >> IndexBuildTestReboots::BaseCase [GOOD] >> KqpNewEngine::LocksMultiShardOk [GOOD] >> KqpNewEngine::LocksNoMutations >> KqpNewEngine::DuplicatedResults >> KqpNewEngine::Aggregate [GOOD] >> KqpNewEngine::AggregateTuple >> KqpSqlIn::CantRewrite [GOOD] >> KqpSqlIn::KeySuffix ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::ManualLaunch3CyclesWithNotConsistentCountersInSchemeShardAndBSC [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:24:19.795296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:19.795328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.795335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:19.795343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:19.795363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:19.795368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 
10000 2025-06-30T09:24:19.795380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:19.795402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:19.795575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:19.795670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:19.823451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:24:19.823479Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:19.829273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:19.829334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:19.829379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:19.831122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:19.831217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:19.831373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.831453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:19.832234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.832286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:19.832652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.832667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:19.832699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:19.832709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:19.832716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:19.832738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.834312Z node 1 
:HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:19.860400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:19.860470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.860525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:19.860533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:19.860577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:19.860590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:19.861315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.861371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:19.861415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.861425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:19.861431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:19.861437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:19.861976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.861994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:19.862001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:19.862415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress 
Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.862428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:19.862436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.862444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:19.863264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:19.863760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:19.863802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:19.864012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:19.864042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:19.864051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.864133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:19.864140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:19.864177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:19.864190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:24:19.864673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:19.864683Z node 1 :FLAT_TX_SCHEMESHARD ... 
shard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:29.515284Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-30T09:24:29.515314Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-30T09:24:29.515326Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-30T09:24:29.878352Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.878397Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.878464Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:464:2414], Recipient [2:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.878471Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.878526Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [2:802:2688], Recipient [2:464:2414]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-06-30T09:24:29.878532Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:24:29.878573Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:24:29.878659Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 3 took 65us result status StatusSuccess 2025-06-30T09:24:29.879172Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { 
SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-30T09:24:29.966980Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:967:2823]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.967020Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:29.967052Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:967:2823], Recipient [2:967:2823]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.967058Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:29.967121Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122945, Sender [2:1383:3167], Recipient [2:967:2823]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-06-30T09:24:29.967127Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-30T09:24:29.967162Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-30T09:24:29.967243Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 61us result status StatusSuccess 2025-06-30T09:24:29.967424Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 400 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 72075186233409553 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-30T09:24:30.058015Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:298:2280]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:30.058063Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-30T09:24:30.058095Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271124999, Sender [2:298:2280], Recipient [2:298:2280]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:30.058101Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-30T09:24:30.150952Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125517, Sender [0:0:0], Recipient [2:298:2280]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:30.150998Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5163: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-30T09:24:30.151009Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 101 2025-06-30T09:24:30.151139Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 268637738, Sender [2:303:2283], Recipient [2:298:2280]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 101 Completed: true Progress10k: 10000 2025-06-30T09:24:30.151148Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5162: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-30T09:24:30.151154Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7894: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-30T09:24:30.151188Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-30T09:24:30.151195Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-30T09:24:30.151201Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 101, duration# 2 s 2025-06-30T09:24:30.160648Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-30T09:24:30.160921Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [2:4080:5355], Recipient [2:298:2280]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:30.160935Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:24:30.160941Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046678944 2025-06-30T09:24:30.160986Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271125519, Sender [2:3236:4681], Recipient [2:298:2280]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-30T09:24:30.160993Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5160: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-30T09:24:30.160998Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7845: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> KqpMergeCn::TopSortBy_PK_Uint64_Limit3 >> KqpNewEngine::PkSelect2 [GOOD] >> KqpNewEngine::PkRangeSelect3 >> KqpSort::ReverseOptimized >> TPersQueueTest::CheckDeleteTopic [GOOD] >> TPersQueueTest::CheckDecompressionTasksWithoutSession >> KqpNotNullColumns::UpsertNotNullPkPg >> KqpNotNullColumns::ReplaceNotNullPk [GOOD] >> KqpNotNullColumns::ReplaceNotNullPkPg >> KqpRanges::WhereInSubquery [GOOD] >> KqpRanges::UpdateWhereInNoFullScan+UseSink >> CdcStreamChangeCollector::SchemaChanges [GOOD] >> TIncrementalRestoreWithRebootsTests::OperationIdempotencyAfterReboots [GOOD] |83.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest >> TPersQueueTest::PreferredCluster_RemotePreferredClusterEnabledWhileSessionInitializing_SessionDiesOnlyAfterInitializationAndDelay [GOOD] >> TPersQueueTest::PartitionsMapping >> KqpNotNullColumns::JoinLeftTableWithNotNullPk-StreamLookup [GOOD] >> KqpNotNullColumns::JoinRightTableWithNotNullColumns+StreamLookup |83.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_data_erasure/test-results/unittest/{meta.json ... results_accumulator.log} |83.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::SchemaChanges [GOOD] Test command err: 2025-06-30T09:24:23.485162Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:24:23.485239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:24:23.485259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0032e5/r3tmp/tmpJDUOOl/pdisk_1.dat 2025-06-30T09:24:23.587239Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:24:23.588232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:23.605518Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:23.606684Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-30T09:24:23.606861Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275463062001 != 1751275463062005 2025-06-30T09:24:23.649601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:23.649646Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:23.660380Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:23.739565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:23.756505Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:630:2534] 2025-06-30T09:24:23.756582Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-30T09:24:23.763840Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-30T09:24:23.763879Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-30T09:24:23.764016Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-30T09:24:23.764024Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-30T09:24:23.764029Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-30T09:24:23.764089Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-30T09:24:23.764102Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-30T09:24:23.764111Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:646:2534] in generation 1 2025-06-30T09:24:23.774396Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-30T09:24:23.777523Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-30T09:24:23.777615Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-30T09:24:23.777644Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:648:2544] 2025-06-30T09:24:23.777649Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:23.777653Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-30T09:24:23.777657Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.777850Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-30T09:24:23.777879Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-30T09:24:23.777888Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.777894Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.777903Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-30T09:24:23.777907Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.777924Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2531], serverId# [1:637:2538], sessionId# [0:0:0] 2025-06-30T09:24:23.778050Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:23.778104Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-30T09:24:23.778119Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-30T09:24:23.778421Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:23.788742Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:23.788792Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-30T09:24:23.932537Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:662:2552], serverId# [1:664:2554], 
sessionId# [0:0:0] 2025-06-30T09:24:23.933709Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:24:23.933741Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.934074Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.934090Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:24:23.934104Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-30T09:24:23.934192Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-30T09:24:23.934232Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:24:23.934398Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:23.934418Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-30T09:24:23.934986Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:24:23.935122Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:23.935527Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-30T09:24:23.935540Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.935692Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-30T09:24:23.935707Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.936073Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:23.936088Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-30T09:24:23.936096Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-30T09:24:23.936116Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:375:2369], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:23.936128Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-30T09:24:23.936156Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:23.937083Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:23.937543Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-30T09:24:23.937562Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:23.937805Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-30T09:24:23.943410Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:23.943477Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-30T09:24:2 ... UG: datashard.cpp:1822: Add schema snapshot: pathId# [OwnerId: 72057594046644480, LocalPathId: 2], version# 2, step# 1500, txId# 281474976715658, at tablet# 72075186224037888 2025-06-30T09:24:30.861942Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:30.903081Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-30T09:24:30.903123Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:30.903130Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:30.903141Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:30.903164Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:30.903178Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-30T09:24:30.903193Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:30.903699Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-30T09:24:30.903708Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:30.906485Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:842:2679], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.906509Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:853:2684], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.906518Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.907913Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:30.909310Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:31.077207Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-30T09:24:31.077725Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:856:2687], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:24:31.117141Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:912:2724] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:31.135302Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jz02e01t0kwr0n57wbwxebh2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ODQ5ZWIzMjYtNzgyZGY3NzktZDc4MGU3ZDgtNTg3N2UzNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-30T09:24:31.136077Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:943:2741], serverId# [4:944:2742], sessionId# [0:0:0] 2025-06-30T09:24:31.136247Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:252: Executing write operation for [0:3] at 72075186224037888 2025-06-30T09:24:31.136338Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1751275471136286 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 32b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-30T09:24:31.136380Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:417: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-30T09:24:31.147922Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 32 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-30T09:24:31.147959Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:31.158512Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-30T09:24:31.159508Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-30T09:24:31.159574Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715662 ssId 72057594046644480 seqNo 2:3 2025-06-30T09:24:31.159594Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 3 current version# 2 expected version# 3 at tablet# 72075186224037888 txId# 281474976715662 2025-06-30T09:24:31.159601Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715662 at tablet 72075186224037888 2025-06-30T09:24:31.171141Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-30T09:24:31.279783Z node 4 :TX_DATASHARD 
DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715662 at step 2500 at tablet 72075186224037888 { Transactions { TxId: 281474976715662 AckTo { RawX1: 0 RawX2: 0 } } Step: 2500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-30T09:24:31.279820Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:31.279876Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:31.279888Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-30T09:24:31.279900Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2500:281474976715662] in PlanQueue unit at 72075186224037888 2025-06-30T09:24:31.279987Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2500:281474976715662 keys extracted: 0 2025-06-30T09:24:31.280028Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-30T09:24:31.280054Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-30T09:24:31.280076Z node 4 :TX_DATASHARD INFO: alter_table_unit.cpp:145: Trying to ALTER TABLE at 72075186224037888 version 3 2025-06-30T09:24:31.280244Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1822: Add schema snapshot: pathId# [OwnerId: 72057594046644480, LocalPathId: 2], version# 3, step# 2500, txId# 281474976715662, at tablet# 72075186224037888 2025-06-30T09:24:31.280280Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 0 Step: 2500 TxId: 281474976715662 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcSchemaChange Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 3 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-30T09:24:31.280386Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-30T09:24:31.280999Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2500} 2025-06-30T09:24:31.281022Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:31.281312Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-30T09:24:31.281332Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 3 } 2025-06-30T09:24:31.281357Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2500 : 281474976715662] from 72075186224037888 at tablet 72075186224037888 send result to client [4:374:2368], exec latency: 0 ms, propose latency: 0 ms 2025-06-30T09:24:31.281373Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715662 state Ready TxInFly 0 2025-06-30T09:24:31.281389Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: 
at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 3 } 2025-06-30T09:24:31.281395Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-30T09:24:31.282075Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-06-30T09:24:31.282096Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-30T09:24:31.288767Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:986:2779], serverId# [4:987:2780], sessionId# [0:0:0] 2025-06-30T09:24:31.300758Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:989:2782], serverId# [4:990:2783], sessionId# [0:0:0] >> JsonChangeRecord::Heartbeat [GOOD] >> KqpNotNullColumns::UpsertNotNullPkPg [GOOD] >> KqpRanges::DateKeyPredicate >> KqpNotNullColumns::ReplaceNotNullPkPg [GOOD] >> KqpNotNullColumns::SelectNotNullColumns |83.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest >> KqpRanges::IsNullInValue [GOOD] >> KqpRanges::IsNullInJsonValue >> KqpAgg::AggWithLookup [GOOD] >> KqpAgg::AggWithSelfLookup ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TIncrementalRestoreWithRebootsTests::OperationIdempotencyAfterReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:24:26.808833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:26.808870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:26.808879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:26.808887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:26.808905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:26.808911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:26.808924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:26.808941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:26.809071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:26.809181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:26.824770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:24:26.824798Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:26.824898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:26.828621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:26.828950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:26.828987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:26.830758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:26.830819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:26.830953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:26.831035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:26.831886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:26.831928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:26.832204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:26.832215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:26.832233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:26.832241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:26.832247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:26.832288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:26.833785Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] 
started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:26.857360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:26.857433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:26.857488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:26.857494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:26.857534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:26.857549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:26.858447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:26.858513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:26.858573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:26.858584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:26.858590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:26.858597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:26.859187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:26.859204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:26.859211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:26.859763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:24:26.859776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:26.859783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:26.859791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:26.860550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:26.861464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:26.861506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:26.861654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:26.861692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:26.861698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:26.861747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:26.861753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:26.861779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:26.861792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... 
BUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1007: satisfy waiter [16:657:2616] TestWaitNotification: OK eventTxId 1007 TestModificationResults wait txId: 1008 2025-06-30T09:24:31.773443Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/.backups/collections/" OperationType: ESchemeOpRestoreBackupCollection RestoreBackupCollection { Name: "IdempotencyTestCollection" } } TxId: 1008 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:31.773559Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_copy_table.cpp:343: TCopyTable Propose, path: /MyRoot/IdempotencyTable, opId: 1008:0, at schemeshard: 72057594046678944 2025-06-30T09:24:31.773599Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1008:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/IdempotencyTable', error: path exist, request doesn't accept it (id: [OwnerId: 72057594046678944, LocalPathId: 8], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp:430, at schemeshard: 72057594046678944 2025-06-30T09:24:31.774065Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1008, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/IdempotencyTable\', error: path exist, request doesn\'t accept it (id: [OwnerId: 72057594046678944, LocalPathId: 8], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp:430" TxId: 1008 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:31.774127Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1008, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/IdempotencyTable', error: path exist, request doesn't accept it (id: [OwnerId: 72057594046678944, LocalPathId: 8], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp:430, operation: RESTORE, path: /MyRoot/.backups/collections//IdempotencyTestCollection TestModificationResult got TxId: 1008, wait until txId: 1008 2025-06-30T09:24:31.774232Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IdempotencyTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:31.774280Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IdempotencyTable" took 49us result status StatusSuccess 2025-06-30T09:24:31.774403Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IdempotencyTable" PathDescription { Self { Name: "IdempotencyTable" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1007 CreateStep: 5000008 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 
ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IdempotencyTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 No operations in database - this is expected after operation completion Verifying path states after reboot for regular restore operation... 
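Editorial aside on the rejection above: the StatusSchemeError ("path exist, request doesn't accept it") is exactly what makes re-proposing the same RESTORE after a reboot safe. A minimal sketch of that precondition follows; TSchemeShardStub and EStatus are illustrative names for this sketch only, not the real SchemeShard API.

#include <iostream>
#include <string>
#include <unordered_set>

enum class EStatus { StatusAccepted, StatusSchemeError };

struct TSchemeShardStub {
    std::unordered_set<std::string> ExistingPaths;

    // A restore materializes its target via copy-table; the propose step
    // refuses targets that already exist, so a duplicate RESTORE is rejected
    // instead of being applied twice.
    EStatus ProposeRestoreTarget(const std::string& path) {
        if (ExistingPaths.count(path) != 0) {
            return EStatus::StatusSchemeError;  // "path exist, request doesn't accept it"
        }
        ExistingPaths.insert(path);             // first propose creates the target
        return EStatus::StatusAccepted;
    }
};

int main() {
    TSchemeShardStub ss;
    // First propose succeeds (0), the repeat is rejected (1) -- the idempotency
    // behaviour the test asserts after the tablet reboot.
    std::cout << static_cast<int>(ss.ProposeRestoreTarget("/MyRoot/IdempotencyTable")) << '\n';
    std::cout << static_cast<int>(ss.ProposeRestoreTarget("/MyRoot/IdempotencyTable")) << '\n';
}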
2025-06-30T09:24:31.775612Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IdempotencyTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:31.775650Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IdempotencyTable" took 49us result status StatusSuccess 2025-06-30T09:24:31.775753Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IdempotencyTable" PathDescription { Self { Name: "IdempotencyTable" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1007 CreateStep: 5000008 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IdempotencyTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'IdempotencyTable' state: EPathStateNoChanges 2025-06-30T09:24:31.775822Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/IdempotencyTestCollection" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:31.775841Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/IdempotencyTestCollection" took 20us result status StatusSuccess 2025-06-30T09:24:31.775905Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/IdempotencyTestCollection" PathDescription { Self { Name: "IdempotencyTestCollection" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1005 CreateStep: 5000006 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "IdempotencyTestCollection" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/IdempotencyTable" } } Cluster { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'IdempotencyTestCollection' state: EPathStateNoChanges Idempotency test completed - system correctly rejected duplicate operation >> KqpNewEngine::AggregateTuple [GOOD] >> KqpNewEngine::AsyncIndexUpdate >> KqpSqlIn::KeySuffix_OnlyTail [GOOD] >> KqpSqlIn::KeyTypeMissmatch_Int >> KqpNewEngine::DuplicatedResults [GOOD] >> KqpNewEngine::FlatmapLambdaMutiusedConnections >> KqpNewEngine::LocksNoMutations [GOOD] >> KqpNewEngine::LocksNoMutationsSharded |83.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::Heartbeat [GOOD] |83.5%| [TA] $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpNewEngine::PkRangeSelect3 [GOOD] >> KqpNewEngine::PkRangeSelect4 |83.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest >> KqpSort::ReverseOptimized [GOOD] >> KqpSort::ReverseOptimizedWithPredicate >> TExternalTableTestReboots::SimpleDropExternalTableWithReboots2 [GOOD] |83.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |83.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query >> KqpMergeCn::TopSortBy_PK_Uint64_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Int32_Limit3 >> KqpRanges::UpdateWhereInNoFullScan+UseSink [GOOD] >> KqpRanges::UpdateWhereInNoFullScan-UseSink |83.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/test-results/unittest/{meta.json ... results_accumulator.log} |83.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpNotNullColumns::SelectNotNullColumns [GOOD] >> KqpNotNullColumns::ReplaceNotNullPg >> JsonChangeRecord::DataChange [GOOD] |83.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query >> KqpSqlIn::KeySuffix [GOOD] >> KqpSqlIn::KeySuffix_NotPointPrefix >> JsonChangeRecord::DataChangeVersion [GOOD] >> KqpRanges::DateKeyPredicate [GOOD] >> KqpRanges::DuplicateKeyPredicateLiteral |83.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest >> KqpNotNullColumns::JoinRightTableWithNotNullColumns+StreamLookup [GOOD] >> KqpNotNullColumns::JoinRightTableWithNotNullColumns-StreamLookup ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::SimpleDropExternalTableWithReboots2 [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:24:18.214941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:18.214974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:18.214981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:18.214988Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:18.215005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:18.215008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:18.215016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:18.215029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:18.215139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:18.215217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:18.230063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:24:18.230094Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:18.230204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:24:18.233296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:18.234181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:18.234221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:18.236719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:18.236806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:18.236966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:18.237038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:18.238592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:18.238671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:18.239033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:18.239049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:18.239060Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:18.239071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:18.239077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:18.239114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:24:18.241122Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:24:18.267543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:18.267643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.267724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:18.267734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:18.267787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:18.267804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:18.268816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:18.268865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:18.268935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.268948Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:18.268955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:18.268961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:18.269500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.269515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:18.269522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:18.270070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.270085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.270092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:18.270101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:18.270923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:18.271427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:18.271477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:18.271715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:18.271745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:32.844139Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:24:32.844159Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:24:32.844182Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:32.844188Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:210:2210], at schemeshard: 72057594046678944, txId: 1004, path id: 1 2025-06-30T09:24:32.844194Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:210:2210], at schemeshard: 72057594046678944, txId: 1004, path id: 4 2025-06-30T09:24:32.844198Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:210:2210], at schemeshard: 72057594046678944, txId: 1004, path id: 3 2025-06-30T09:24:32.844268Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-30T09:24:32.844276Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-30T09:24:32.844292Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:24:32.844302Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:24:32.844307Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:24:32.844310Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:24:32.844315Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-30T09:24:32.844322Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:24:32.844328Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:0 2025-06-30T09:24:32.844333Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:0 2025-06-30T09:24:32.844347Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:24:32.844351Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 
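Editorial aside: the TEvUpdateAck entries below count a publication down from 3 in-flight path versions to completion, and only then are waiters notified. A hedged sketch of that counting pattern, under the assumption that a single per-transaction counter suffices; TPublicationTracker is an illustrative name, not the actual implementation.

#include <cstdint>
#include <iostream>
#include <map>

struct TPublicationTracker {
    // txId -> number of published path versions still awaiting TEvUpdateAck
    std::map<uint64_t, int> InFlight;

    void StartPublication(uint64_t txId, int paths) { InFlight[txId] = paths; }

    // Returns true only when the last ack arrives, i.e. when the operation
    // may notify its subscribers ("Publication complete, notify & remove").
    bool AckPublish(uint64_t txId) {
        auto it = InFlight.find(txId);
        if (it == InFlight.end()) return false;  // unknown or already complete
        if (--it->second > 0) return false;      // "Publication in-flight, count: N"
        InFlight.erase(it);                      // publication complete
        return true;
    }
};

int main() {
    TPublicationTracker t;
    t.StartPublication(1004, 3);  // 3 paths published, as in the log above
    std::cout << t.AckPublish(1004) << t.AckPublish(1004) << t.AckPublish(1004) << '\n';  // 001
}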
2025-06-30T09:24:32.844356Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1004, publications: 3, subscribers: 0 2025-06-30T09:24:32.844360Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-30T09:24:32.844364Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-30T09:24:32.844368Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:24:32.844450Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:32.844461Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:32.844466Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:24:32.844471Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:24:32.844476Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:24:32.844551Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:24:32.844559Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:24:32.844572Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:24:32.844667Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:32.844679Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:32.844683Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:24:32.844687Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 
72057594046678944, LocalPathId: 1], version: 11 2025-06-30T09:24:32.844692Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:24:32.844971Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:32.844989Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:32.844995Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:24:32.844999Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:24:32.845004Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:24:32.845020Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 0 2025-06-30T09:24:32.845624Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:24:32.845697Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:24:32.845748Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:24:32.845938Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 TestModificationResult got TxId: 1004, wait until txId: 1004 TestWaitNotification wait txId: 1004 2025-06-30T09:24:32.846007Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-30T09:24:32.846017Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-30T09:24:32.846121Z node 50 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-30T09:24:32.846145Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:24:32.846150Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [50:392:2381] TestWaitNotification: OK eventTxId 1004 2025-06-30T09:24:32.846232Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:32.846266Z node 50 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 46us result status StatusPathDoesNotExist 2025-06-30T09:24:32.846310Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |83.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest |83.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::DataChange [GOOD] >> KqpNewEngine::AsyncIndexUpdate [GOOD] >> KqpNewEngine::AutoChooseIndex >> KqpAgg::AggWithSelfLookup [GOOD] >> KqpAgg::AggWithSelfLookup2 >> TSyncNeighborsTests::SerDes2 [GOOD] >> TSyncBrokerTests::ShouldReturnTokensWithSameVDiskId [GOOD] >> TSyncNeighborsTests::SerDes1 [GOOD] >> KqpSqlIn::KeyTypeMissmatch_Int [GOOD] >> KqpSqlIn::KeyTypeMissmatch_Str >> KqpRanges::IsNullInJsonValue [GOOD] >> KqpRanges::IsNullPartial |83.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::DataChangeVersion [GOOD] |83.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |83.6%| [LD] {RESULT} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut >> KqpNewEngine::LocksNoMutationsSharded [GOOD] >> KqpNewEngine::MultiEffects >> KqpNewEngine::FlatmapLambdaMutiusedConnections [GOOD] >> KqpNewEngine::EmptyMapWithBroadcast |83.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest >> KqpSort::ReverseOptimizedWithPredicate [GOOD] >> KqpSort::ReverseMixedOrderNotOptimized >> KqpNewEngine::PkRangeSelect4 [GOOD] >> KqpNewEngine::PruneEffectPartitions+UseSink |83.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |83.6%| [LD] {RESULT} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes2 [GOOD] >> KqpNotNullColumns::ReplaceNotNullPg [GOOD] >> KqpNotNullColumns::SecondaryKeyWithNotNullColumn ------- 
[TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes1 [GOOD] Test command err: 2025-06-30T09:24:34.062699Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-30T09:24:34.062758Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:50: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:6:2053], token sent, active: 1, waiting: 0 >> KqpMergeCn::TopSortBy_Int32_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Float_Limit4 |83.6%| [TA] $(B)/ydb/core/tx/replication/service/ut_json_change_record/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpRanges::UpdateWhereInNoFullScan-UseSink [GOOD] >> KqpRanges::UpdateWhereInWithNull >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestorationWithMultipleCollections [GOOD] >> TExternalTableTestReboots::SimpleDropExternalTableWithReboots [GOOD] >> KqpRanges::DuplicateKeyPredicateLiteral [GOOD] >> KqpRanges::DuplicateCompositeKeyPredicate >> TEvLocalSyncDataTests::SqueezeBlocks1 [GOOD] >> TEvLocalSyncDataTests::SqueezeBlocks2 [GOOD] >> TQuorumTrackerTests::Erasure4Plus2BlockNotIncludingMyFailDomain_8_2 [GOOD] >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_4_2 [GOOD] >> TSyncNeighborsTests::SerDes3 [GOOD] >> TSyncBrokerTests::ShouldEnqueue >> KqpSqlIn::KeySuffix_NotPointPrefix [GOOD] >> KqpSqlIn::ComplexKey >> TSyncBrokerTests::ShouldEnqueue [GOOD] >> TSyncBrokerTests::ShouldEnqueueWithSameVDiskId [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsert+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsert-UseSink >> KqpNotNullColumns::JoinRightTableWithNotNullColumns-StreamLookup [GOOD] >> KqpNotNullColumns::OptionalParametersDataQuery |83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TEvLocalSyncDataTests::SqueezeBlocks2 [GOOD] >> KqpSqlIn::KeyTypeMissmatch_Str [GOOD] >> TEvLocalSyncDataTests::SqueezeBlocks3 [GOOD] >> KqpSqlIn::SecondaryIndex_PgKey >> TQuorumTrackerTests::Erasure4Plus2BlockIncludingMyFailDomain_8_2 [GOOD] >> TSyncBrokerTests::ShouldProcessAfterRelease >> KqpSort::ReverseMixedOrderNotOptimized [GOOD] >> KqpSort::ReverseRangeOptimized ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestorationWithMultipleCollections [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:24:27.096973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:27.097006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:27.097013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 
0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:27.097019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:27.097037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:27.097043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:27.097065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:27.097082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:27.097203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:27.097313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:27.115044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:24:27.115072Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:27.115199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:27.119139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:27.119508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:27.119552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:27.121573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:27.121682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:27.121821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:27.121922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:27.122877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:27.122928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:27.123252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:27.123265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:27.123288Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:27.123298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:27.123304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:27.123357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.125084Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:27.147870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:27.147954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.148028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:27.148037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:27.148086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:27.148102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:27.148963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:27.149037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:27.149098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.149112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:27.149118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 
2025-06-30T09:24:27.149125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:27.149687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.149704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:27.149713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:27.150123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.150134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.150141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:27.150149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:27.150897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:27.151363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:27.151406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:27.151632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:27.151664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:27.151672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:27.151741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:27.151749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:27.151790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:27.151803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... rnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:34.844462Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/Collection1" took 29us result status StatusSuccess 2025-06-30T09:24:34.844533Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/Collection1" PathDescription { Self { Name: "Collection1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1005 CreateStep: 5000006 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 13 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "Collection1" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/Table1" } } Cluster { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'Collection1' state: EPathStateNoChanges Verifying path states after reboot for regular restore operation... 
2025-06-30T09:24:34.844600Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:34.844615Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table2" took 16us result status StatusSuccess 2025-06-30T09:24:34.844664Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table2" PathDescription { Self { Name: "Table2" PathId: 13 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1012 CreateStep: 5000013 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table2" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 13 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 13 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'Table2' state: EPathStateNoChanges 2025-06-30T09:24:34.844727Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 
2025-06-30T09:24:34.844739Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table3" took 13us result status StatusSuccess 2025-06-30T09:24:34.844784Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table3" PathDescription { Self { Name: "Table3" PathId: 14 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1012 CreateStep: 5000013 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table3" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 13 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 14 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'Table3' state: EPathStateNoChanges 2025-06-30T09:24:34.844842Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/Collection2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:34.844855Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/Collection2" took 14us result status StatusSuccess 2025-06-30T09:24:34.844901Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/Collection2" PathDescription { Self { Name: "Collection2" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1007 CreateStep: 5000008 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 9 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1008 CreateStep: 5000009 ParentPathId: 8 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 13 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "Collection2" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/Table2" } Entries { Type: ETypeTable Path: "/MyRoot/Table3" } } Cluster { } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'Collection2' state: EPathStateNoChanges Multiple collections backup table path state restoration test completed successfully |83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_4_2 [GOOD] >> KqpNewEngine::EmptyMapWithBroadcast [GOOD] |83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes3 [GOOD] >> KqpNewEngine::FlatMapLambdaInnerPrecompute >> KqpRanges::IsNullPartial [GOOD] >> KqpRanges::LiteralOr >> TSyncBrokerTests::ShouldProcessAfterRelease [GOOD] >> TSyncBrokerTests::ShouldReleaseInQueue [GOOD] >> KqpNewEngine::MultiEffects [GOOD] >> KqpNewEngine::MultiEffectsOnSameTable >> KqpNotNullColumns::SecondaryKeyWithNotNullColumn [GOOD] >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumn ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::SimpleDropExternalTableWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] 
sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:24:18.789638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:18.789665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:18.789689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:18.789694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:18.789709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:18.789713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:18.789723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:18.789739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:18.789854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:18.789940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:18.807151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:24:18.807182Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:18.807313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:24:18.819570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:18.820758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:18.820808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as 
Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:18.830208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:18.830365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:18.830556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:18.830670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:18.831849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:18.831922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:18.832264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:18.832278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:18.832289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:18.832300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:18.832308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:18.832344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:24:18.837335Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:24:18.876295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:18.876401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.876470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:18.876481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: 
TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:18.876536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:18.876556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:18.877494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:18.877552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:18.877624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.877637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:18.877644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:18.877650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:18.878326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.878343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:18.878350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:18.879026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.879044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:18.879054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:18.879063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:18.880011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:18.880605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg 
operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:18.880657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:18.880958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:18.880994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:35.028224Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:24:35.028242Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:24:35.028263Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:35.028267Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:207:2207], at schemeshard: 72057594046678944, txId: 1004, path id: 1 2025-06-30T09:24:35.028273Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:207:2207], at schemeshard: 72057594046678944, txId: 1004, path id: 4 2025-06-30T09:24:35.028277Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:207:2207], at schemeshard: 72057594046678944, txId: 1004, path id: 3 2025-06-30T09:24:35.028328Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-30T09:24:35.028335Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-30T09:24:35.028352Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:24:35.028357Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:24:35.028362Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:24:35.028366Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready 
parts: 1/1 2025-06-30T09:24:35.028370Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-30T09:24:35.028376Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:24:35.028381Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:0 2025-06-30T09:24:35.028385Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:0 2025-06-30T09:24:35.028398Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:24:35.028402Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:24:35.028407Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1004, publications: 3, subscribers: 0 2025-06-30T09:24:35.028411Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-30T09:24:35.028414Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-30T09:24:35.028418Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:24:35.028487Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:35.028496Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:35.028501Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:24:35.028506Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:24:35.028510Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:24:35.028582Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:24:35.028588Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:24:35.028600Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: 
DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:24:35.028659Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:35.028668Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:35.028673Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:24:35.028677Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-30T09:24:35.028681Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:24:35.028930Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:35.028945Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:35.028950Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:24:35.028954Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:24:35.028959Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:24:35.028971Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 0 2025-06-30T09:24:35.029576Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:24:35.029733Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:24:35.029754Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:24:35.029825Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 TestModificationResult got TxId: 1004, wait until txId: 1004 
TestWaitNotification wait txId: 1004 2025-06-30T09:24:35.029884Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-30T09:24:35.029893Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-30T09:24:35.029976Z node 50 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-30T09:24:35.029993Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:24:35.029998Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [50:385:2374] TestWaitNotification: OK eventTxId 1004 2025-06-30T09:24:35.030079Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:35.030111Z node 50 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 43us result status StatusPathDoesNotExist 2025-06-30T09:24:35.030148Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TQuorumTrackerTests::ErasureNoneNeverHasQuorum_4_1 [GOOD] >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_5_2 [GOOD] >> KqpNewEngine::PruneEffectPartitions+UseSink [GOOD] >> KqpNewEngine::PrecomputeKey >> KqpAgg::AggWithSelfLookup2 [GOOD] >> KqpAgg::AggWithHop >> KqpNewEngine::AutoChooseIndex [GOOD] >> KqpNewEngine::AutoChooseIndexOrderByLimit ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldEnqueueWithSameVDiskId [GOOD] Test command err: 2025-06-30T09:24:35.434884Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-30T09:24:35.434926Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [1:6:2053], enqueued, active: 1, waiting: 1 2025-06-30T09:24:35.470843Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-06-30T09:24:35.470883Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: 
TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], enqueued, active: 1, waiting: 1 2025-06-30T09:24:35.470903Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:79: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:7:2054], enqueued, active: 1, waiting: 1 >> TIncrementalRestoreWithRebootsTests::IncrementalBackupTablePathStatesWithReboots [GOOD] >> KqpMergeCn::TopSortBy_Float_Limit4 [GOOD] >> KqpMergeCn::TopSortBy_String_Limit3 |83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::Erasure4Plus2BlockIncludingMyFailDomain_8_2 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldReleaseInQueue [GOOD] Test command err: 2025-06-30T09:24:35.816478Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-30T09:24:35.816523Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [1:6:2053], enqueued, active: 1, waiting: 1 2025-06-30T09:24:35.816532Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:123: TEvReleaseSyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token released, active: 1, waiting: 1 2025-06-30T09:24:35.816540Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:105: ProcessQueue(), VDisk actor id: [0:1:2], actor id: [1:6:2053], token sent, active: 0, waiting: 1 2025-06-30T09:24:35.846835Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-06-30T09:24:35.846876Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], enqueued, active: 1, waiting: 1 2025-06-30T09:24:35.846896Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:146: TEvReleaseSyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], removed from queue, active: 1, waiting: 0 |83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_5_2 [GOOD] >> TSyncBrokerTests::ShouldReturnToken [GOOD] >> TSyncBrokerTests::ShouldReleaseToken >> KqpRanges::UpdateWhereInWithNull [GOOD] >> KqpRanges::ValidatePredicates >> TSyncBrokerTests::ShouldReleaseToken [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TIncrementalRestoreWithRebootsTests::IncrementalBackupTablePathStatesWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:24:27.197877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:27.197906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:27.197912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:27.197918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:27.197934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:27.197939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:27.197949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:27.197965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:27.198106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:27.198182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:27.211915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:24:27.211941Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:27.212030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:27.217009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:27.217280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:27.217313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:27.218450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:27.218499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:27.218629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:27.218712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:27.219505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:27.219553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:27.219841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:27.219852Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:27.219876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:27.219886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:27.219892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:27.219940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.221341Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:27.242005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:27.242092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.242172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:27.242181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:27.242231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:27.242244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:27.243202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:27.243268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:27.243320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.243331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-30T09:24:27.243337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:27.243343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:27.243779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.243790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:27.243796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:27.244094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.244102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.244108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:27.244116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:27.244793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:27.245246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:27.245289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:27.245497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:27.245523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:27.245532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:27.245592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:27.245599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:27.245635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:27.245646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... ize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 12 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:36.146351Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/IncrementalRestoreCollection/backup_001_incremental/IncrementalTable1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:36.146373Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/IncrementalRestoreCollection/backup_001_incremental/IncrementalTable1" took 27us result status StatusSuccess 2025-06-30T09:24:36.146454Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/IncrementalRestoreCollection/backup_001_incremental/IncrementalTable1" PathDescription { Self { Name: "IncrementalTable1" PathId: 10 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1009 CreateStep: 5000010 ParentPathId: 9 PathState: EPathStateAwaitingOutgoingIncrementalRestore Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrementalTable1" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: 
"incremental_data" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 12 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 10 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:36.146564Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/IncrementalRestoreCollection/backup_001_full/IncrementalTable2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:36.146586Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/IncrementalRestoreCollection/backup_001_full/IncrementalTable2" took 23us result status StatusSuccess 2025-06-30T09:24:36.146662Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/IncrementalRestoreCollection/backup_001_full/IncrementalTable2" PathDescription { Self { Name: "IncrementalTable2" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1007 CreateStep: 5000008 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrementalTable2" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 
TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 12 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:36.157637Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/IncrementalRestoreCollection/backup_001_incremental/IncrementalTable2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:36.157773Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/IncrementalRestoreCollection/backup_001_incremental/IncrementalTable2" took 164us result status StatusSuccess 2025-06-30T09:24:36.157977Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/IncrementalRestoreCollection/backup_001_incremental/IncrementalTable2" PathDescription { Self { Name: "IncrementalTable2" PathId: 11 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1010 CreateStep: 5000011 ParentPathId: 9 PathState: EPathStateAwaitingOutgoingIncrementalRestore Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrementalTable2" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "incremental_data" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" 
KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 12 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 11 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Incremental backup table trimmed name reconstruction verification completed successfully Incremental backup table path states correctly preserved after reboot |83.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldReleaseToken [GOOD] Test command err: 2025-06-30T09:24:36.699180Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-30T09:24:36.730127Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-06-30T09:24:36.730188Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:123: TEvReleaseSyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token released, active: 1, waiting: 0 |83.6%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_json_change_record/test-results/unittest/{meta.json ... 
results_accumulator.log}
|83.6%| [LD] {RESULT} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut
>> KqpNamedExpressions::NamedExpressionSimple+UseSink
>> KqpNewEngine::MultiEffectsOnSameTable [GOOD]
>> KqpNewEngine::LookupColumns
>> KqpQuery::CreateAsSelectBadTypes+IsOlap
>> KqpRanges::DuplicateCompositeKeyPredicate [GOOD]
>> KqpRanges::DeleteNotFullScan+UseSink
>> KqpSqlIn::ComplexKey [GOOD]
>> KqpSqlIn::Dict
>> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumn [GOOD]
>> KqpNotNullColumns::UpsertNotNullPk
>> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumnPg
>> KqpSort::ReverseRangeOptimized [GOOD]
>> KqpSort::ReverseRangeLimitOptimized
>> KqpNewEngine::PrecomputeKey [GOOD]
>> KqpNewEngine::PrimaryView
>> KqpNewEngine::FlatMapLambdaInnerPrecompute [GOOD]
>> KqpNewEngine::DqSourceSequentialLimit
>> KqpNewEngine::PureExpr
>> KqpNotNullColumns::OptionalParametersDataQuery [GOOD]
>> KqpNotNullColumns::OptionalParametersScanQuery
>> KqpQuery::OlapCreateAsSelect_Simple
>> KqpAgg::AggWithHop [GOOD]
>> KqpAgg::GroupByLimit
>> SelfHeal::TestOneInactiveMaintenanceRequestOneMaintenanceRequestMirror3dc [GOOD]
>> KqpTypes::UnsafeTimestampCastV0
>> SelfHeal::TestOneInactiveMaintenanceRequestOneMaintenanceRequest4Plus2Block
>> KqpMergeCn::TopSortBy_String_Limit3 [GOOD]
>> KqpMergeCn::TopSortBy_Utf8_Limit2
>> KqpQuery::CreateAsSelectTypes-NotNull-IsOlap
|83.6%| [TA] $(B)/ydb/core/blobstorage/vdisk/syncer/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> IndexBuildTestReboots::IndexPartitioning [GOOD]
>> KqpParams::MissingOptionalParameter+UseSink
>> KqpSqlIn::SecondaryIndex_PgKey [GOOD]
>> KqpSqlIn::SecondaryIndex_SimpleKey
>> KqpRanges::LiteralOr [GOOD]
>> KqpRanges::LiteralOrCompisite
>> KqpNewEngine::AutoChooseIndexOrderByLimit [GOOD]
>> KqpNewEngine::AutoChooseIndexOrderByLambda
>> TExternalTableTestReboots::ParallelCreateDrop [GOOD]
>> KqpQuery::CreateAsSelectBadTypes+IsOlap [GOOD]
>> KqpQuery::CreateAsSelectBadTypes-IsOlap
>> KqpNotNullColumns::UpsertNotNullPk [GOOD]
>> KqpNotNullColumns::UpsertNotNull
|83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest
|83.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats
|83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats
|83.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/test-results/unittest/{meta.json ...
results_accumulator.log} |83.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats >> KqpNewEngine::DqSourceSequentialLimit [GOOD] >> KqpNewEngine::DqSourceLocksEffects >> KqpNewEngine::PureExpr [GOOD] >> KqpNewEngine::PureTxMixedWithDeferred >> KqpSort::ReverseRangeLimitOptimized [GOOD] >> KqpSort::TopParameter ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::ParallelCreateDrop [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:24:17.514586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:17.514621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:17.514627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:17.514633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:17.514649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:17.514654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:17.514665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:17.514683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:17.514848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:17.514978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:17.527538Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:24:17.527570Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:17.527693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:24:17.531292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:17.532454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:17.532513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:17.535621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:17.535702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:17.535868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:17.535944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:17.537318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:17.537396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:17.537784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:17.537798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:17.537809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:17.537818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:17.537825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:17.537868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:24:17.539814Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:24:17.567527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:17.567645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:17.567735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:17.567745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:17.567801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:17.567820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:17.568859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:17.568918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:17.569004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:17.569018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:17.569026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:17.569033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:17.569646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:17.569664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:17.569670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:17.570210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:17.570244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-30T09:24:17.570252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:17.570260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:17.571053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:17.571577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:17.571626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:17.571864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:17.571896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
2025-06-30T09:24:38.114849Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:38.114917Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:24:38.114943Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:24:38.114984Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:38.114991Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [67:211:2211], at schemeshard: 72057594046678944, txId: 1004, path id: 1 2025-06-30T09:24:38.114998Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [67:211:2211], at schemeshard: 72057594046678944, txId: 1004, path id: 4 2025-06-30T09:24:38.115003Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [67:211:2211], at schemeshard: 72057594046678944, txId: 1004, path id: 3 2025-06-30T09:24:38.115019Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-30T09:24:38.115030Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-30T09:24:38.115050Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:24:38.115055Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:24:38.115063Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:24:38.115066Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:24:38.115072Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-30T09:24:38.115081Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:24:38.115086Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:0 2025-06-30T09:24:38.115093Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:0 2025-06-30T09:24:38.115121Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:24:38.115126Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 
72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:24:38.115133Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1004, publications: 3, subscribers: 0 2025-06-30T09:24:38.115138Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-30T09:24:38.115146Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-30T09:24:38.115150Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:24:38.115419Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:38.115441Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:38.115449Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:24:38.115456Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:24:38.115463Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:24:38.115534Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:24:38.115542Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:24:38.115557Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:24:38.115823Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:38.115838Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:38.115843Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:24:38.115850Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 
72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-30T09:24:38.115856Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:24:38.118926Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:38.118964Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:24:38.118977Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:24:38.118984Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:24:38.118993Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:24:38.119023Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 0 2025-06-30T09:24:38.120097Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:24:38.120178Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:24:38.120229Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:24:38.120512Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 TestModificationResult got TxId: 1004, wait until txId: 1004 TestWaitNotification wait txId: 1004 2025-06-30T09:24:38.120588Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-30T09:24:38.120599Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-30T09:24:38.120693Z node 67 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-30T09:24:38.120716Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:24:38.120723Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [67:399:2388] TestWaitNotification: OK eventTxId 1004 2025-06-30T09:24:38.120818Z node 67 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DropMe" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:38.120864Z node 67 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DropMe" took 61us result status StatusPathDoesNotExist 2025-06-30T09:24:38.120910Z node 67 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/DropMe\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/DropMe" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> KqpNewEngine::LookupColumns [GOOD] >> KqpNamedExpressions::NamedExpressionSimple+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionSimple-UseSink >> KqpRanges::DeleteNotFullScan+UseSink [GOOD] >> KqpRanges::DeleteNotFullScan-UseSink >> KqpTypes::UnsafeTimestampCastV0 [GOOD] >> KqpTypes::UnsafeTimestampCastV1 >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumnPg [GOOD] >> KqpQuery::CreateAsSelectBadTypes-IsOlap [GOOD] >> KqpQuery::CreateAsSelectPath+UseTablePathPrefix ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::BaseCase [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:32.257179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:32.257203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:32.257208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:32.257212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:32.257224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:32.257227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:32.257234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:32.257246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:32.257344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:32.257417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:32.274289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:32.274319Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:32.274453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:32.278093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:32.279105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:32.279141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:32.281265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:32.281350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:32.281513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:32.281571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:32.282960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:32.283015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 
2025-06-30T09:22:32.283370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:32.283384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:32.283395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:32.283405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:32.283413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:32.283449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:32.285382Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:32.313362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:32.313468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:32.313547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:32.313557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:32.313617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:32.313634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:32.314626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:32.314679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:32.314772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:32.314787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:32.314794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:32.314801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:32.315408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:32.315422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:32.315431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:32.315924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:32.315936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:32.315943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:32.315952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:32.316873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:32.317363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:32.317408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:32.317658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:32.317689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { 
RawX1: 139 RawX2: 42949694 ... DomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:30.624725Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:24:30.624773Z node 278 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1" took 52us result status StatusSuccess 2025-06-30T09:24:30.624974Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1" PathDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "index1" 
LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:30.625067Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:24:30.625107Z node 278 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplTable" took 41us result status StatusSuccess 2025-06-30T09:24:30.625281Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/dir/Table/index1/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 
RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpQuery::CreateAsSelectTypes-NotNull-IsOlap [GOOD] >> KqpQuery::CreateAsSelectTypes+NotNull-IsOlap >> KqpNotNullColumns::OptionalParametersScanQuery [GOOD] >> KqpNewEngine::PrimaryView [GOOD] >> KqpMergeCn::TopSortBy_Utf8_Limit2 [GOOD] >> KqpMergeCn::TopSortBy_Timestamp_Limit2 >> KqpNotNullColumns::UpsertNotNull [GOOD] >> KqpNotNullColumns::UpsertNotNullPg >> KqpQuery::OlapCreateAsSelect_Simple [GOOD] >> KqpQuery::OltpCreateAsSelect_Simple >> YdbSdkSessionsPool::PeriodicTask/1 [GOOD] >> KqpSqlIn::Dict [GOOD] >> KqpSqlIn::Delete >> KqpParams::MissingOptionalParameter+UseSink [GOOD] >> KqpParams::ImplicitParameterTypes >> TPersQueueTest::PartitionsMapping [GOOD] >> TPersQueueTest::MessageMetadata >> KqpNewEngine::AutoChooseIndexOrderByLambda [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::LookupColumns [GOOD] Test command err: Trying to start YDB, gRPC: 4453, MsgBus: 18656 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002149/r3tmp/tmpjfLhsv/pdisk_1.dat 2025-06-30T09:24:27.859156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:24:27.892768Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:27.895185Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670858774341280:2080] 1751275467769064 != 1751275467769067 TServer::EnableGrpc on GrpcPort 4453, node 1 2025-06-30T09:24:27.910785Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:27.910802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:27.910803Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:27.910847Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:18656 2025-06-30T09:24:27.952844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:27.952876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:27.953772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18656 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:28.026992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:28.030503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:28.037134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:24:28.058478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.090673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:28.105251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:28.379428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670863069310170:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.379470Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.435620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.445076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.456455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.470423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.484420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.497175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.511471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.526864Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670863069310823:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.526915Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.526927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670863069310828:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.527684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:28.530726Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670863069310830:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:28.621821Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670863069310881:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:28.799441Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 21893, MsgBus: 22332 2025-06-30T09:24:29.246471Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670866340919379:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:29.247147Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002149/r3tmp/tmpKUpXBf/pdisk_1.dat 2025-06-30T09:24:29.315246Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21893, node 2 2025-06-30T09:24:29.335378Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdate ... 4976710669 completed, doublechecking } 2025-06-30T09:24:36.687805Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670898188544954:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:36.922839Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 18650, MsgBus: 3093 2025-06-30T09:24:37.316010Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670900304434938:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:37.316204Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002149/r3tmp/tmpJlEIKF/pdisk_1.dat 2025-06-30T09:24:37.349347Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18650, node 7 2025-06-30T09:24:37.361558Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:37.361576Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:37.361581Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:37.361644Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:3093 2025-06-30T09:24:37.418679Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:37.418715Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:37.420021Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3093 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:24:37.444734Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:37.447245Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:37.453132Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:37.537605Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:24:37.596021Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:37.619741Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:37.989619Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670900304436483:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:37.989647Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:37.998214Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.012849Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.025807Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.044660Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.059005Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.076950Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.142434Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.185972Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670904599404434:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.185998Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.186198Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670904599404439:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.187210Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.189572Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:38.189753Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670904599404441:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:38.279235Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670904599404492:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:38.317962Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpRanges::LiteralOrCompisite [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumnPg [GOOD] Test command err: Trying to start YDB, gRPC: 64719, MsgBus: 32398 2025-06-30T09:24:30.358387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670872370867927:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:30.358415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0020ce/r3tmp/tmpSgukly/pdisk_1.dat 2025-06-30T09:24:30.452879Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:30.462013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:30.462041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 64719, node 1 2025-06-30T09:24:30.467251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:30.483862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:30.483879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:30.483881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:30.483923Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32398 TClient is connected to server localhost:32398 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:30.575242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:30.577869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:30.848111Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670872370868511:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.848158Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.900344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:30.939032Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670872370868612:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.939070Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.939194Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670872370868617:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.940205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:30.942686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:24:30.942761Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670872370868619:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:24:31.024268Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670876665835966:2386] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:31.065208Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670876665836008:2314], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:14: Error: Missing key column in input: Key for table: /Root/TestReplaceNotNullPk, code: 2029 2025-06-30T09:24:31.066064Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YzEwYTkzNmUtODk1YTM4YWQtNDhkNTEwYjUtOTBmYmQzZDU=, ActorId: [1:7521670872370868488:2288], ActorState: ExecuteState, TraceId: 01jz02e06k51nxrpxxwy9etx53, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-30T09:24:31.070984Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670876665836017:2318], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:49: Error: Failed to convert type: Struct<'Key':Null,'Value':String> to Struct<'Key':Uint64,'Value':String?>
:1:49: Error: Failed to convert 'Key': Null to Uint64
:1:49: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-30T09:24:31.071485Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YzEwYTkzNmUtODk1YTM4YWQtNDhkNTEwYjUtOTBmYmQzZDU=, ActorId: [1:7521670872370868488:2288], ActorState: ExecuteState, TraceId: 01jz02e06v034v8expsh9j2hag, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 8052, MsgBus: 9941 2025-06-30T09:24:31.320486Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670876986944314:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:31.320514Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0020ce/r3tmp/tmpFv4per/pdisk_1.dat 2025-06-30T09:24:31.352107Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8052, node 2 2025-06-30T09:24:31.379000Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:31.379013Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:31.379015Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:31.379063Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9941 2025-06-30T09:24:31.433493Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:31.433519Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:9941 2025-06-30T09:24:31.437991Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
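For reference, the two compile failures above are the expected negative cases for /Root/TestReplaceNotNullPk. A minimal YQL sketch that reproduces both, assuming the schema implied by the error text (a non-optional Uint64 key column 'Key' and an optional String column 'Value' from Struct<'Key':Uint64,'Value':String?>) would be:

    -- hypothetical schema, inferred from the conversion error above
    CREATE TABLE TestReplaceNotNullPk (Key Uint64 NOT NULL, Value String, PRIMARY KEY (Key));
    REPLACE INTO TestReplaceNotNullPk (Value) VALUES ("a");            -- code 2029: missing key column Key in input
    REPLACE INTO TestReplaceNotNullPk (Key, Value) VALUES (NULL, "b"); -- code 2031: cannot convert Null to Uint64

Both statements are rejected during type annotation, which matches the ReplyQueryCompileError entries in the log rather than a runtime abort.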
2025-06-30T09:24:31.461424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:31.464981Z node 2 :FLAT_TX_SCHEMESHAR ... bTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:24:37.640231Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:38.171786Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670906018168707:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.171814Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.175850Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.200884Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670906018168856:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.200913Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.201023Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670906018168861:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.202228Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.207163Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:24:38.207330Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670906018168863:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:24:38.258872Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670906018168914:2424] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:38.424610Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [7:7521670906018169055:2338], TxId: 281474976715663, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=YjFkMGE5NzQtZGMyZGM1OTgtODdhMzc1MjItOWYwZWJiNjg=. TraceId : 01jz02e7b18xzqsv7k7dmwj1tv. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: BAD_REQUEST KIKIMR_BAD_COLUMN_TYPE: {
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 }. 2025-06-30T09:24:38.424755Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [7:7521670906018169056:2339], TxId: 281474976715663, task: 2. Ctx: { SessionId : ydb://session/3?node_id=7&id=YjFkMGE5NzQtZGMyZGM1OTgtODdhMzc1MjItOWYwZWJiNjg=. CustomerSuppliedId : . TraceId : 01jz02e7b18xzqsv7k7dmwj1tv. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [7:7521670906018169051:2288], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-30T09:24:38.424790Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [7:7521670906018169057:2340], TxId: 281474976715663, task: 3. Ctx: { SessionId : ydb://session/3?node_id=7&id=YjFkMGE5NzQtZGMyZGM1OTgtODdhMzc1MjItOWYwZWJiNjg=. TraceId : 01jz02e7b18xzqsv7k7dmwj1tv. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [7:7521670906018169051:2288], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-30T09:24:38.424811Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [7:7521670906018169058:2341], TxId: 281474976715663, task: 4. Ctx: { SessionId : ydb://session/3?node_id=7&id=YjFkMGE5NzQtZGMyZGM1OTgtODdhMzc1MjItOWYwZWJiNjg=. TraceId : 01jz02e7b18xzqsv7k7dmwj1tv. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [7:7521670906018169051:2288], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-30T09:24:38.424885Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=7&id=YjFkMGE5NzQtZGMyZGM1OTgtODdhMzc1MjItOWYwZWJiNjg=, ActorId: [7:7521670906018168691:2288], ActorState: ExecuteState, TraceId: 01jz02e7b18xzqsv7k7dmwj1tv, Create QueryResponse for error on request, msg: 2025-06-30T09:24:38.437750Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7521670906018169083:2345], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing not null column in input: Index1. All not null columns should be initialized, code: 2032 2025-06-30T09:24:38.437892Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=7&id=YjFkMGE5NzQtZGMyZGM1OTgtODdhMzc1MjItOWYwZWJiNjg=, ActorId: [7:7521670906018168691:2288], ActorState: ExecuteState, TraceId: 01jz02e7d065b60q3119vkw8we, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-30T09:24:38.442653Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:38.443938Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7521670906018169103:2353], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing not null column in input: Index1. All not null columns should be initialized, code: 2032 2025-06-30T09:24:38.444057Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=7&id=YjFkMGE5NzQtZGMyZGM1OTgtODdhMzc1MjItOWYwZWJiNjg=, ActorId: [7:7521670906018168691:2288], ActorState: ExecuteState, TraceId: 01jz02e7d8dpbxh2y4qf4g36e2, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-30T09:24:38.448776Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7521670906018169122:2361], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:14: Error: Missing not null column in input: Index1. All not null columns should be initialized, code: 2032 2025-06-30T09:24:38.449406Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=7&id=YjFkMGE5NzQtZGMyZGM1OTgtODdhMzc1MjItOWYwZWJiNjg=, ActorId: [7:7521670906018168691:2288], ActorState: ExecuteState, TraceId: 01jz02e7ddbkkvr171c3h5yd24, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-30T09:24:38.512503Z node 7 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-30T09:24:38.516210Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7521670906018169141:2369], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 2025-06-30T09:24:38.517520Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=7&id=YjFkMGE5NzQtZGMyZGM1OTgtODdhMzc1MjItOWYwZWJiNjg=, ActorId: [7:7521670906018168691:2288], ActorState: ExecuteState, TraceId: 01jz02e7dpdn6y0qnhe0jfptjk, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-30T09:24:38.606964Z node 7 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-30T09:24:38.608182Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7521670906018169170:2379], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 2025-06-30T09:24:38.610503Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=7&id=YjFkMGE5NzQtZGMyZGM1OTgtODdhMzc1MjItOWYwZWJiNjg=, ActorId: [7:7521670906018168691:2288], ActorState: ExecuteState, TraceId: 01jz02e7fqckmjptn4898xcv0v, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-30T09:24:38.702112Z node 7 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-30T09:24:38.703421Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7521670906018169190:2388], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 2025-06-30T09:24:38.705008Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=7&id=YjFkMGE5NzQtZGMyZGM1OTgtODdhMzc1MjItOWYwZWJiNjg=, ActorId: [7:7521670906018168691:2288], ActorState: ExecuteState, TraceId: 01jz02e7jk0hdkhz36gmdmpwnt, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: >> KqpRanges::LiteralOrCompisiteCollision >> KqpSqlIn::SecondaryIndex_SimpleKey [GOOD] >> KqpSqlIn::SecondaryIndex_ComplexKey_In_And_In |83.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |83.6%| [LD] {RESULT} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PrimaryView [GOOD] Test command err: Trying to start YDB, gRPC: 10756, MsgBus: 13665 2025-06-30T09:24:28.229645Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670862119994881:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:28.229681Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00212c/r3tmp/tmpAdAoP8/pdisk_1.dat 2025-06-30T09:24:28.298465Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10756, node 1 2025-06-30T09:24:28.313903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:28.313918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:28.313919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:28.313963Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:28.331846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:28.331877Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:28.332994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13665 TClient is connected to server localhost:13665 WaitRootIsUp 'Root'... 
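For reference, the Index1 failures in the preceding test block (codes 2031 and 2032) correspond to writes that omit, or explicitly null out, a NOT NULL column. A minimal YQL sketch under an assumed schema (the table and index names here are illustrative, not taken from the test source):

    -- hypothetical table with a NOT NULL indexed column, mirroring the errors above
    CREATE TABLE TestSecondaryIndex (
        Key Uint64 NOT NULL,
        Index1 String NOT NULL,
        Value String,
        PRIMARY KEY (Key),
        INDEX idx_index1 GLOBAL ON (Index1)
    );
    UPSERT INTO TestSecondaryIndex (Key, Value) VALUES (1ul, "a");    -- code 2032: not null column Index1 missing from input
    UPSERT INTO TestSecondaryIndex (Key, Index1) VALUES (1ul, NULL);  -- code 2031: NULL into NOT NULL column Index1

The first variant is rejected at compile time (ReplyQueryCompileError), while passing an explicit NULL can also surface at execution as the KIKIMR_BAD_COLUMN_TYPE InternalError seen earlier in the log.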
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:28.381734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:28.384589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:28.391975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:28.419217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:28.439007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:28.452073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:28.617517Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670862119996443:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.617566Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.666171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.675788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.686007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.700424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.715110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.729188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.743226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.760085Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670862119997094:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.760113Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.760135Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670862119997099:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.761128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:28.770902Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670862119997101:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:28.830664Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670862119997152:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 5618, MsgBus: 28203 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00212c/r3tmp/tmpDONNtl/pdisk_1.dat 2025-06-30T09:24:29.552398Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670867242696387:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:29.565177Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:24:29.598049Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5618, node 2 2025-06-30T09:24:29.618969Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:29.618984Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty ... on=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00212c/r3tmp/tmptsU90I/pdisk_1.dat 2025-06-30T09:24:37.470364Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1841, node 7 2025-06-30T09:24:37.493690Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:37.493709Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:37.493711Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:37.493756Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16012 TClient is connected to server localhost:16012 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:24:37.556990Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:37.557016Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:37.558586Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:24:37.560485Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:37.563357Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:37.576561Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:37.611745Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:37.665355Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:37.690675Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.063653Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670906743414316:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.063706Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.067664Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.083537Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.098378Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.123286Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.187550Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.245256Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.254719Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.276445Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670906743414976:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.276478Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.276601Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670906743414981:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.278235Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.282330Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670906743414983:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:38.366829Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670906743415034:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:38.453991Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:38.568314Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.586482Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.601462Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::OptionalParametersScanQuery [GOOD] Test command err: Trying to start YDB, gRPC: 11761, MsgBus: 16116 2025-06-30T09:24:27.627731Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670860195082425:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:27.627760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00223b/r3tmp/tmpolhxld/pdisk_1.dat 2025-06-30T09:24:27.729737Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:27.730069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:27.730099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:27.736840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11761, node 1 2025-06-30T09:24:27.778993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:27.779008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will 
try to initialize from file: (empty maybe) 2025-06-30T09:24:27.779010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:27.779066Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16116 TClient is connected to server localhost:16116 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:27.968743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:27.985036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:28.258556Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670864490050318:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.258580Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.306546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.369985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670864490050420:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.370013Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670864490050425:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.370018Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.370792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:28.376517Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670864490050427:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:24:28.464209Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670864490050478:2387] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:28.491541Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670864490050520:2314], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:14: Error: Missing not null column in input: Value. All not null columns should be initialized, code: 2032 2025-06-30T09:24:28.491665Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=MWQ0ZmI0MDgtOGViNWQ2YzctNzFhNzVkOWYtODdkZTJkNDc=, ActorId: [1:7521670864490050314:2288], ActorState: ExecuteState, TraceId: 01jz02dxp6drwde8w054gwem00, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-30T09:24:28.496610Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670864490050529:2318], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:47: Error: Failed to convert type: Struct<'Key':Int32,'Value':Null> to Struct<'Key':Uint64?,'Value':String>
:1:47: Error: Failed to convert 'Value': Null to String
:1:47: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-30T09:24:28.496720Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=MWQ0ZmI0MDgtOGViNWQ2YzctNzFhNzVkOWYtODdkZTJkNDc=, ActorId: [1:7521670864490050314:2288], ActorState: ExecuteState, TraceId: 01jz02dxpc6n4e3t2vw10jcz3r, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 12917, MsgBus: 7043 2025-06-30T09:24:28.746274Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670862927638141:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:28.746348Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00223b/r3tmp/tmpf46PT0/pdisk_1.dat 2025-06-30T09:24:28.778997Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12917, node 2 2025-06-30T09:24:28.795014Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:28.795029Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:28.795032Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:28.795095Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7043 2025-06-30T09:24:28.847066Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:28.847105Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:28.848184Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7043 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
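The KQP_COMPILE_ACTOR failures earlier in this test block illustrate the two NOT NULL column checks the KqpNotNullColumns suite exercises: code 2032 ("Missing not null column in input") fires when a NOT NULL column is absent from the inserted row entirely, while code 2031 fires when the input supplies an explicit NULL that cannot be converted to the column type (the log shows the conversion failure Struct<'Key':Int32,'Value':Null> to Struct<'Key':Uint64?,'Value':String>). A minimal YQL sketch that would reproduce both errors; the table name and shape here are assumptions inferred from the error messages, not taken from the test source:

    -- Hypothetical table; the real test tables are created by the C++ harness.
    CREATE TABLE TestNotNull (
        Key Uint64,
        Value String NOT NULL,
        PRIMARY KEY (Key)
    );

    -- Code 2032: the NOT NULL column 'Value' is missing from the input row.
    UPSERT INTO TestNotNull (Key) VALUES (1);

    -- Code 2031: an explicit NULL cannot be converted to the NOT NULL String
    -- column, matching the Null-to-String conversion error in the log.
    UPSERT INTO TestNotNull (Key, Value) VALUES (2, NULL);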
2025-06-30T09:24:28.859319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:28.860809Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:28.865881Z node 2 :FLAT_TX_SCHEMESHAR ... 7762515]; 2025-06-30T09:24:37.517980Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; TServer::EnableGrpc on GrpcPort 18109, node 7 2025-06-30T09:24:37.541679Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:37.544619Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:37.544630Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:37.544631Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:37.544678Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10574 2025-06-30T09:24:37.615091Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:37.615130Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:10574 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:24:37.623318Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:37.640230Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:24:37.651027Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:37.663743Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:24:37.700038Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:37.762236Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:37.802502Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.196183Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670908383554858:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.196215Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.213090Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.227076Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.283595Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.341518Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.355250Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.367030Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.381133Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.406472Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670908383555515:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.406507Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.406719Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670908383555520:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.408582Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.414032Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:38.414189Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670908383555522:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:38.475175Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670908383555573:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:38.515956Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:38.704391Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.808704Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275478848, txId: 281474976715674] shutting down 2025-06-30T09:24:38.867081Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275478904, txId: 281474976715676] shutting down 2025-06-30T09:24:38.907193Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275478946, txId: 281474976715678] shutting down >> KqpNewEngine::DqSourceLocksEffects [GOOD] >> KqpNewEngine::FullScanCount >> TPersQueueTest::CheckDecompressionTasksWithoutSession [GOOD] >> TPersQueueTest::Codecs_InitWriteSession_DefaultTopicSupportedCodecsInInitResponse >> KqpNotNullColumns::UpsertNotNullPg [GOOD] >> KqpNotNullColumns::UpdateTable_DontChangeNotNullWithIndex >> TIncrementalRestoreWithRebootsTests::MultipleIncrementalBackupSnapshots [GOOD] >> KqpNewEngine::PureTxMixedWithDeferred [GOOD] >> KqpNewEngine::PrunePartitionsByLiteral >> KqpNamedExpressions::NamedExpressionSimple-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink-UseDataQuery >> KqpQuery::CreateAsSelectPath+UseTablePathPrefix [GOOD] >> KqpQuery::CreateAsSelectPath-UseTablePathPrefix >> KqpQuery::CreateAsSelectTypes+NotNull-IsOlap [GOOD] >> KqpQuery::CreateAsSelectTypes-NotNull+IsOlap ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::AutoChooseIndexOrderByLambda [GOOD] Test command err: Trying to start YDB, gRPC: 16730, MsgBus: 10888 2025-06-30T09:24:27.634210Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670861303370521:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:27.635232Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002159/r3tmp/tmpzezlBt/pdisk_1.dat 2025-06-30T09:24:27.700607Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16730, node 1 2025-06-30T09:24:27.722972Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:27.722990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:27.722993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:27.723042Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10888 2025-06-30T09:24:27.775031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:27.775065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:27.779123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10888 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:27.808542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:27.825795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:27.892774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:24:27.919758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:27.939950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:28.104078Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670865598339311:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.104109Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.182083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.194017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.204694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.262880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.319758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.374553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.385322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:28.404067Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670865598339974:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.404102Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.404127Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670865598339979:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:28.405083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:28.411904Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670865598339981:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:24:28.492402Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670865598340032:3401] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:28.630989Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 11990, MsgBus: 6477 2025-06-30T09:24:28.991067Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670863775639702:2093];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:28.992794Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002159/r3tmp/tmpbRMmuO/pdisk_1.dat 2025-06-30T09:24:29.034223Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11990, node 2 2025-06-30T09:24:29.059415Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:29.059431Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09 ... n type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 27706, MsgBus: 17236 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002159/r3tmp/tmpycrVgF/pdisk_1.dat 2025-06-30T09:24:38.149566Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670907349241721:2243];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:38.156316Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:24:38.194928Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:38.197285Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7521670907349241493:2080] 1751275478137567 != 1751275478137570 TServer::EnableGrpc on GrpcPort 27706, node 7 2025-06-30T09:24:38.215185Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:38.215201Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:38.215204Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:38.215269Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:17236 2025-06-30T09:24:38.255159Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:38.255200Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:38.256361Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17236 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:38.288097Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:38.289820Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:38.295276Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.315538Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.380726Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:38.398969Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.603186Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670907349243104:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.603210Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.611004Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.672288Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.692590Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.704907Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.731045Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.745940Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.758934Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.780076Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670907349243761:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.780105Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.780324Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670907349243766:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.781417Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.786489Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670907349243768:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:38.883705Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670907349243819:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:39.104235Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:39.135822Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpTypes::UnsafeTimestampCastV1 [GOOD] >> KqpTypes::Time64Columns+EnableTableDatetime64 >> KqpSort::TopParameter [GOOD] >> KqpSort::TopParameterFilter >> KqpRanges::DeleteNotFullScan-UseSink [GOOD] >> KqpRanges::CastKeyBounds >> KqpNamedExpressions::NamedExpressionRandomInsert-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomDataQuery+UseSink >> KqpQuery::OltpCreateAsSelect_Simple [GOOD] >> KqpQuery::OltpCreateAsSelect_Disable |83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> KqpParams::ImplicitParameterTypes [GOOD] >> KqpParams::ImplicitSameParameterTypesQueryCacheCheck |83.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> KqpMergeCn::TopSortBy_Timestamp_Limit2 [GOOD] >> KqpMergeCn::TopSortBy_Interval_Limit3 >> KqpAgg::GroupByLimit [GOOD] >> KqpAgg::AggHashShuffle+UseSink |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> LabeledDbCounters::OneTabletRemoveCounters [GOOD] >> LabeledDbCounters::OneTabletRestart >> KqpTypes::Time64Columns+EnableTableDatetime64 [GOOD] >> KqpTypes::Time64Columns-EnableTableDatetime64 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TIncrementalRestoreWithRebootsTests::MultipleIncrementalBackupSnapshots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:24:27.730542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:24:27.730566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:27.730573Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:24:27.730578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:24:27.730593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:24:27.730598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:24:27.730609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:24:27.730624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:24:27.730785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:27.730886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:24:27.749542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:24:27.749565Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:27.749715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:24:27.754175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:24:27.754401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:24:27.754433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:24:27.755910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:24:27.755968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:24:27.756093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:27.756957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:24:27.757946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:27.757991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:24:27.758264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:27.758275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:27.758302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:24:27.758311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:27.758319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:24:27.758361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.759914Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:24:27.787530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:24:27.787592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.787648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:24:27.787657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:24:27.787704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:24:27.787721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:27.788345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:27.788398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:24:27.788437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.788448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:24:27.788454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:24:27.788460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:24:27.788934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.788948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:27.788960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:24:27.789355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.789370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:24:27.789377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:27.789385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:24:27.790233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:24:27.790684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:24:27.790723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:24:27.790927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:27.790957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:27.790965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:24:27.791025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:24:27.791034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-30T09:24:27.791064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:24:27.791077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 23 PathsLimit: 10000 ShardsInside: 15 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 21 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ✓ Verified incremental backup table 'backup_003_incremental/SnapshotTable3' has correct state: EPathStateAwaitingOutgoingIncrementalRestore 2025-06-30T09:24:40.532481Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MultiSnapshotCollection/backup_001_incremental" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:40.532501Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MultiSnapshotCollection/backup_001_incremental" took 22us result status StatusSuccess 2025-06-30T09:24:40.532583Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MultiSnapshotCollection/backup_001_incremental" PathDescription { Self { Name: "backup_001_incremental" PathId: 10 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1009 CreateStep: 5000010 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: 
"" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 8 } ChildrenExist: true } Children { Name: "SnapshotTable1" PathId: 11 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1010 CreateStep: 5000011 ParentPathId: 10 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "SnapshotTable2" PathId: 12 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1011 CreateStep: 5000012 ParentPathId: 10 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "SnapshotTable3" PathId: 13 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1012 CreateStep: 5000013 ParentPathId: 10 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 23 PathsLimit: 10000 ShardsInside: 15 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 10 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ✓ Verified trimmed name reconstruction: backup_001 -> backup_001_incremental 2025-06-30T09:24:40.532725Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MultiSnapshotCollection/backup_002_incremental" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:40.532747Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MultiSnapshotCollection/backup_002_incremental" took 24us result status StatusSuccess 2025-06-30T09:24:40.532806Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MultiSnapshotCollection/backup_002_incremental" PathDescription { Self { Name: "backup_002_incremental" PathId: 14 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1013 CreateStep: 5000014 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 8 } ChildrenExist: true } Children { Name: "SnapshotTable1" PathId: 
15 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1014 CreateStep: 5000015 ParentPathId: 14 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "SnapshotTable2" PathId: 16 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1015 CreateStep: 5000016 ParentPathId: 14 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "SnapshotTable3" PathId: 17 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1016 CreateStep: 5000017 ParentPathId: 14 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 23 PathsLimit: 10000 ShardsInside: 15 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 14 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ✓ Verified trimmed name reconstruction: backup_002 -> backup_002_incremental 2025-06-30T09:24:40.532943Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MultiSnapshotCollection/backup_003_incremental" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:24:40.532960Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MultiSnapshotCollection/backup_003_incremental" took 18us result status StatusSuccess 2025-06-30T09:24:40.533021Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MultiSnapshotCollection/backup_003_incremental" PathDescription { Self { Name: "backup_003_incremental" PathId: 18 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1017 CreateStep: 5000018 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 8 } ChildrenExist: true } Children { Name: "SnapshotTable1" PathId: 19 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1018 CreateStep: 5000019 ParentPathId: 18 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { 
Name: "SnapshotTable2" PathId: 20 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1019 CreateStep: 5000020 ParentPathId: 18 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "SnapshotTable3" PathId: 21 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1020 CreateStep: 5000021 ParentPathId: 18 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 23 PathsLimit: 10000 ShardsInside: 15 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 18 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ✓ Verified trimmed name reconstruction: backup_003 -> backup_003_incremental Multiple incremental backup snapshots test passed: 9 incremental backup tables |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> KqpQuery::CreateAsSelectPath-UseTablePathPrefix [GOOD] >> KqpQuery::OltpCreateAsSelect_Disable [GOOD] >> KqpQuery::OlapCreateAsSelect_Complex >> KqpSqlIn::Delete [GOOD] >> KqpSqlIn::InWithCast >> KqpRanges::LiteralOrCompisiteCollision [GOOD] >> KqpRanges::Like >> KqpNotNullColumns::UpdateTable_DontChangeNotNullWithIndex [GOOD] >> KqpNotNullColumns::UpdateTable_UniqIndex >> KqpNewEngine::PrunePartitionsByLiteral [GOOD] >> KqpNewEngine::PrunePartitionsByExpr |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::PeriodicTask/1 [GOOD] >> KqpSort::TopParameterFilter [GOOD] |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> KqpNewEngine::FullScanCount [GOOD] >> KqpSqlIn::SecondaryIndex_ComplexKey_In_And_In [GOOD] >> KqpSqlIn::PhasesCount >> KqpRanges::CastKeyBounds [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::CreateAsSelectPath-UseTablePathPrefix [GOOD] Test command err: Trying to start YDB, gRPC: 3158, MsgBus: 13644 2025-06-30T09:24:37.461308Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670903618135843:2085];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:37.463378Z node 1 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004275/r3tmp/tmpN8ijSG/pdisk_1.dat 2025-06-30T09:24:37.534518Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3158, node 1 2025-06-30T09:24:37.569747Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:37.569761Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:37.569764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:37.569821Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13644 2025-06-30T09:24:37.597869Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:37.597923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:37.599047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13644 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:37.670147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:37.675050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:37.981451Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670903618136391:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:37.981498Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:37.982927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670903618136416:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:37.984046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:37.990982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:24:37.991507Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670903618136418:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:24:38.058991Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670907913103765:2329] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:38.149268Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670907913103786:2299], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:6:62: Error: At function: KiCreateTable!
:6:20: Error: Invalid type for column: Value. Only YQL data types and PG types are currently supported, code: 2031 2025-06-30T09:24:38.149935Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YmY0NzdjOWUtYjM5MjRlNTgtYjZkM2EwYzYtZGYyODc2MDY=, ActorId: [1:7521670903618136387:2288], ActorState: ExecuteState, TraceId: 01jz02e6nq32g6ttk58r77q3vv, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-30T09:24:38.167873Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670907913103810:2308], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:6:45: Error: At function: KiCreateTable!
:6:20: Error: Invalid type for column: Value. Only YQL data types and PG types are currently supported, code: 2031 2025-06-30T09:24:38.168674Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=MzgxZjgxMTgtNTkxMzFhMTUtYTM5ODA0MGMtNzMzZTVlODA=, ActorId: [1:7521670907913103799:2302], ActorState: ExecuteState, TraceId: 01jz02e749ez52wtjywbv8j8hk, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-30T09:24:38.181067Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670907913103822:2314], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:6:43: Error: At function: KiCreateTable!
:6:20: Error: Invalid type for column: Value. Only YQL data types and PG types are currently supported, code: 2031 2025-06-30T09:24:38.181701Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZmViMWI0NTUtNmU0NWY3ZDUtNGNmOTcyNmUtYzczNGYwMjc=, ActorId: [1:7521670907913103816:2311], ActorState: ExecuteState, TraceId: 01jz02e74tdmg7wgxbcp45q9dm, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: Trying to start YDB, gRPC: 13631, MsgBus: 9832 2025-06-30T09:24:38.360074Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670905166992994:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:38.360109Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004275/r3tmp/tmpWtswBe/pdisk_1.dat 2025-06-30T09:24:38.380459Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:38.380702Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670905166992974:2080] 1751275478359956 != 1751275478359959 TServer::EnableGrpc on GrpcPort 13631, node 2 2025-06-30T09:24:38.394974Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:38.394991Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:38.394994Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:38.395053Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9832 TClient is connected to server localhost:9832 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:24:38.467637Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:38.467677Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:38.468055Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:38.468735Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:24:38.472166Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:38.812184Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ... rst GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:24:39.545527Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-30T09:24:39.551585Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:24:39.916459Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670910821291842:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:39.916499Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:39.918873Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670910821291877:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:39.919849Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:39.922263Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-30T09:24:39.922343Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670910821291879:2300], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-30T09:24:39.995821Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670910821291930:2355] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:40.006395Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:40.155933Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:40.257981Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 19775, MsgBus: 19518 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004275/r3tmp/tmpK2DNPG/pdisk_1.dat 2025-06-30T09:24:40.686884Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:24:40.687162Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:40.687420Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521670912969236334:2080] 1751275480662317 != 1751275480662320 TServer::EnableGrpc on GrpcPort 19775, node 4 2025-06-30T09:24:40.702967Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:40.702980Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:40.702983Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:40.703036Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19518 2025-06-30T09:24:40.775379Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:40.775411Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:40.775877Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19518 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:40.803982Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:40.807083Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:40.831805Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:24:40.838223Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:24:40.842334Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:24:41.209286Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670917264204277:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.209358Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.209575Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670917264204312:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.210559Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:41.213733Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:24:41.213812Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521670917264204314:2300], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:24:41.299881Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521670917264204365:2358] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:41.305877Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.391016Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.661994Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpParams::ImplicitSameParameterTypesQueryCacheCheck [GOOD] >> KqpParams::InvalidJson >> KqpQuery::CreateAsSelectTypes-NotNull+IsOlap [GOOD] >> KqpQuery::CreateAsSelectTypes+NotNull+IsOlap |83.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |83.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |83.7%| [LD] {RESULT} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut >> KqpTypes::Time64Columns-EnableTableDatetime64 [GOOD] >> KqpAgg::AggHashShuffle+UseSink [GOOD] >> KqpAgg::AggHashShuffle-UseSink >> TImportWithRebootsTests::ShouldSucceedOnViewsAndTables [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::FullScanCount [GOOD] Test command err: Trying to start YDB, gRPC: 65507, MsgBus: 14511 2025-06-30T09:24:30.887799Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670873322624807:2246];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:30.890897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001f8d/r3tmp/tmpeF97rn/pdisk_1.dat 2025-06-30T09:24:31.004549Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:31.004796Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670873322624572:2080] 1751275470876847 != 1751275470876850 TServer::EnableGrpc on GrpcPort 65507, node 1 2025-06-30T09:24:31.043231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:31.043267Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:31.051385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:31.151367Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:31.151379Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:31.151381Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:31.151421Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14511 TClient is connected to server localhost:14511 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:31.479995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:31.489099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:31.504128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.596860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:31.655745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.671230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.784534Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670877617593465:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.784579Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.872859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.883362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.887455Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:31.896058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.905917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.921382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.934569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.948808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.971441Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670877617594129:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.971481Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670877617594134:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.971481Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.972372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:31.980245Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670877617594136:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:32.055452Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670881912561484:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 5115, MsgBus: 62043 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001f8d/r3tmp/tmpO5w5ys/pdisk_1.dat 2025-06-30T09:24:32.603773Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670879890602847:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:32.610536Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:24:32.625564Z node 2 :IMPORT WARN: schemeshard_impo ... PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670915076001287:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:40.573840Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001f8d/r3tmp/tmpREWDy3/pdisk_1.dat 2025-06-30T09:24:40.605633Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16809, node 7 2025-06-30T09:24:40.644992Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:40.645011Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:40.645013Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:40.645098Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15675 2025-06-30T09:24:40.683429Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:40.683463Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:40.684427Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15675 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:40.739651Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:40.748571Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:24:40.759727Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:40.798239Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:40.839089Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:40.880118Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:41.176869Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670919370970134:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.176897Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.188295Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.203904Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.218898Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.232642Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.246406Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.259661Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.274002Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.299450Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670919370970787:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.299478Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.299665Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670919370970792:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.300987Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:41.306005Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670919370970794:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:41.406132Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670919370970845:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:41.573517Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
: Warning: Type annotation, code: 1030
:3:17: Warning: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject
:3:33: Warning: At function: Filter, At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108
: Warning: Type annotation, code: 1030
:3:17: Warning: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject
:3:33: Warning: At function: Filter, At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpSort::TopParameterFilter [GOOD] Test command err: Trying to start YDB, gRPC: 5911, MsgBus: 10264 2025-06-30T09:24:31.183132Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670877280438431:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:31.183991Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001f06/r3tmp/tmpV0qnRX/pdisk_1.dat 2025-06-30T09:24:31.295210Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:31.299254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:31.299277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:31.303592Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5911, node 1 2025-06-30T09:24:31.353361Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:31.353375Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:31.353377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:31.353427Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10264 TClient is connected to server localhost:10264 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:31.492616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:24:31.495531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:31.501735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.580448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.610962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.628213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.931236Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670877280439971:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.931270Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.989449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.999005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.055561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.112683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.122832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.137245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.152301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.183824Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:32.187443Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670881575407934:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:32.187475Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:32.187737Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670881575407939:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:32.189223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:32.192432Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670881575407941:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:32.269304Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670881575407992:3405] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 5994, MsgBus: 9525 2025-06-30T09:24:32.889374Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670880322096998:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:32.890831Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001f06/r3tmp/tmpdPtDx1/pdisk_1.dat 2025-06-30T09:24:32.914230Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5994, node 2 2025-06-30T09:24:32.926147Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken ... sage; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001f06/r3tmp/tmpDvZWyV/pdisk_1.dat 2025-06-30T09:24:40.848211Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29172, node 7 2025-06-30T09:24:40.871082Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:40.871098Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:40.871100Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:40.871155Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4470 2025-06-30T09:24:40.935280Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:40.935312Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:40.939085Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4470 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:40.950768Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:40.963872Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:40.979744Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:41.026076Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:41.093731Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:41.345229Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670919723859963:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.345273Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.352944Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.365792Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.379364Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.395811Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.406673Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.419864Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.434606Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.455183Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670919723860610:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.455208Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.455349Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670919723860618:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.456126Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:41.460666Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670919723860620:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:41.555702Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670919723860671:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ( (declare $limit (DataType 'Uint64)) (declare $value (DataType 'Int32)) (let $1 (KqpTable '"/Root/TwoShard" '"72057594046644480:2" '"" '1)) (let $2 '('"Key" '"Value1" '"Value2")) (let $3 (KqpRowsSourceSettings $1 $2 '() (Void) '())) (let $4 (DataType 'Int32)) (let $5 (Min (Uint64 '"1001") $limit)) (let $6 (StructType '('"Key" (OptionalType (DataType 'Uint32))) '('"Value1" (OptionalType (DataType 'String))) '('"Value2" (OptionalType $4)))) (let $7 '('('"_logical_id" '497) '('"_id" '"a9911f00-cd4f7375-c2b50fbc-b0c64632") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $3)) (lambda '($12) (block '( (let $13 (lambda '($16) (block '( (let $17 (Member $16 '"Value2")) (return (Member $16 '"Key") (Member $16 '"Value1") $17 (Coalesce (!= $17 $value) (Bool 'false))) )))) (let $14 (WideFilter (ExpandMap (ToFlow $12) $13) (lambda '($18 $19 $20 $21) $21) $5)) (let $15 (lambda '($22 $23 $24 $25) $22 $23 $24)) (return (FromFlow (WideMap $14 $15))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($26) (FromFlow (NarrowMap (Take (ToFlow $26) $5) (lambda '($27 $28 $29) (AsStruct '('"Key" $27) '('"Value1" $28) '('"Value2" $29)))))) '('('"_logical_id" '510) '('"_id" '"474ce421-7745269b-2f0fdb27-83c65af5")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($8 $10) '($11) '('('"$limit") '('"$value")) '('('"type" '"data")))) '((KqpTxResultBinding (ListType $6) '"0" '"0")) '('('"type" '"data_query")))) ) 2025-06-30T09:24:41.831073Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> KqpMergeCn::TopSortBy_Interval_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Decimal_Limit5 >> TPersQueueTest::DirectReadRestartPQRB [GOOD] >> TPersQueueTest::DirectReadRestartTablet |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpRanges::CastKeyBounds [GOOD] Test command err: Trying to start YDB, gRPC: 27488, MsgBus: 11989 2025-06-30T09:24:31.365216Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670876251457448:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:31.365360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ef7/r3tmp/tmp4fX8XY/pdisk_1.dat 2025-06-30T09:24:31.455122Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:31.467904Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:31.467930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 27488, node 1 2025-06-30T09:24:31.471376Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:31.484272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:31.484282Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:31.484284Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:31.484326Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11989 TClient is connected to server localhost:11989 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:31.567511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:31.570286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:31.802238Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670876251457945:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.802265Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.847747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.875943Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670876251458045:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.875984Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.876165Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670876251458050:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.877076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:31.879236Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670876251458052:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:24:31.952350Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670876251458103:2386] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:31.989726Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670876251458145:2314], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing key column in input: Key for table: /Root/TestUpsertNotNullPk, code: 2029 2025-06-30T09:24:31.990352Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=M2Y2ODljMjMtZGZjODA4NjYtOTYzYTVjYjEtNTg0YjRjNmM=, ActorId: [1:7521670876251457929:2288], ActorState: ExecuteState, TraceId: 01jz02e13g1pft4xffzhykbbpk, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-30T09:24:32.004908Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7521670880546425461:2288], TxId: 281474976715662, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=M2Y2ODljMjMtZGZjODA4NjYtOTYzYTVjYjEtNTg0YjRjNmM=. TraceId : 01jz02e13qb27ba1nyqew70g28. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: BAD_REQUEST KIKIMR_BAD_COLUMN_TYPE: {
: Error: Tried to insert NULL value into NOT NULL column: Key, code: 2031 }. 2025-06-30T09:24:32.005070Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=M2Y2ODljMjMtZGZjODA4NjYtOTYzYTVjYjEtNTg0YjRjNmM=, ActorId: [1:7521670876251457929:2288], ActorState: ExecuteState, TraceId: 01jz02e13qb27ba1nyqew70g28, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 6013, MsgBus: 26389 2025-06-30T09:24:32.240855Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670880501124432:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:32.242564Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ef7/r3tmp/tmp5zNOUT/pdisk_1.dat 2025-06-30T09:24:32.256934Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6013, node 2 2025-06-30T09:24:32.278894Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:32.278907Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:32.278909Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:32.278965Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26389 2025-06-30T09:24:32.344903Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:32.344933Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:26389 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:24:32.347136Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
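The two KQP failures in the block above both come from the /Root/TestUpsertNotNullPk case: a compile-time type-annotation error (code 2029) when the NOT NULL key column is missing from the input, and an execution-time KIKIMR_BAD_COLUMN_TYPE error (code 2031) when a NULL reaches that column. A minimal YQL sketch that would reproduce both, assuming a table shape for which only the name is visible in the log:

    -- Assumed schema; only the table name /Root/TestUpsertNotNullPk is in the log.
    CREATE TABLE TestUpsertNotNullPk (
        Key Uint64 NOT NULL,
        Value String,
        PRIMARY KEY (Key)
    );

    -- Compile-time failure, code 2029: the key column is absent from the input.
    UPSERT INTO TestUpsertNotNullPk (Value) VALUES ("foo");

    -- Failure with code 2031: a NULL value reaches the NOT NULL key column.
    -- Depending on how the NULL arrives (literal vs. parameter) it may be
    -- caught at compile or execution time; the log above shows the execution path.
    UPSERT INTO TestUpsertNotNullPk (Key, Value) VALUES (NULL, "bar");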
2025-06-30T09:24:32.354233Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:32.355834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:32.362867Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: O ... ":"Join2"},{"Inputs":[],"Iterator":"precompute_0_0","Name":"Iterator"}],"Node Type":"Delete-ConstantExpr","Stats":{"ComputeNodes":[{"Tasks":[{"NodeId":6,"FinishTimeMs":1751275480477,"TaskId":1,"Host":"ghrun-x4qzxsyz2q","ComputeTimeUs":57}],"CpuTimeUs":246}],"UseLlvm":"undefined","Tasks":1,"FinishedTasks":0,"PhysicalStageId":0,"StageDurationUs":0,"Table":[{"Path":"\/Root\/Join2"}],"BaseTimeMs":1751275480477,"NodesScanShards":[],"CpuTimeUs":{"Count":1,"Sum":246,"Max":246,"Min":246}},"CTE Name":"precompute_0_0"}],"Node Type":"Effect"}],"Node Type":"Query","Stats":{"Compilation":{"FromCache":false,"DurationUs":72166,"CpuTimeUs":69907},"ProcessCpuTimeUs":757,"TotalDurationUs":76873,"ResourcePoolId":"default","QueuedTimeUs":356},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"A-SelfCpu":0.246,"A-Cpu":0.246,"Path":"\/Root\/Join2","Name":"Delete","Table":"Join2"}],"Node Type":"Delete"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query"}} Trying to start YDB, gRPC: 4784, MsgBus: 20208 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ef7/r3tmp/tmpGkXrX3/pdisk_1.dat 2025-06-30T09:24:40.858955Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:24:40.864554Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:40.865512Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7521670914343006503:2080] 1751275480831686 != 1751275480831689 TServer::EnableGrpc on GrpcPort 4784, node 7 2025-06-30T09:24:40.892504Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:40.892518Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:40.892519Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:40.892566Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20208 2025-06-30T09:24:40.947234Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:40.947279Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:40.948846Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient 
is connected to server localhost:20208 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:24:41.027274Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:41.033129Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:41.069418Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:41.141242Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:41.164326Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:41.435205Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670918637975418:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.435234Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.441692Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.454276Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.466563Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.477879Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.489863Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.505755Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.518287Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.536047Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670918637976071:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.536093Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.536183Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670918637976076:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.537171Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:41.544773Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670918637976078:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:41.626731Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670918637976129:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:41.837346Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpTypes::Time64Columns-EnableTableDatetime64 [GOOD] Test command err: Trying to start YDB, gRPC: 6033, MsgBus: 25642 2025-06-30T09:24:37.823587Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670901043436524:2243];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:37.831430Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004266/r3tmp/tmpa5zL2B/pdisk_1.dat 2025-06-30T09:24:37.921890Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670901043436296:2080] 1751275477814860 != 1751275477814863 2025-06-30T09:24:37.925562Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6033, node 1 2025-06-30T09:24:37.940841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:37.940858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:37.940861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:37.940912Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:37.975820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:37.975858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:37.976819Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25642 TClient is connected to server localhost:25642 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:38.113459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:38.121940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:38.127867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.211748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.284197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.304128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.359985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905338405203:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.360014Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.411849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.426925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.437910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.456893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.484626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.496905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.521367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.591263Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905338405862:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.591295Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.591460Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905338405867:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.592713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.597483Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670905338405869:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:38.685587Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670905338405920:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:38.822339Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:38.894164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Warning: Optimization, code: 1070
:3:29: Warning: Unsafe conversion integral value to Timestamp, consider using date types, code: 1102 Trying to start YDB, gRPC: 15314, MsgBus: 23100 2025-06-30T09:24:39.190230Z node 2 :METADATA_PROVI ... IC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:6:25: Error: At function: AsList
:6:46: Error: At function: AsStruct, At tuple
:3:29: Error: At function: Just, At function: UnsafeTimestampCast
:3:29: Error: Unsafe timestamp cast restricted from SQL v1. Trying to start YDB, gRPC: 17254, MsgBus: 17776 2025-06-30T09:24:40.781575Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521670916825210834:2246];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004266/r3tmp/tmpTqSgTi/pdisk_1.dat 2025-06-30T09:24:40.784674Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:24:40.798764Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:40.800159Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7521670916825210599:2080] 1751275480777538 != 1751275480777541 TServer::EnableGrpc on GrpcPort 17254, node 3 2025-06-30T09:24:40.812182Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:40.812195Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:40.812198Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:40.812249Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17776 TClient is connected to server localhost:17776 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:24:40.885547Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:40.885583Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:40.886557Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:40.902188Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
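The diagnostics above (warning 1102, "Unsafe conversion integral value to Timestamp", escalating to "Unsafe timestamp cast restricted from SQL v1") describe assigning a bare integer where a Timestamp is expected. A hedged YQL sketch of the unsafe form and the suggested fix, using a hypothetical table T with a Timestamp column Ts (neither name appears in the log):

    -- Unsafe: integral literal implicitly cast to Timestamp. Emits warning
    -- 1102 ("consider using date types") and is rejected outright under SQL v1.
    UPSERT INTO T (Key, Ts) VALUES (1, 1751275480477000);

    -- Safe: use an explicit date/time type, as the warning suggests.
    UPSERT INTO T (Key, Ts) VALUES (1, Timestamp("2025-06-30T09:24:40Z"));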
2025-06-30T09:24:40.914225Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:41.271418Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670921120178516:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.271452Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.275425Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.304776Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670921120178616:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.304807Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.304919Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521670921120178621:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:41.305963Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:41.309254Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:24:41.309374Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521670921120178623:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:24:41.387306Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521670921120178674:2384] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 26703, MsgBus: 30400 2025-06-30T09:24:41.700248Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521670917994199650:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:41.700282Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004266/r3tmp/tmpJ51BUO/pdisk_1.dat 2025-06-30T09:24:41.722722Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:41.723033Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521670917994199629:2080] 1751275481700093 != 1751275481700096 TServer::EnableGrpc on GrpcPort 26703, node 4 2025-06-30T09:24:41.739618Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:41.739632Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:41.739634Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:41.739700Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30400 TClient is connected to server localhost:30400 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
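The recurring pattern in this section (TPoolFetcherActor reporting NOT_FOUND for pool "default", TPoolCreatorActor scheduling a "doublechecking" retry, then TX_PROXY logging "path exist, request accepts it" for /Root/.metadata/workload_manager/pools/default) is the workload service lazily creating the default resource pool on first use: the fetch misses, a create is issued, and a racing duplicate create is accepted because the path already exists. For comparison, a pool can also be created explicitly; a hedged sketch assuming YDB's CREATE RESOURCE POOL DDL, with an illustrative name and limits that do not appear in the log:

    -- Hypothetical pool name and settings, shown for illustration only.
    CREATE RESOURCE POOL my_pool WITH (
        CONCURRENT_QUERY_LIMIT = 10,
        QUEUE_SIZE = 100
    );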
2025-06-30T09:24:41.806593Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:41.806626Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:41.807117Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:41.808033Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:42.199188Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670922289167523:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.199268Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.201935Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521670922289167567:2297] txid# 281474976715658, issues: { message: "Type \'Datetime64\' specified for column \'DatetimePK\', but support for new date/time 64 types is disabled (EnableTableDatetime64 feature flag is off)" severity: 1 } |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> KqpSqlIn::InWithCast [GOOD] >> KqpRanges::Like [GOOD] >> KqpNotNullColumns::UpdateTable_UniqIndex [GOOD] >> KqpNotNullColumns::UpdateTable_UniqIndexPg |83.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |83.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |83.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence >> KqpQuery::OlapCreateAsSelect_Complex [GOOD] >> KqpSqlIn::PhasesCount [GOOD] >> KqpNewEngine::PrunePartitionsByExpr [GOOD] >> KqpNewEngine::PruneWritePartitions+UseSink >> KqpParams::InvalidJson [GOOD] |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::InWithCast [GOOD] Test command err: Trying to start YDB, gRPC: 15664, MsgBus: 24587 2025-06-30T09:24:28.775391Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670864225550864:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:28.775459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002118/r3tmp/tmpiTzTbQ/pdisk_1.dat 2025-06-30T09:24:28.843407Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15664, node 1 2025-06-30T09:24:28.867854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:28.867869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:28.867872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:28.867949Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:28.876800Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:28.876835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:28.883467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24587 TClient is connected to server localhost:24587 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:28.955992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:28.961855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:28.971583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:29.014443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:29.068091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:29.099392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:29.310830Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670868520519552:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:29.310862Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:29.386977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:29.412819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:29.441243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:29.457333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:29.470833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:29.489747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:29.515853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:29.536402Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670868520520207:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:29.536433Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:29.536520Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670868520520212:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:29.537754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:29.541063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:29.541195Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670868520520214:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:29.596293Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670868520520265:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:29.779247Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:29.853912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:29.919697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, firs ... unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.110148Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 9284, MsgBus: 5041 2025-06-30T09:24:41.867397Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670921342121168:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:41.867436Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002118/r3tmp/tmp82JS2d/pdisk_1.dat 2025-06-30T09:24:41.889218Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9284, node 7 2025-06-30T09:24:41.898357Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:41.898374Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:41.898377Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:41.898437Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5041 2025-06-30T09:24:41.972891Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-30T09:24:41.972927Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:41.974197Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5041 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:41.986112Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:41.988323Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:41.992310Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.008257Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.073058Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:42.085712Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.372826Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670925637090041:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.372871Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.384638Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.396153Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.458979Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.471265Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.487676Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.549572Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.567146Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.586633Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670925637090702:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.586686Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.586812Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670925637090707:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.588102Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:42.592273Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:42.594825Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670925637090709:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:42.694322Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670925637090760:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:42.871004Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpRanges::Like [GOOD] Test command err: Trying to start YDB, gRPC: 15404, MsgBus: 2134 2025-06-30T09:24:30.211707Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670870384854985:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:30.218905Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002107/r3tmp/tmpnieep5/pdisk_1.dat 2025-06-30T09:24:30.347272Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15404, node 1 2025-06-30T09:24:30.411263Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:30.411279Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:30.411282Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:30.411335Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2134 2025-06-30T09:24:30.543268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:30.543314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:30.543916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2134 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:30.660001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:30.732181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:30.803800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:24:30.879551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:30.931775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.111371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670874679823820:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.111413Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.222992Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:31.267606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.292257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.312762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.342146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.368892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.405822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.442159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.491843Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670874679824481:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.491872Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.492084Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670874679824486:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.493326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:31.498709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:31.498840Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670874679824488:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:31.596287Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670874679824539:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:31.816988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.888400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.940430Z node 1 :FL ... ble, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.517476Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 18248, MsgBus: 13524 2025-06-30T09:24:41.906879Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670920112484928:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:41.906904Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002107/r3tmp/tmpnwa1V0/pdisk_1.dat 2025-06-30T09:24:41.927379Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18248, node 7 2025-06-30T09:24:41.943702Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:41.943719Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:41.943722Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:41.943780Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13524 TClient is connected to server localhost:13524 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:24:42.011596Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:42.011632Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:42.012119Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:42.013948Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:24:42.019949Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.036945Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.100179Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.116190Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:42.401157Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670924407453770:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.401183Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.416220Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.439463Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.451853Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.463734Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.483856Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.510003Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.524617Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.547362Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670924407454426:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.547386Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.547426Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670924407454431:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.548412Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:42.552910Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670924407454433:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:42.615411Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670924407454484:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:42.812125Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.910826Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpAgg::AggHashShuffle-UseSink [GOOD] >> KqpExtractPredicateLookup::ComplexRange >> KqpQuery::CreateAsSelectTypes+NotNull+IsOlap [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::PhasesCount [GOOD] Test command err: Trying to start YDB, gRPC: 18291, MsgBus: 20072 2025-06-30T09:24:30.799040Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670870878163972:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:30.811164Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001fbe/r3tmp/tmp4DoSAZ/pdisk_1.dat 2025-06-30T09:24:30.900018Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18291, node 1 2025-06-30T09:24:30.945502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:30.945518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:30.945520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:30.945568Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:30.975005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:30.975040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:30.979126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20072 TClient is connected to server localhost:20072 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:31.051238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:31.059549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:31.067967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.094896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.124785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.161486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.347214Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670875173132814:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.347247Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.407695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.416837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.434510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.453059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.469066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.480984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.495104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.520197Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670875173133467:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.520221Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.520380Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670875173133472:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.521391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:31.525044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:31.525134Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670875173133474:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:31.604512Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670875173133525:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:31.817494Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:31.821000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.844544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, firs ... d__operation_create_table.cpp:664) 2025-06-30T09:24:41.222137Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:41.239588Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 19169, MsgBus: 21291 2025-06-30T09:24:42.140578Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670923678759254:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:42.142439Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001fbe/r3tmp/tmp58p2Te/pdisk_1.dat 2025-06-30T09:24:42.157862Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19169, node 7 2025-06-30T09:24:42.165972Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:42.165984Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:42.165986Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:42.166030Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient 
is connected to server localhost:21291 TClient is connected to server localhost:21291 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:42.243157Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:42.243187Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:42.244170Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:42.246016Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:42.251077Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:42.261020Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.272487Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.295306Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:42.307825Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.551182Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670923678760795:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.551218Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.556490Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.565285Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.628638Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.646218Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.669163Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.684475Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.703803Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.732070Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670923678761451:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.732125Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.732232Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670923678761456:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.733538Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:42.739547Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670923678761458:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:42.803701Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670923678761509:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:43.147761Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::OlapCreateAsSelect_Complex [GOOD] Test command err: Trying to start YDB, gRPC: 4903, MsgBus: 16080 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004272/r3tmp/tmp98qkvh/pdisk_1.dat 2025-06-30T09:24:37.716016Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:24:37.758785Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:37.759831Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670901616304738:2080] 1751275477626006 != 1751275477626009 TServer::EnableGrpc on GrpcPort 4903, node 1 2025-06-30T09:24:37.788818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:37.788833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:37.788835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:37.788883Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:37.794367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:37.794406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:37.796161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16080 TClient is connected to server localhost:16080 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:24:37.879454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:37.883102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:38.207261Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905911272635:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.207301Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.210973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905911272670:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.212147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.215225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:24:38.215307Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670905911272672:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:24:38.285038Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670905911272723:2330] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:38.343452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:24:38.367002Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521670905911272879:2305];fline=columnshard.cpp:98;event=initialize_shard;step=OnActivateExecutor; 2025-06-30T09:24:38.367002Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];fline=columnshard.cpp:98;event=initialize_shard;step=OnActivateExecutor; 2025-06-30T09:24:38.374396Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521670905911272879:2305];fline=columnshard.cpp:116;event=initialize_shard;step=initialize_tiring_finished; 2025-06-30T09:24:38.374485Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037896 2025-06-30T09:24:38.376902Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];fline=columnshard.cpp:116;event=initialize_shard;step=initialize_tiring_finished; 2025-06-30T09:24:38.376965Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037897 2025-06-30T09:24:38.377709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:24:38.377794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:24:38.377874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:24:38.377900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:24:38.377921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:24:38.377942Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:24:38.377964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:24:38.377986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:24:38.378008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:24:38.378030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:24:38.378054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:24:38.378076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521670905911272878:2304];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:24:38.379348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521670905911272879:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:24:38.379375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521670905911272879:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:24:38.379424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521670905911272879:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:24:38.379446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521670905911272879:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:24:38.379466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521670905911272879:2305];tablet_ ... 
ne=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:24:42.656202Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:24:42.656209Z node 4 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037903;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:24:42.656217Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:24:42.656222Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:24:42.656266Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:24:42.656275Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:24:42.656287Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:24:42.656291Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:24:42.656394Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:24:42.656400Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:24:42.656411Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:24:42.656416Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:24:42.656437Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:24:42.656441Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:24:42.656454Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:24:42.656459Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:24:42.656467Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:24:42.656473Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:24:42.656502Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:24:42.656507Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:24:42.656551Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:24:42.656556Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:24:42.656569Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:24:42.656574Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:24:42.656580Z node 4 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:24:42.656585Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:24:42.656590Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:24:42.656630Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:24:42.656635Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:24:42.656653Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:24:42.656657Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037907;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:24:42.656807Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:24:42.656813Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:24:42.656825Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:24:42.656830Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:24:42.656854Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:24:42.656860Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:24:42.656872Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:24:42.656877Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:24:42.656890Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:24:42.656895Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:24:42.656925Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:24:42.656931Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:24:42.656953Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:24:42.656959Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:24:42.656973Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:24:42.656981Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:24:42.656990Z node 4 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:24:42.656996Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:24:42.657001Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:24:42.657048Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:24:42.657053Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpParams::InvalidJson [GOOD] Test command err: Trying to start YDB, gRPC: 14004, MsgBus: 23715 2025-06-30T09:24:38.026444Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670908025008656:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:38.026541Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00425a/r3tmp/tmpPDYR6g/pdisk_1.dat 2025-06-30T09:24:38.196315Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14004, node 1 2025-06-30T09:24:38.222956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:38.222970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:38.222973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:38.223022Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23715 TClient is connected to server localhost:23715 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:38.322724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:38.327348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:38.334407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.346390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:38.346429Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:38.348473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:24:38.404117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.468327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:24:38.492284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.668459Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670908025010054:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.668480Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.725813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.734801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.797858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.811003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.828811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.844855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.904409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.928147Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670908025010708:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.928174Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.928389Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670908025010713:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.929992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.934660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:38.934756Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670908025010715:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:39.008702Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670912319978062:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:39.025551Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 29975, MsgBus: 11454 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00425a/r3tmp/tmpoUOwwS/pdisk_1.dat 2025-06-30T09:24:39.790900Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:24:39.791691Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:39.794916Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670910739828794:2080] 1751275479757060 != 1 ... MsgBus: 24838 2025-06-30T09:24:42.337086Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521670922253088609:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:42.337111Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00425a/r3tmp/tmpiwhoFJ/pdisk_1.dat 2025-06-30T09:24:42.359534Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64770, node 4 2025-06-30T09:24:42.373211Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:42.373227Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:42.373230Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:42.373288Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24838 TClient is connected to server localhost:24838 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:42.438661Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:42.438691Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:42.440844Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:42.444930Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:42.447364Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:42.460133Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.487493Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.517886Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:42.577769Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.832392Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670922253090197:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.832423Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:42.844707Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.862851Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.876084Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.893205Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.911635Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.972434Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.988947Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:43.007427Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670926548058149:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:43.007455Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:43.007535Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521670926548058154:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:43.008380Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:43.015353Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521670926548058156:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:43.085610Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521670926548058207:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:43.298491Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:43.343409Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:43.357201Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZmFiMWFmMTgtNDYwMTE4YmMtZThiMjcxMzAtMjExZTIzNTI=, ActorId: [4:7521670926548058470:2469], ActorState: ExecuteState, TraceId: 01jz02ec6bbgyfv4x2d3watznb, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1095: Invalid Json value
: Error: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1095: Invalid Json value >> KqpMergeCn::TopSortBy_Decimal_Limit5 [GOOD] |83.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |83.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |83.7%| [LD] {RESULT} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::CreateAsSelectTypes+NotNull+IsOlap [GOOD] Test command err: Trying to start YDB, gRPC: 6363, MsgBus: 19868 2025-06-30T09:24:37.922001Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670901241652872:2246];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:37.922874Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004261/r3tmp/tmpXlmzh9/pdisk_1.dat 2025-06-30T09:24:38.015250Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:38.030972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:38.031013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:38.039225Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6363, node 1 2025-06-30T09:24:38.067066Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:38.067091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:38.067095Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:38.067180Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19868 TClient is connected to server localhost:19868 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:24:38.239661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:38.472175Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905536620528:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.472222Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.478832Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905536620555:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.480025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.490899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:24:38.494917Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670905536620557:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:24:38.572056Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670905536620608:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:38.665794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.817304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.921324Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 16061, MsgBus: 13383 2025-06-30T09:24:39.367640Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670910943629884:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:39.369317Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004261/r3tmp/tmpCVHZU5/pdisk_1.dat 2025-06-30T09:24:39.423569Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16061, node 2 2025-06-30T09:24:39.449217Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:39.449232Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:39.449236Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:39.449292Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:39.486985Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:39.487021Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:39.487286Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13383 TClient is connected to server localhost:13383 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:39.615924Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:39.623266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:40.052533Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670915238597731:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:40.052572Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:40.052700Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670915238597758:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:40.053594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:40.057295Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:24:40.057437Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670915238597760:229 ... _id=72075186224037926;self_id=[4:7521670928900362083:2363];tablet_id=72075186224037926;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:24:43.226984Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[4:7521670928900362083:2363];tablet_id=72075186224037926;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:24:43.227007Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[4:7521670928900362083:2363];tablet_id=72075186224037926;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:24:43.227030Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[4:7521670928900362083:2363];tablet_id=72075186224037926;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:24:43.227053Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[4:7521670928900362083:2363];tablet_id=72075186224037926;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:24:43.227074Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[4:7521670928900362083:2363];tablet_id=72075186224037926;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:24:43.228854Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:24:43.228871Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:24:43.228913Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:24:43.228929Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:24:43.228944Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:24:43.228961Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:24:43.228984Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:24:43.229005Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:24:43.229028Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:24:43.229044Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:24:43.229060Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:24:43.229074Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[4:7521670928900361826:2328];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:24:43.234391Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:24:43.234421Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:24:43.234489Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:24:43.234512Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:24:43.234537Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:24:43.234562Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:24:43.234585Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:24:43.234610Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:24:43.234636Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:24:43.234658Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:24:43.234680Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:24:43.234704Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;self_id=[4:7521670928900362087:2367];tablet_id=72075186224037921;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:24:43.234865Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:24:43.234885Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:24:43.234944Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:24:43.234969Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:24:43.234991Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:24:43.235015Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:24:43.235035Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:24:43.235057Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:24:43.235079Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:24:43.235099Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:24:43.235122Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:24:43.235148Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[4:7521670928900362019:2348];tablet_id=72075186224037941;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:24:43.241413Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037952;self_id=[4:7521670928900362017:2347];tablet_id=72075186224037952;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpMergeCn::TopSortBy_Decimal_Limit5 [GOOD] Test command err: Trying to start YDB, gRPC: 32656, MsgBus: 29030 2025-06-30T09:24:31.118730Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670874697393646:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:31.124868Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001f32/r3tmp/tmpIW6F6H/pdisk_1.dat 2025-06-30T09:24:31.329985Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:31.331219Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670874697393454:2080] 1751275471112445 != 1751275471112448 TServer::EnableGrpc on GrpcPort 32656, node 1 2025-06-30T09:24:31.371857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:31.371891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:31.375346Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:31.422320Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:31.422343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:31.422346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:31.422397Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29030 TClient is connected to server localhost:29030 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:31.663613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:31.667145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:31.671801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:31.748635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.804117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.867032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.990025Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670874697395064:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.990064Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:32.058826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.070345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.105277Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:32.127863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.193938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.207198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.268943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.329183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.348083Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670878992363030:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:32.348134Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:32.348277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670878992363035:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:32.349448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:32.353030Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670878992363037:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:32.452232Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670878992363088:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:32.667131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:32.842876Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275472884, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 10522, MsgBus: ... tor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:42.252521Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275482299, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 12378, MsgBus: 9607 2025-06-30T09:24:42.634894Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7521670924518935657:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:42.637483Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001f32/r3tmp/tmppWdD2H/pdisk_1.dat 2025-06-30T09:24:42.666661Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12378, node 8 2025-06-30T09:24:42.685535Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:42.685551Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:42.685554Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:42.685621Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9607 2025-06-30T09:24:42.739194Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:42.739236Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:42.740496Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9607 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:42.776041Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:42.778257Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:42.792189Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:42.816661Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:24:42.849029Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:42.917235Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:43.283163Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521670928813904400:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:43.283218Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:43.292969Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:43.314502Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:43.327588Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:43.346473Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:43.364765Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:43.381108Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:43.401433Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:43.428236Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521670928813905052:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:43.428274Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521670928813905057:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:43.428276Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:43.429337Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:43.432727Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7521670928813905059:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:43.496538Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7521670928813905110:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:43.632429Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:43.723445Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:43.926908Z node 8 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275483958, txId: 281474976715674] shutting down |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> YdbTableSplit::MergeByNoLoadAfterSplit [GOOD] >> KqpNewEngine::PruneWritePartitions+UseSink [GOOD] >> KqpNewEngine::PruneWritePartitions-UseSink >> KqpNamedExpressions::NamedExpressionRandomDataQuery+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomDataQuery-UseSink >> KqpNotNullColumns::UpdateTable_UniqIndexPg [GOOD] >> KqpNotNullColumns::UpdateTable_Immediate |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |83.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |83.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::MergeByNoLoadAfterSplit [GOOD] Test command err: 2025-06-30T09:23:50.038776Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670701040865366:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:50.038804Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045a8/r3tmp/tmp4w7Th9/pdisk_1.dat 2025-06-30T09:23:50.092069Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:50.098945Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 8570, node 1 2025-06-30T09:23:50.104676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:50.104690Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:50.104692Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:50.104737Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11561 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:50.139092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:50.139124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:50.140734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:50.171040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Triggering split by load TClient is connected to server localhost:11561 2025-06-30T09:23:50.363547Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866271:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.363571Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.392858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:50.458661Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866440:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.458687Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.461910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275430499 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275430499 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-30T09:23:50.475990Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866545:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476023Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476045Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866565:2357], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866566:2358], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476065Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866568:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476074Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866571:2361], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476097Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866570:2360], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866574:2362], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476475Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866597:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476496Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866618:2375], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476501Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866604:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476505Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670701040866635:2380], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:50.476629Z node 1 :KQP_WORKLOAD_SE ... ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-30T09:24:40.671100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__table_stats.cpp:450: Propose merge request : Transaction { WorkingDir: "/Root" OperationType: ESchemeOpSplitMergeTablePartitions SplitMergeTablePartitions { TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 } Internal: true FailOnExist: false } TxId: 281474976710658 TabletId: 72057594046644480, reason: shard with tabletId: 72075186224037889 merge by load (shardLoad: 0.02), shardToMergeCount: 2, totalSize: 0, sizeToMerge: 0, totalLoad: 0.04, loadThreshold: 0.07 2025-06-30T09:24:40.671166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:804: TSplitMerge Propose, tableStr: /Root/Foo, tableId: , opId: 281474976710658:0, at schemeshard: 72057594046644480, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 2025-06-30T09:24:40.671392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1083: TSplitMerge Propose accepted, tableStr: /Root/Foo, tableId: , opId: 281474976710658:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "\002\000\004\000\000\000\323bH\204\000\000\000\200" TabletID: 72075186224037889 ShardIdx: 2 } SourceRanges { KeyRangeBegin: "\002\000\004\000\000\000\323bH\204\000\000\000\200" KeyRangeEnd: "" TabletID: 72075186224037890 ShardIdx: 3 } DestinationRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "" ShardIdx: 4 }, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 2025-06-30T09:24:40.671403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:24:40.679212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710658:0 ProgressState, operation type: TxSplitTablePartition, at tablet# 72057594046644480 2025-06-30T09:24:40.685031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 281474976710658:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046644480 2025-06-30T09:24:40.685084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710658:0 2 -> 3 2025-06-30T09:24:40.690288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:84: TSplitMerge TConfigureDestination ProgressState, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-30T09:24:40.693126Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: 
TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:7521670915789733790:6812] 2025-06-30T09:24:40.703081Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037891 2025-06-30T09:24:40.703165Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2025-06-30T09:24:40.703265Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-30T09:24:40.706384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:38: TSplitMerge TConfigureDestination operationId# 281474976710658:0 HandleReply TEvInitSplitMergeDestinationAck, operationId: 281474976710658:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976710658 TabletId: 72075186224037891 2025-06-30T09:24:40.706420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710658:0 3 -> 131 2025-06-30T09:24:40.708743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:334: TSplitMerge TTransferData operationId# 281474976710658:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:24:40.717863Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037891 2025-06-30T09:24:40.717929Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037891 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-30T09:24:40.717966Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-30T09:24:40.717986Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037891 2025-06-30T09:24:40.718115Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-30T09:24:40.727240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:207: TSplitMerge TTransferData operationId# 281474976710658:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976710658 TabletId: 72075186224037890 2025-06-30T09:24:40.727421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:207: TSplitMerge TTransferData operationId# 281474976710658:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976710658 TabletId: 72075186224037889 2025-06-30T09:24:40.727608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710658:0 131 -> 132 2025-06-30T09:24:40.735406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-30T09:24:40.735562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-30T09:24:40.735584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:437: TSplitMerge TNotifySrc, operationId: 281474976710658:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:24:40.737051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-30T09:24:40.737079Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710658 2025-06-30T09:24:40.737086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710658, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 6 2025-06-30T09:24:40.741971Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-30T09:24:40.742543Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037890 Initiating switch from PreOffline to Offline state 2025-06-30T09:24:40.743275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:392: TSplitMerge TNotifySrc, operationId: 281474976710658:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-30T09:24:40.743435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:392: TSplitMerge TNotifySrc, operationId: 281474976710658:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037890, at schemeshard: 72057594046644480 2025-06-30T09:24:40.743458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710658:0 progress is 1/1 2025-06-30T09:24:40.743462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710658:0 progress is 1/1 2025-06-30T09:24:40.743470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710658:0 2025-06-30T09:24:40.744502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:261: Unable to activate 281474976710658:0 2025-06-30T09:24:40.744695Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-30T09:24:40.744750Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-30T09:24:40.745110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-30T09:24:40.745175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-30T09:24:40.748207Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-30T09:24:40.748236Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-30T09:24:40.748431Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-30T09:24:40.748466Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-30T09:24:40.748471Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-06-30T09:24:40.748472Z node 1 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 
2025-06-30T09:24:40.749298Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-30T09:24:40.749317Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1751275430499 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 3 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> KqpNewEngine::PruneWritePartitions-UseSink [GOOD] >> KqpNewEngine::PruneEffectPartitions-UseSink >> KqpRanges::ValidatePredicates [GOOD] >> KqpRanges::ValidatePredicatesDataQuery |83.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_restore/unittest >> TImportWithRebootsTests::ShouldSucceedOnViewsAndTables [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:21:04.591254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:21:04.591271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.591275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:21:04.591278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:21:04.591282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:21:04.591285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:21:04.591291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:21:04.591301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:21:04.591388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:21:04.591436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:21:04.601728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:21:04.601747Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:21:04.601875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.604484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:21:04.605501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:21:04.605545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:21:04.607292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:21:04.607346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:21:04.607457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.607490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:21:04.608220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.608251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:21:04.608472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:21:04.608481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:21:04.608490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:21:04.608499Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:21:04.608505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:21:04.608529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:21:04.609556Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:21:04.623645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:21:04.623714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.623761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:21:04.623767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:21:04.623827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:21:04.623841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:21:04.624421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.624462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:21:04.624504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.624511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:21:04.624515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:21:04.624519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:21:04.624859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.624867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:21:04.624870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:21:04.625122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.625129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:21:04.625133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:21:04.625138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:21:04.625623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:21:04.625981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:21:04.626011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:21:04.626162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:21:04.626181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
ishToSchemeBoard Send, to populator: [293:210:2210], at schemeshard: 72057594046678944, txId: 281474976710759, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710759 2025-06-30T09:24:42.220189Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:24:42.220202Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TRestore TProposedWaitParts, opId: 281474976710759:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:24:42.220402Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-30T09:24:42.220419Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-30T09:24:42.220426Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710759 2025-06-30T09:24:42.220432Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710759, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-30T09:24:42.220440Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-30T09:24:42.220460Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 0/1, is published: true 2025-06-30T09:24:42.221158Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710759 REQUEST: HEAD /table/data_00.csv HTTP/1.1 HEADERS: Host: localhost:4510 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7575F73B-E9FD-4C4A-B316-3A660DA282BC amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /table/data_00.csv / 14 2025-06-30T09:24:42.223933Z node 293 :DATASHARD_RESTORE DEBUG: import_s3.cpp:526: [Import] [s3:281474976710759] Handle NKikimr::NWrappers::NExternalStorage::TEvHeadObjectResponse { Key: null Result: HeadObjectResult { ETag: a3ed28bfb53c9214f635c51ed6b618c4 ContentLength: 14 } } 2025-06-30T09:24:42.224069Z node 293 :DATASHARD_RESTORE DEBUG: import_s3.cpp:605: [Import] [s3:281474976710759] Handle NKikimr::TEvDataShard::TEvS3DownloadInfo { Info: { DataETag: (empty maybe) ProcessedBytes: 0 WrittenBytes: 0 WrittenRows: 0 ChecksumState: DownloadState: } } 2025-06-30T09:24:42.224687Z node 293 :DATASHARD_RESTORE DEBUG: import_s3.cpp:605: [Import] [s3:281474976710759] Handle NKikimr::TEvDataShard::TEvS3DownloadInfo { Info: { DataETag: a3ed28bfb53c9214f635c51ed6b618c4 ProcessedBytes: 0 WrittenBytes: 0 WrittenRows: 0 ChecksumState: DownloadState: } } 2025-06-30T09:24:42.224705Z node 293 :DATASHARD_RESTORE NOTICE: 
import_s3.cpp:620: [Import] [s3:281474976710759] Process download info at 'DownloadInfo': info# { DataETag: a3ed28bfb53c9214f635c51ed6b618c4 ProcessedBytes: 0 WrittenBytes: 0 WrittenRows: 0 ChecksumState: DownloadState: } 2025-06-30T09:24:42.224721Z node 293 :DATASHARD_RESTORE DEBUG: import_s3.cpp:516: [Import] [s3:281474976710759] GetObject: key# table/data_00.csv, range# 0-13 REQUEST: GET /table/data_00.csv HTTP/1.1 HEADERS: Host: localhost:4510 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: F6D6A55D-56B4-49C7-A4DC-44C18514DABA amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-13 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /table/data_00.csv / 14 2025-06-30T09:24:42.231399Z node 293 :DATASHARD_RESTORE DEBUG: import_s3.cpp:655: [Import] [s3:281474976710759] Handle NKikimr::NWrappers::NExternalStorage::TEvGetObjectResponse { Key: null Result: a3ed28bfb53c9214f635c51ed6b618c4 Body: 14b } 2025-06-30T09:24:42.231433Z node 293 :DATASHARD_RESTORE TRACE: import_s3.cpp:672: [Import] [s3:281474976710759] Content size: processed-bytes# 0, content-length# 14, body-size# 14 2025-06-30T09:24:42.231473Z node 293 :DATASHARD_RESTORE INFO: import_s3.cpp:805: [Import] [s3:281474976710759] Upload rows: count# 1, size# 34 2025-06-30T09:24:42.232526Z node 293 :DATASHARD_RESTORE DEBUG: import_s3.cpp:813: [Import] [s3:281474976710759] Handle NKikimr::TEvDataShard::TEvS3UploadRowsResponse { Record: TabletID: 72075186233409546 Status: 0 Info: { DataETag: a3ed28bfb53c9214f635c51ed6b618c4 ProcessedBytes: 14 WrittenBytes: 8 WrittenRows: 1 ChecksumState: DownloadState: } } 2025-06-30T09:24:42.232547Z node 293 :DATASHARD_RESTORE NOTICE: import_s3.cpp:620: [Import] [s3:281474976710759] Process download info at 'UploadResponse': info# { DataETag: a3ed28bfb53c9214f635c51ed6b618c4 ProcessedBytes: 14 WrittenBytes: 8 WrittenRows: 1 ChecksumState: DownloadState: } 2025-06-30T09:24:42.232556Z node 293 :DATASHARD_RESTORE NOTICE: import_s3.cpp:961: [Import] [s3:281474976710759] Finish: success# 1, error# , writtenBytes# 8, writtenRows# 1 2025-06-30T09:24:42.235812Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 398 RawX2: 1258425420095 } Origin: 72075186233409546 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 8 RowsProcessed: 1 } 2025-06-30T09:24:42.235839Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409546, partId: 0 2025-06-30T09:24:42.235875Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 398 RawX2: 1258425420095 } Origin: 72075186233409546 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 8 RowsProcessed: 1 } 2025-06-30T09:24:42.235907Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TRestore TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 398 RawX2: 1258425420095 } Origin: 72075186233409546 State: 2 TxId: 281474976710759 Step: 0 
Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 8 RowsProcessed: 1 } 2025-06-30T09:24:42.235924Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:42.235930Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:24:42.235936Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:24:42.235945Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976710759:0 129 -> 240 2025-06-30T09:24:42.235995Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:42.236635Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:24:42.236757Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-30T09:24:42.236771Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-06-30T09:24:42.236788Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-30T09:24:42.236793Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-30T09:24:42.236800Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-30T09:24:42.236804Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-30T09:24:42.236811Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-06-30T09:24:42.236826Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [293:128:2152] message: TxId: 281474976710759 2025-06-30T09:24:42.236837Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-30T09:24:42.236843Z node 293 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710759:0 2025-06-30T09:24:42.236849Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 
281474976710759:0 2025-06-30T09:24:42.236879Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-30T09:24:42.237764Z node 293 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-30T09:24:42.237787Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710759 2025-06-30T09:24:42.237801Z node 293 :IMPORT DEBUG: schemeshard_import__create.cpp:365: TImport::TTxProgress: DoExecute 2025-06-30T09:24:42.237808Z node 293 :IMPORT DEBUG: schemeshard_import__create.cpp:1476: TImport::TTxProgress: OnNotifyResult: txId# 281474976710759 2025-06-30T09:24:42.238285Z node 293 :IMPORT DEBUG: schemeshard_import__create.cpp:389: TImport::TTxProgress: DoComplete 2025-06-30T09:24:42.238309Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-30T09:24:42.238316Z node 293 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [293:330:2318] TestWaitNotification: OK eventTxId 1002 |83.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |83.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |83.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink-UseDataQuery ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::IndexPartitioning [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:31.404634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:31.404662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:31.404669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:31.404674Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:31.404689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:31.404695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:31.404705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:31.404720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:31.404839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:22:31.404915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:31.422307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:31.422332Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:31.422456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:31.425876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:31.427234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:31.427286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:31.429907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:31.430002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:31.430145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:31.430194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:31.431520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:31.431574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:31.431897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:31.431913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:31.431924Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:31.431934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:31.431942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:31.431973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:31.433939Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:31.458941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:31.459034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.459106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:31.459116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:31.459169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:31.459184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:31.460084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:31.460133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:31.460207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.460235Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:31.460241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:31.460247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:31.460892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.460913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:31.460920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:31.461373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.461386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:31.461393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:31.461400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:31.462004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:31.462524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:31.462572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:31.462847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:31.462881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:37.784741Z node 323 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:24:37.784781Z node 323 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index" took 43us result status StatusSuccess 2025-06-30T09:24:37.784959Z node 323 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index" PathDescription { Self { Name: "Index" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Index" LocalPathId: 4 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { 
InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:37.785030Z node 323 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:24:37.785086Z node 323 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable" took 57us result status StatusSuccess 2025-06-30T09:24:37.785261Z node 323 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 
TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "alice" } } Tuple { } } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "bob" } } Tuple { } } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\005\000\000\000alice\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409547 } TablePartitions { EndOfRangeKeyPrefix: "\002\000\003\000\000\000bob\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 3 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> KqpNotNullColumns::UpdateTable_Immediate [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::UpdateTable_Immediate [GOOD] Test command err: Trying to start YDB, gRPC: 63262, MsgBus: 24449 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ede/r3tmp/tmpTsyegA/pdisk_1.dat 2025-06-30T09:24:37.535106Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:24:37.547030Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63262, node 1 2025-06-30T09:24:37.587205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:37.587235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:37.590953Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:37.590966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:37.590969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:37.591024Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:37.592460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24449 TClient is connected to server localhost:24449 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:37.746661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:37.755300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:38.023079Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670907826022687:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.023123Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.070970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.103275Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670907826022786:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.103301Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.103503Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670907826022792:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.104445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.107305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:24:38.107526Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670907826022794:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:24:38.192005Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670907826022845:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:38.219855Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670907826022887:2314], status: PRECONDITION_FAILED, issues:
<main>: Error: Type annotation, code: 1030
<main>:1:13: Error: At function: KiWriteTable!
<main>:1:13: Error: Missing key column in input: Key for table: /Root/TestUpsertNotNullPk, code: 2029 2025-06-30T09:24:38.220378Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZjY3YzYxMjktMzJjY2ZkOTItNzVjODFiY2QtMjliYzlkZQ==, ActorId: [1:7521670907826022684:2288], ActorState: ExecuteState, TraceId: 01jz02e7679x4bje1wj3twhmam, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-30T09:24:38.224679Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670907826022896:2318], status: GENERIC_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:1:13: Error: At function: KiWriteTable!
<main>:1:47: Error: Failed to convert type: Struct<'Key':Null,'Value':String> to Struct<'Key':Uint64,'Value':String?>
<main>:1:47: Error: Failed to convert 'Key': Null to Uint64
<main>:1:47: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-30T09:24:38.224795Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZjY3YzYxMjktMzJjY2ZkOTItNzVjODFiY2QtMjliYzlkZQ==, ActorId: [1:7521670907826022684:2288], ActorState: ExecuteState, TraceId: 01jz02e76d3cndghna3ezbsvdf, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 2659, MsgBus: 31072 2025-06-30T09:24:38.412180Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670906136729885:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:38.412472Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ede/r3tmp/tmpU5dkQf/pdisk_1.dat 2025-06-30T09:24:38.439224Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2659, node 2 2025-06-30T09:24:38.462985Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:38.463001Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:38.463003Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:38.463060Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31072 2025-06-30T09:24:38.513028Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:38.513082Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:38.514131Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31072 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:24:38.563266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:38.571234Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:38.865823Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp ... 644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:43.629338Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:43.634867Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:44.023451Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670932418678944:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:44.023477Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:44.025285Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521670932418678976:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:44.026219Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:44.028842Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:24:44.028934Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670932418678978:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:24:44.119641Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670932418679029:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:44.128477Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:44.481631Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:45.241692Z node 6 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jz02eds03r57sdrpas814tf6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=YmM5NDYyYzYtZmZmNmNmYTktOWVlOGRhY2QtNGNjZjExNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-30T09:24:45.241868Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=6&id=YmM5NDYyYzYtZmZmNmNmYTktOWVlOGRhY2QtNGNjZjExNQ==, ActorId: [6:7521670932418679797:2469], ActorState: ExecuteState, TraceId: 01jz02eds03r57sdrpas814tf6, Create QueryResponse for error on request, msg: 2025-06-30T09:24:45.497815Z node 6 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jz02ee1y54rqhh6c77nbpgyt, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=OWY0MzU5MDYtMjc0YzcwM2QtNTQyZTcyYTItMjA3YzE5NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-06-30T09:24:45.497917Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=6&id=OWY0MzU5MDYtMjc0YzcwM2QtNTQyZTcyYTItMjA3YzE5NWI=, ActorId: [6:7521670936713647157:2491], ActorState: ExecuteState, TraceId: 01jz02ee1y54rqhh6c77nbpgyt, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 23084, MsgBus: 31085 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ede/r3tmp/tmpDC2bpe/pdisk_1.dat 2025-06-30T09:24:45.903502Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670936268881439:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:45.905337Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:24:45.925839Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23084, node 7 2025-06-30T09:24:45.952977Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:45.952993Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:45.952995Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:45.953061Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31085 2025-06-30T09:24:46.001290Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:46.001324Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:46.002217Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31085 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:46.042945Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:24:46.048974Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:46.387265Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670940563849263:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:46.387292Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:46.391651Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:46.435294Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670940563849413:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:46.435334Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:46.435550Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670940563849418:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:46.436755Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:46.439427Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670940563849420:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:24:46.504782Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670940563849471:2419] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:46.910036Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TPersQueueTest::MessageMetadata [GOOD] >> TPersQueueTest::LOGBROKER_7820 >> KqpNewEngine::PruneEffectPartitions-UseSink [GOOD] >> TPersQueueTest::Codecs_InitWriteSession_DefaultTopicSupportedCodecsInInitResponse [GOOD] >> TPersQueueTest::Codecs_WriteMessageWithDefaultCodecs_MessagesAreAcknowledged >> TYdbControlPlaneStorageModifyBinding::ShouldSuccess >> TYdbControlPlaneStorageModifyConnection::ShouldSuccess |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |83.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |83.8%| [LD] {RESULT} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut >> TYdbControlPlaneStorageDeleteQuery::ShouldSuccess ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PruneEffectPartitions-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 22780, MsgBus: 5678 2025-06-30T09:24:37.622643Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670900808986481:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:37.622881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001e69/r3tmp/tmpeNSSFh/pdisk_1.dat 2025-06-30T09:24:37.787502Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22780, node 1 2025-06-30T09:24:37.827681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:37.827694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:37.827696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:37.827744Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5678 TClient is connected to server localhost:5678 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:24:37.932061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:37.932085Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:37.934620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:37.936868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:37.945447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:37.960048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.028556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.072493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:38.118993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:38.217768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905103955335:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.217796Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.276328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.287606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.300734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.316900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.327365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.340496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.355276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.375541Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905103955988:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.375578Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.375701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905103955993:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.376669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.379603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:38.380000Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670905103955995:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:38.440737Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670905103956046:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:38.612135Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 10630, MsgBus: 21082 2025-06-30T09:24:39.004083Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670906100972289:2088];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:39.005791Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001e69/r3tmp/tmprr3Uud/pdisk_1.dat 2025-06-30T09:24:39.058613Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Tabl ... lpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521670937735942510:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:45.936130Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521670937735942561:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:46.230762Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 15332, MsgBus: 29334 2025-06-30T09:24:46.564281Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521670942901454757:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:46.564300Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001e69/r3tmp/tmpJy1yab/pdisk_1.dat TServer::EnableGrpc on GrpcPort 15332, node 7 2025-06-30T09:24:46.599260Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:46.602947Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:46.602965Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:46.602968Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:46.603033Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29334 2025-06-30T09:24:46.669260Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:46.669309Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:46.670491Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29334 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:24:46.724197Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:24:46.728153Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:46.744140Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:46.778990Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:46.816425Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:47.263307Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670947196423604:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:47.263339Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:47.280510Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:47.307157Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:47.326876Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:47.364278Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:47.437167Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:47.457914Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:47.479633Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:47.548690Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670947196424260:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:47.548717Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:47.548863Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521670947196424265:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:47.549983Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:47.554432Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:47.554533Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521670947196424267:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:47.569137Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:47.635750Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521670947196424327:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageCreateQuery::ShouldSucccess >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPublic >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPublic |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageListConnections::ShouldSuccess >> TYdbControlPlaneStorageDescribeQuery::ShouldSuccess |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStoragePipeline::ShouldSkipBindingIfDisabledConnection >> TYdbControlPlaneStorageListQueries::ShouldSuccess >> TYdbControlPlaneStorageModifyBinding::ShouldValidate >> TYdbControlPlaneStorageModifyQuery::ShouldSuccess >> InMemoryControlPlaneStorage::ExecuteSimpleStreamQuery >> TYdbControlPlaneStorageCreateConnection::ShouldSucccess |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageCreateQuery::ShouldSucccess [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldValidate >> KqpRanges::ValidatePredicatesDataQuery [GOOD] >> KqpReturning::Random |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPublic >> TYdbControlPlaneStorageDeleteQuery::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldValidate >> TYdbControlPlaneStorageListBindings::ShouldSuccess >> KqpNamedExpressions::NamedExpressionRandomDataQuery-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery+UseSink >> TYdbControlPlaneStorageDescribeQuery::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageDescribeQuery::ShouldValidate >> TYdbControlPlaneStorageCreateBinding::ShouldSucceed |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStoragePipeline::ShouldCheckSimplePipeline >> TYdbControlPlaneStorageModifyConnection::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldValidate >> TYdbControlPlaneStorageCreateQuery::ShouldValidate [GOOD] >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionManagePublicSuccess |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut >> TYdbControlPlaneStorageListQueries::ShouldSuccess [GOOD] >> 
TYdbControlPlaneStorageListQueries::ShouldPageToken >> TYdbControlPlaneStorageListConnections::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldPageToken |83.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |83.8%| [LD] {RESULT} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut >> TYdbControlPlaneStorageDeleteQuery::ShouldValidate [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckSuperUser >> KqpReturning::Random [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckLowerCaseName >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageGetResult::ShouldSuccess >> TYdbControlPlaneStorageDescribeQuery::ShouldValidate [GOOD] >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckSuperUser |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageCreateConnection::ShouldSucccess [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldDisableCurrentIam >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionManagePublicSuccess [GOOD] >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionManagePublicFailed >> TYdbControlPlaneStorageModifyQuery::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldModifyRunningQuery >> TYdbControlPlaneStorageModifyConnection::ShouldValidate [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckSuperUser |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpReturning::Random [GOOD] Test command err: Trying to start YDB, gRPC: 16156, MsgBus: 14176 2025-06-30T09:24:30.071290Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670872337107603:2088];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:30.071530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0020b3/r3tmp/tmpzNRXUe/pdisk_1.dat 2025-06-30T09:24:30.126815Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16156, node 1 2025-06-30T09:24:30.154989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:30.155010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:30.155012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:30.155071Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:30.161870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:30.161892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:30.163268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient 
is connected to server localhost:14176 TClient is connected to server localhost:14176 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:30.252118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:30.255445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:30.263794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:30.336540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:30.375862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:30.443935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:30.513359Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670872337109131:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.513388Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.570448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:30.588720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:30.606934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:30.630988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:30.649923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:30.662465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:30.680374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:30.695975Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670872337109784:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.696003Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.696160Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670872337109789:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:30.697148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:30.701383Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670872337109791:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:30.803537Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670872337109842:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:31.074115Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
: Warning: Type annotation, code: 1030
:4:13: Warning: At function: RemovePrefixMembers, At function: RemoveSystemMembers, At function: PersistableRepr, At function: SqlProject
:4:27: Warning: At function: Filter, At lambda, At function: Coalesce
:4:50: Warning: At function: SqlIn
:4:50: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 Trying to start YDB, gRPC: 31470, MsgBus: 19112 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0020b3/r3tmp/tmp7XCSnH/pdisk_1.dat 2025-06-30T09:24:31.434916Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670874891353147:2142];send_to=[0:7307199536658146 ... ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:47.708518Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 5440, MsgBus: 25610 2025-06-30T09:24:50.305787Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7521670959945145740:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:50.312551Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0020b3/r3tmp/tmpL9z6dd/pdisk_1.dat 2025-06-30T09:24:50.334971Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5440, node 8 2025-06-30T09:24:50.344006Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:50.344024Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:50.344026Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:50.344083Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25610 TClient is connected to server localhost:25610 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:24:50.413933Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:50.413971Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:50.414209Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:50.416755Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:50.416842Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:50.432131Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:50.459295Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:50.495090Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:50.513383Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:50.748208Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521670959945147285:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:50.748237Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:50.764520Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:50.821361Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:50.833710Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:50.848432Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:50.862324Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:50.876424Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:50.890022Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:50.957283Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521670959945147944:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:50.957311Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:50.957398Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521670959945147949:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:50.958232Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:50.960669Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7521670959945147951:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:51.036692Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7521670964240115298:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:51.189815Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:51.311346Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> TYdbControlPlaneStorageListBindings::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldFilterByName >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionManagePublicFailed [GOOD] >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionQueryInvokeSuccess |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest |83.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut >> TYdbControlPlaneStorageCreateConnection::ShouldDisableCurrentIam [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldValidate >> InMemoryControlPlaneStorage::ExecuteSimpleStreamQuery [GOOD] >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionEmpty |83.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |83.8%| [LD] {RESULT} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPreviousRevisionSuccess >> TYdbControlPlaneStorageModifyBinding::ShouldValidate [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouleCheckObjectStorageProjectionByColumns >> InMemoryControlPlaneStorage::ExecuteSimpleAnalyticsQuery >> TYdbControlPlaneStorageCreateConnection::ShouldValidate [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckUniqueName >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldSuccess >> TYdbControlPlaneStorageCreateBinding::ShouldSucceed [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckMaxLengthName |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionQueryInvokeSuccess [GOOD] >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionQueryInvokeFailed >> 
TYdbControlPlaneStorageGetResult::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageGetResult::ShouldEmpty >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldSuccess >> TYdbControlPlaneStorageModifyBinding::ShouldCheckLowerCaseName [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMaxLengthName |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TExportToS3WithRebootsTests::ShouldSucceedOnMultiShardTable [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPreviousRevisionSuccess [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldProhibitDeletionOfRunningQuery >> TYdbControlPlaneStorageModifyBinding::ShouleCheckObjectStorageProjectionByColumns [GOOD] >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageModifyConnection::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckWithoutIdempotencyKey |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPublic >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionQueryInvokeFailed [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldSuccess >> TYdbControlPlaneStorageCreateConnection::ShouldCheckUniqueName [GOOD] >> TYdbControlPlaneStorageCreateConnectionPermissions::ShouldApplyPermissionManagePublicSuccess >> TYdbControlPlaneStorageModifyQuery::ShouldModifyRunningQuery [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldValidate |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_delete_and_create_queue[std] |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> TYdbControlPlaneStorageDeleteConnection::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPermission >> TYdbControlPlaneStorageCreateBinding::ShouldCheckMaxLengthName [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckMultipleDotsName >> Initializer::Simple >> InMemoryControlPlaneStorage::ExecuteSimpleAnalyticsQuery [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldFilterByName [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldFilterByMe >> TYdbControlPlaneStorageDescribeConnection::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckPermission |83.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut >> TYdbControlPlaneStorageModifyQuery::ShouldValidate [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckSuperUser >> TPersQueueTest::Codecs_WriteMessageWithDefaultCodecs_MessagesAreAcknowledged [GOOD] >> TPersQueueTest::Codecs_WriteMessageWithNonDefaultCodecThatHasToBeConfiguredAdditionally_SessionClosedWithBadRequestError |83.8%| 
[LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |83.8%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery-UseSink >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageListQueries::ShouldPageToken [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldEmptyPageToken >> ShouldNotShowPassword::ShouldNotShowPasswordClickHouse >> TYdbControlPlaneStorageCreateConnectionPermissions::ShouldApplyPermissionManagePublicSuccess [GOOD] >> TYdbControlPlaneStorageCreateConnectionPermissions::ShouldApplyPermissionManagePublicFailed |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageDeleteQuery::ShouldProhibitDeletionOfRunningQuery [GOOD] >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionEmpty |83.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |83.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |83.9%| [LD] {RESULT} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |83.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |83.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |83.9%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMaxLengthName [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMultipleDotsName >> TYdbControlPlaneStorageGetResult::ShouldEmpty [GOOD] >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionEmpty |83.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |83.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |83.9%| [LD] {RESULT} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageModifyConnection::ShouldCheckWithoutIdempotencyKey [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckPreviousRevisionSuccess >> TYdbControlPlaneStoragePipeline::ShouldSkipBindingIfDisabledConnection [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldSaveTopicConsumers >> TYdbControlPlaneStorageCreateConnectionPermissions::ShouldApplyPermissionManagePublicFailed [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckIdempotencyKey >> TSubscriberCombinationsTest::CombinationsRootDomain [GOOD] >> TSubscriberCombinationsTest::CombinationsMigratedPath >> KqpExtractPredicateLookup::ComplexRange [GOOD] |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPrivatePublic >> 
TYdbControlPlaneStorageDeleteBinding::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckPermission >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckExist |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageListConnections::ShouldPageToken [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldEmptyPageToken >> TYdbControlPlaneStoragePipeline::ShouldCheckSimplePipeline [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldIncrementGeneration >> TYdbControlPlaneStorageModifyQuery::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckWithoutIdempotencyKey |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |83.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckExist >> ShouldNotShowPassword::ShouldNotShowPasswordClickHouse [GOOD] >> ShouldNotShowPassword::ShouldNotShowPasswordPostgreSQL |83.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |83.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpExtractPredicateLookup::ComplexRange [GOOD] Test command err: Trying to start YDB, gRPC: 4642, MsgBus: 21648 2025-06-30T09:24:30.673878Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670873352048320:2246];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:30.679015Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001f65/r3tmp/tmpLm4ZAZ/pdisk_1.dat 2025-06-30T09:24:30.846313Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4642, node 1 2025-06-30T09:24:30.887031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:30.887063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:30.891393Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:30.891407Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:30.891409Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:30.891462Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:30.891827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21648 TClient is connected to server localhost:21648 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:31.075959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:31.083232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:31.107549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.180357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.237577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.279928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.367547Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670877647016981:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.367580Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.453116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.475725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.503916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.528202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.552214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.586456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.610143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.661600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670877647017633:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.661628Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.661804Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670877647017638:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.663106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:31.669348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:31.669450Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670877647017640:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:31.671346Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:31.748302Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670877647017700:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 24230, MsgBus: 5119 2025-06-30T09:24:32.414069Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670880361861022:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:32.414260Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001f65/r3tmp/tmpr0yutd/pdisk_1.dat 2025-06-30T09:24:32.437174Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Tabl ... on_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:55.079271Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:24:55.093483Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:55.131667Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:55.189059Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:55.222715Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:55.551930Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7521670979704416571:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:55.551968Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:55.571210Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:55.641934Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:55.663814Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:55.677636Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:55.690104Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:55.722318Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:55.746600Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:55.776385Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7521670979704417227:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:55.776415Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:55.776530Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7521670979704417232:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:55.777787Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:55.780866Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7521670979704417234:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:24:55.859532Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:55.860666Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7521670979704417285:3401] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:56.213791Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:56.233386Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:56.245916Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:56.262721Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:56.291692Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:56.314667Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:56.330262Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:56.340441Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:56.364844Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:56.382847Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TYdbControlPlaneStorageCreateQuery::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCreateJob >> TYdbControlPlaneStorageCreateBinding::ShouldCheckMultipleDotsName [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckNotAvailable |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePublic |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_purge_queue[tables_format_v1-fifo] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewAst >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldValidate >> TYdbControlPlaneStorageModifyConnection::ShouldCheckPreviousRevisionSuccess [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldMoveFromScopeToPrivateWithError >> TYdbControlPlaneStorageCreateQuery::ShouldCreateJob [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckListJobs |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMultipleDotsName [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckPermission |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStoragePipeline::ShouldIncrementGeneration [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckStopModifyRun >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldValidate >> TYdbControlPlaneStoragePipeline::ShouldSaveTopicConsumers [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldSaveDqGraphs >> TYdbControlPlaneStorageListBindings::ShouldFilterByMe [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldPageToken >> 
ShouldNotShowPassword::ShouldNotShowPasswordPostgreSQL [GOOD] >> TYdbControlPlaneStorageControlQuery::ShouldSucccess >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckExist >> TYdbControlPlaneStorageCreateBinding::ShouldCheckNotAvailable [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldValidate >> TableCreator::CreateTables |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePublic [GOOD] >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePrivate >> TYdbControlPlaneStorageModifyQuery::ShouldCheckWithoutIdempotencyKey [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionQueryInvokeSuccess |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest >> TYdbControlPlaneStorageDeleteConnection::ShouldValidate [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckSuperUser >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPublic |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> TYdbControlPlaneStorageCreateQuery::ShouldCheckListJobs [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldListJobsByQuery >> TableCreator::CreateTables [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewAst [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldNotApplyPermissionViewAstAndViewQueryText >> TYdbControlPlaneStorageDescribeConnection::ShouldValidate [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckSuperUser |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest >> TYdbControlPlaneStorageModifyConnection::ShouldMoveFromScopeToPrivateWithError [GOOD] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionEmpty >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink-UseDataQuery ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest >> TableCreator::CreateTables [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003741/r3tmp/tmp1Ph96e/pdisk_1.dat 2025-06-30T09:24:59.642348Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670998458136804:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:59.642818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:24:59.729399Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:59.743326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:59.743358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-30T09:24:59.744857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9637 TServer::EnableGrpc on GrpcPort 16692, node 1 2025-06-30T09:24:59.758670Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:59.758681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:59.758684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:59.758731Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-30T09:24:59.786857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:24:59.797799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:59.798241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TYdbControlPlaneStorageListQueries::ShouldEmptyPageToken [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldValidate >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,0,0,CATEGORY_BLOOM_FILTER] >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePrivate [GOOD] >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePrivatePublic >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,0,1000000,0] >> TYdbControlPlaneStoragePipeline::ShouldSaveDqGraphs [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldSaveResultSetMetas |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest >> TYdbControlPlaneStorageModifyBinding::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckNotExistOldName |83.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |83.9%| [LD] {RESULT} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |83.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable >> TYdbControlPlaneStorageControlQuery::ShouldSucccess [GOOD] >> TYdbControlPlaneStorageControlQuery::ShouldValidate >> TYdbControlPlaneStorageCreateQuery::ShouldListJobsByQuery [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldListJobsCreatedByMe >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPublic >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,0,1000,1000000,0] >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomSelect+UseSink >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldValidate >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionQueryInvokeSuccess [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionQueryInvokeFailed >> TYdbControlPlaneStorageCreateBinding::ShouldValidate [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldValidateFormatSetting >> TYdbControlPlaneStorageListQueries::ShouldValidate [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldFilterName >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckIdempotencyKey >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldNotShowClickHousePassword |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest >> 
TYdbControlPlaneStorageControlQuery::ShouldValidate [GOOD] >> TYdbControlPlaneStorageControlQuery::ShouldCheckIdempotencyKey >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionQueryInvokeFailed [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionEmpty >> SelfHeal::TestOneInactiveMaintenanceRequestOneMaintenanceRequest4Plus2Block [GOOD] >> SelfHeal::TestOneFaultyMaintenanceRequestOneMaintenanceRequestMirror3dc >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldNotApplyPermissionViewAstAndViewQueryText [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldSuccess >> TYdbControlPlaneStorageCreateQuery::ShouldListJobsCreatedByMe [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeJob >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePrivatePublic [GOOD] >> TYdbControlPlaneStorageDescribeBinding::ShouldSuccess >> TYdbControlPlaneStoragePipeline::ShouldCheckStopModifyRun [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckJobMeta >> GenericFederatedQuery::YdbFilterPushdown >> KqpNamedExpressions::NamedExpressionRandomSelect+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomSelect-UseSink >> GenericFederatedQuery::IcebergHiveBasicSelectAll >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them >> TYdbControlPlaneStorageDeleteBinding::ShouldValidate [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckSuperUser >> TYdbControlPlaneStoragePipeline::ShouldSaveResultSetMetas [GOOD] >> TYdbControlPlaneStorageQuotas::GetDefaultQuotas >> TYdbControlPlaneStorageGetQueryStatus::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckPermission >> TPersQueueTest::LOGBROKER_7820 [GOOD] >> TPersQueueTest::InflightLimit >> YdbSdkSessionsPool::StressTestAsync/0 [GOOD] >> YdbSdkSessionsPool::StressTestAsync/1 >> TYdbControlPlaneStorageCreateBinding::ShouldValidateFormatSetting [GOOD] >> TYdbControlPlaneStorageCreateBindingPermissions::ShouldApplyPermissionManagePublicSuccess >> TYdbControlPlaneStorageDescribeConnection::ShouldNotShowClickHousePassword [GOOD] >> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionEmpty >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,0,1000000,0] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,0,1000000,0.5] >> TYdbControlPlaneStorageControlQuery::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageControlQuery::ShouldCheckPreviousRevisionFailed >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPreviousRevisionFailed >> TYdbControlPlaneStorageListQueries::ShouldFilterName [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldFilterByMe >> TPersQueueTest::Codecs_WriteMessageWithNonDefaultCodecThatHasToBeConfiguredAdditionally_SessionClosedWithBadRequestError [GOOD] >> TPersQueueTest::CreateTopicWithMeteringMode >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPublic |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePublic >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeJob [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeIncorrectJob ------- [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export_reboots_s3/unittest >> TExportToS3WithRebootsTests::ShouldSucceedOnMultiShardTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:23:13.475704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:23:13.475730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:23:13.475734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:23:13.475739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:23:13.475751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:23:13.475754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:23:13.475761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:23:13.475774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:23:13.475862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:23:13.475946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:23:13.486133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:23:13.486162Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:13.486267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:23:13.489162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:23:13.490020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:23:13.490065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:23:13.491783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:23:13.491848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:23:13.492009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:13.492070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:23:13.492979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:13.493022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:23:13.493282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:23:13.493291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:23:13.493299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:23:13.493305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:23:13.493310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:23:13.493335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:23:13.494620Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:23:13.511272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:23:13.511380Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:13.511451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:23:13.511459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:23:13.511517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:23:13.511531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:13.512385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:13.512431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:23:13.512485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:13.512494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:23:13.512498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:23:13.512502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:23:13.512916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:13.512927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:23:13.512931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:23:13.513225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:13.513235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:23:13.513239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:23:13.513246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 
2025-06-30T09:23:13.513809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:23:13.514123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:23:13.514172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:23:13.514362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:23:13.514387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... tep: 1, at schemeshard: 72057594046678944 2025-06-30T09:24:54.552551Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 1017907251311 } } Step: 5000008 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:24:54.552559Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000008, at schemeshard: 72057594046678944 2025-06-30T09:24:54.552590Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-30T09:24:54.552602Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-30T09:24:54.552607Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:24:54.552612Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-30T09:24:54.552619Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:24:54.552632Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:24:54.552643Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:24:54.552649Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 
2025-06-30T09:24:54.552656Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-30T09:24:54.552661Z node 237 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-30T09:24:54.552666Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 281474976710761:0 2025-06-30T09:24:54.552679Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:24:54.552686Z node 237 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-06-30T09:24:54.552691Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-30T09:24:54.552696Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:24:54.553277Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-30T09:24:54.553292Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-30T09:24:54.553313Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:24:54.553391Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-30T09:24:54.553398Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-30T09:24:54.553412Z node 237 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:24:54.553774Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:24:54.553849Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:24:54.553857Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:24:54.553903Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:24:54.553932Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:24:54.553938Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [237:210:2210], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-06-30T09:24:54.553944Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [237:210:2210], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-30T09:24:54.554128Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:24:54.554142Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:24:54.554148Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-30T09:24:54.554154Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-30T09:24:54.554160Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:24:54.554250Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:24:54.554262Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-30T09:24:54.554267Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-30T09:24:54.554272Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:24:54.554277Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:24:54.554288Z node 237 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-06-30T09:24:54.554294Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [237:128:2152] 2025-06-30T09:24:54.554356Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: 
TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:24:54.554366Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:24:54.554380Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:24:54.554893Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:24:54.555209Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-30T09:24:54.555236Z node 237 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6837: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-30T09:24:54.555249Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6839: Message: TxId: 281474976710761 2025-06-30T09:24:54.555261Z node 237 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-30T09:24:54.555268Z node 237 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-30T09:24:54.555275Z node 237 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 1003, itemIdx# 4294967295 2025-06-30T09:24:54.555342Z node 237 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:24:54.555664Z node 237 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 1003 2025-06-30T09:24:54.555725Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-30T09:24:54.555735Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-30T09:24:54.555815Z node 237 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-30T09:24:54.555833Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-30T09:24:54.555839Z node 237 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [237:880:2811] TestWaitNotification: OK eventTxId 1003 >> TYdbControlPlaneStorageQuotas::GetDefaultQuotas [GOOD] >> TYdbControlPlaneStorageQuotas::OverrideQuotas >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,0,1000,1000000,0] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,0,1000,1000000,0.5] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v1-fifo] >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckExist >> TYdbControlPlaneStorageDescribeBinding::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckPermission >> 
TYdbControlPlaneStorageModifyBinding::ShouldCheckNotExistOldName [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMoveToScope >> KqpNamedExpressions::NamedExpressionRandomSelect-UseSink [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,0,1000000,0.5] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,10,0,0] >> TYdbControlPlaneStorageQuotas::OverrideQuotas [GOOD] >> TYdbControlPlaneStorageQuotas::GetStaleUsage >> TYdbControlPlaneStorageListConnections::ShouldEmptyPageToken [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldValidate |83.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap |83.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap |83.9%| [LD] {RESULT} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeIncorrectJob [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeJobIncorrectVisibility >> TYdbControlPlaneStorageControlQuery::ShouldCheckPreviousRevisionFailed [GOOD] >> TYdbControlPlaneStorageControlQuery::ShouldCheckPreviousRevisionSuccess |83.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |83.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |83.9%| [LD] {RESULT} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckIdempotencyKey >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldValidate ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandomSelect-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 4383, MsgBus: 19528 2025-06-30T09:24:30.367401Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670872671048148:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:30.369542Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ffd/r3tmp/tmpiTLl99/pdisk_1.dat 2025-06-30T09:24:30.519706Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4383, node 1 2025-06-30T09:24:30.574998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:30.575014Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:30.575018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:30.575076Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:30.587115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:30.587152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:30.591122Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19528 TClient is connected to server localhost:19528 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:30.744719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:30.759017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:24:30.768155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:30.864698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:30.949469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:31.023429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:24:31.358448Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:31.462153Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670876966017042:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.462184Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.538922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.552751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.569259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.590418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.602913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.661041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.675185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:31.707125Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670876966017699:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.707148Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.708426Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670876966017704:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:31.709553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:31.713580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:31.713709Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670876966017706:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:31.776903Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670876966017757:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } UPSERT INTO [[["675e28a9-c74c-4251-9fa1-461b197549a4"]];[["95d44e33-f801-4819-8931-8945b3a0153b"]]] [[["675e28a9-c74c-4251-9fa1-461b197549a4"]];[["95d44e33-f801-4819-8931-8945b3a0153b"]]] [["758f2bb2-9a56-449c-915a-f68e8d333935"];["c489a2da-5ea3-48b6-a301-2b914221c858"]] Trying to start YDB, gRPC: 18858, MsgBus: 29303 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ffd/r3tmp/tmp9p3Vfs/pdisk_1.dat 2025-06-30T09:24:32.323068Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670880880488282:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:32.325059Z node 2 :METADA ... } 2025-06-30T09:25:02.576113Z node 19 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:02.696404Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) [[2u]] Trying to start YDB, gRPC: 16928, MsgBus: 2966 2025-06-30T09:25:03.142909Z node 20 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[20:7521671015841967828:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:03.142954Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ffd/r3tmp/tmpp6iKHw/pdisk_1.dat 2025-06-30T09:25:03.162563Z node 20 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16928, node 20 2025-06-30T09:25:03.183047Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:03.183060Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:03.183063Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:03.183122Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2966 2025-06-30T09:25:03.243203Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:03.243242Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:03.244399Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2966 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:03.273119Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:03.287776Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:03.315067Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:03.345186Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:03.361024Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:03.675795Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7521671015841969376:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:03.675827Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:03.700651Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:03.722465Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:03.734144Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:03.746536Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:03.761080Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:03.827111Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:03.844339Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:03.908376Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7521671015841970037:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:03.908408Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7521671015841970042:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:03.908411Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:03.909565Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:03.916628Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [20:7521671015841970044:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:03.979941Z node 20 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [20:7521671015841970095:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:04.146834Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:04.385689Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) [[2u]] >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,0,0,CATEGORY_BLOOM_FILTER] [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,0,0,BLOOM_FILTER] >> TYdbControlPlaneStorageQuotas::GetStaleUsage [GOOD] >> TYdbControlPlaneStorageQuotas::PushUsageUpdate >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageListQueries::ShouldFilterByMe [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldFilterType >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPreviousRevisionFailed [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPreviousRevisionSuccess >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_delete_and_create_queue[std] [GOOD] >> KqpStats::RequestUnitForSuccessExplicitPrepare >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_delete_queue[tables_format_v0-fifo] |83.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,10,0,0] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,10,0,0.5] >> TYdbControlPlaneStorageListConnections::ShouldValidate [GOOD] >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageCreateBindingPermissions::ShouldApplyPermissionManagePublicSuccess [GOOD] >> TYdbControlPlaneStorageCreateBindingPermissions::ShouldApplyPermissionManagePublicFailed >> GenericFederatedQuery::YdbFilterPushdown [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId1 >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeJobIncorrectVisibility [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldSaveQuery >> GenericFederatedQuery::IcebergHiveSaSelectAll >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,0,1000,1000000,0.5] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,1,0,0,0] >> TYdbControlPlaneStorageQuotas::PushUsageUpdate [GOOD] >> TYdbControlPlaneStorageRateLimiter::ShouldValidateCreate |84.0%| [TA] $(B)/ydb/library/table_creator/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TYdbControlPlaneStorageGetQueryStatus::ShouldValidate [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckSuperUser >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckExist >> GenericFederatedQuery::IcebergHiveBasicSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectConstant >> TYdbControlPlaneStoragePipeline::ShouldCheckJobMeta [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckClearFields >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPrivate >> KqpLimits::BigParameter >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId1 [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId2 >> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPublic >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePublic [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePrivate >> TYdbControlPlaneStorageControlQuery::ShouldCheckPreviousRevisionSuccess [GOOD] >> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageRateLimiter::ShouldValidateCreate [GOOD] >> TYdbControlPlaneStorageRateLimiter::ShouldValidateDelete >> TYdbControlPlaneStorageCreateQuery::ShouldSaveQuery [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckQueryName >> TYdbControlPlaneStorageListBindings::ShouldPageToken [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldValidate >> TYdbControlPlaneStorageListQueries::ShouldFilterType [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldFilterMode >> KqpStats::RequestUnitForSuccessExplicitPrepare [GOOD] >> KqpStats::RequestUnitForExecute >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,10,0,0.5] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,10,100,0] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_purge_queue[tables_format_v1-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_purge_queue[tables_format_v1-std] >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckPreviousRevisionFailed >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPreviousRevisionSuccess [GOOD] >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionEmpty >> KqpExplain::PrecomputeRange >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageCreateBindingPermissions::ShouldApplyPermissionManagePublicFailed [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckNotAvailable >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId2 [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMoveToScope [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckModifyTheSame |83.9%| [LD] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |84.0%| [TA] {RESULT} $(B)/ydb/library/table_creator/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results >> TYdbControlPlaneStorageRateLimiter::ShouldValidateDelete [GOOD] >> TYdbControlPlaneStorageRateLimiter::ShouldCreateRateLimiterResource >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,0,0,BLOOM_FILTER] [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,0,0.5,CATEGORY_BLOOM_FILTER] >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,1,0,0,0] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,1,0,0,0.5] >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageDescribeBinding::ShouldValidate >> KqpStats::RequestUnitForExecute [GOOD] >> KqpStats::StatsProfile >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckQueryName [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckAvailableConnections >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,10,100,0] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,10,100,0.5] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckNotAvailable [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckMaxCountConnections >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPublic >> TYdbControlPlaneStorageListBindings::ShouldValidate [GOOD] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageListQueries::ShouldFilterMode [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldFilterVisibility >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunningExtSubdomain [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectConstant >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePrivate [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePrivatePublic >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPrivate >> GenericFederatedQuery::IcebergHiveBasicSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectCount >> KqpLimits::BigParameter [GOOD] >> KqpLimits::AffectedShardsLimit >> KqpExplain::PrecomputeRange [GOOD] >> KqpExplain::PureExpr ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken [GOOD] Test command err: Trying to start YDB, gRPC: 7805, MsgBus: 2381 2025-06-30T09:25:03.201857Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671014553400890:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:03.201877Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042cd/r3tmp/tmphAetV6/pdisk_1.dat 2025-06-30T09:25:03.306102Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:03.310715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:03.310894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:03.313857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7805, node 1 2025-06-30T09:25:03.413473Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:03.413488Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:03.413490Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:03.413557Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2381 TClient is connected to server localhost:2381 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:03.581869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:03.673134Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671014553401297:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:03.673169Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:04.201995Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:04.209604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:04.303512Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671018848368725:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:04.303548Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:04.303676Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671018848368730:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:04.308118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:04.317332Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671018848368732:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:04.416533Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671018848368772:2392] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:05.037376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:05.125319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:05.257732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:05.356418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:05.446664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:05.560477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:05.583969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:05.989787Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:25:06.070374Z node 1 :KQP_GATEWAY WARN: kqp_federated_query_helpers.cpp:320: DescribePath failed,
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2136: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:2136
: Error: Endpoint list is empty for database pgdb, cluster endpoint localhost:2136. Call DescribeTable. data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "localhost" ... :06.665804Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:06.665819Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:06.665823Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:06.665875Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12951 2025-06-30T09:25:06.695119Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:06.695152Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:06.703374Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12951 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:06.759349Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:06.771161Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 9206, MsgBus: 25341 2025-06-30T09:25:07.363283Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521671031455962196:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:07.364717Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042cd/r3tmp/tmp9YspqI/pdisk_1.dat 2025-06-30T09:25:07.397070Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9206, node 3 2025-06-30T09:25:07.415052Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:07.415066Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:07.415069Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:07.415124Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25341 2025-06-30T09:25:07.470596Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:07.470643Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:07.471798Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25341 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:07.518223Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:07.523311Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:07.965048Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:07.965555Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:07.966176Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:08.085984Z node 3 :KQP_PROXY WARN: kqp_script_executions.cpp:1077: [ScriptExecutions] [TForgetScriptExecutionOperationActor] ExecutionId: , reply BAD_REQUEST, issues: {
: Error: Invalid operation id: ydb/public/sdk/cpp/src/library/operation_id/operation_id.cpp:184: Unable to find key: id } 2025-06-30T09:25:08.089673Z node 3 :KQP_PROXY WARN: kqp_script_executions.cpp:1366: [ScriptExecutions] [TGetScriptExecutionOperationActor] ExecutionId: , reply BAD_REQUEST, issues: {
: Error: Invalid operation id: ydb/public/sdk/cpp/src/library/operation_id/operation_id.cpp:184: Unable to find key: id } Trying to start YDB, gRPC: 16378, MsgBus: 20419 2025-06-30T09:25:08.408071Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671036881741673:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:08.410098Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042cd/r3tmp/tmpdguLO6/pdisk_1.dat 2025-06-30T09:25:08.431637Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:08.431971Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671036881741498:2080] 1751275508404117 != 1751275508404120 TServer::EnableGrpc on GrpcPort 16378, node 4 2025-06-30T09:25:08.443763Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:08.443779Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:08.443782Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:08.443861Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20419 2025-06-30T09:25:08.514559Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:08.514605Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:08.516220Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20419 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:08.577776Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:25:08.579913Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 >> TYdbControlPlaneStorageRateLimiter::ShouldCreateRateLimiterResource [GOOD] >> TYdbControlPlaneStorageRateLimiter::ShouldDeleteRateLimiterResource >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckPreviousRevisionFailed [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckPreviousRevisionSuccess >> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePublic >> TYdbControlPlaneStoragePipeline::ShouldCheckClearFields [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckNodesHealthCheck >> KqpStats::StatsProfile [GOOD] >> KqpStats::SelfJoin >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,10,100,0.5] [GOOD] >> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPrivate ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/console/ut/unittest >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunningExtSubdomain [GOOD] Test command err: 2025-06-30T09:20:41.119819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:41.119849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:41.119856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:41.119861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:41.119875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:41.119880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:41.119891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:41.119929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:41.120041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:41.120126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:41.124666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:20:41.124695Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-30T09:20:41.130834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:41.130938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:41.130996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046578944 2025-06-30T09:20:41.135868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:41.136008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:41.136126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.136250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: dc-1, pathId: [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:41.137087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:41.137147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:41.137369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:41.137383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:41.137488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:41.137501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046578944, domainId: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:41.137509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:41.137528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.180957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "hdd" } StoragePools { Name: "" Kind: "hdd-3" } StoragePools { Name: "" Kind: "hdd-1" } StoragePools { Name: "" Kind: "hdd-2" } } } TxId: 1 TabletId: 72057594046578944 , at schemeshard: 72057594046578944 2025-06-30T09:20:41.181042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //dc-1, opId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.181129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 0 2025-06-30T09:20:41.181139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046578944, LocalPathId: 1] source path: 
2025-06-30T09:20:41.181190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046578944 2025-06-30T09:20:41.181207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:41.182187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046578944 PathId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.182258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //dc-1 2025-06-30T09:20:41.182325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.182337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046578944 2025-06-30T09:20:41.182345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:41.182352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:41.183461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.183484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046578944 2025-06-30T09:20:41.183492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:41.184061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.184075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:41.184084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.184093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:41.185074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046578944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:41.185585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046578944 to tablet: 72057594046316545 
cookie: 0:1 msg type: 269090816 2025-06-30T09:20:41.185645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:41.185899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.185910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:20:41.185931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.419221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:41.419296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046578944, at schemeshard: 72057594046578944 2025-06-30T09:20:41.419309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.419423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:41.419437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:41.419480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 1 2025-06-30T09:20:41.419497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:41.420373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:41.420390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046578944, txId: 1, path id: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:41.42044 ... 
e 108 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6723: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-30T09:24:31.893711Z node 108 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-30T09:24:31.893740Z node 108 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-30T09:24:32.106357Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { ReadStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" } } } 2025-06-30T09:24:32.106649Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd-2" NumGroups: 1000 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046578944 X2: 3 } } } } 2025-06-30T09:24:32.113337Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:24:32.113458Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:24:37.985330Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { ReadStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" } } } 2025-06-30T09:24:37.985552Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd-2" NumGroups: 1000 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046578944 X2: 3 } } } } 2025-06-30T09:24:37.990461Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:24:37.990625Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:24:44.029520Z node 108 :BS_CONTROLLER DEBUG: 
{BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { ReadStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" } } } 2025-06-30T09:24:44.029744Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd-2" NumGroups: 1000 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046578944 X2: 3 } } } } 2025-06-30T09:24:44.034637Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:24:44.034821Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:24:50.038985Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { ReadStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" } } } 2025-06-30T09:24:50.039208Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd-2" NumGroups: 1000 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046578944 X2: 3 } } } } 2025-06-30T09:24:50.043737Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:24:50.043853Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:24:56.089769Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { ReadStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" } } } 2025-06-30T09:24:56.089991Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd-2" NumGroups: 1000 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046578944 X2: 3 } } } } 
2025-06-30T09:24:56.093511Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:24:56.093694Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:25:02.154912Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { ReadStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" } } } 2025-06-30T09:25:02.155203Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd-2" NumGroups: 1000 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046578944 X2: 3 } } } } 2025-06-30T09:25:02.159585Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:25:02.159704Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:25:07.363204Z node 108 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6723: Handle: TEvRunConditionalErase, at schemeshard: 72057594046578944 2025-06-30T09:25:07.363269Z node 108 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046578944 2025-06-30T09:25:07.363300Z node 108 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046578944 2025-06-30T09:25:08.182046Z node 108 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6723: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-30T09:25:08.182107Z node 108 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-30T09:25:08.182132Z node 108 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-30T09:25:08.438388Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest 
Request# {Command { ReadStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" } } } 2025-06-30T09:25:08.438600Z node 108 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 Name: "/dc-1/users/tenant-1:hdd-2" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd-2" NumGroups: 1000 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046578944 X2: 3 } } } } 2025-06-30T09:25:08.442407Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-30T09:25:08.442477Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} >> TYdbControlPlaneStorageDescribeBinding::ShouldValidate [GOOD] >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckSuperUser >> TYdbControlPlaneStorageCreateConnection::ShouldCheckMaxCountConnections [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckIdempotencyKey ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::RestoreFirstLevelVariants[2,false,1,10,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 11134, MsgBus: 19702 2025-06-30T09:25:01.444475Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671004627781584:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:01.444731Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003ba9/r3tmp/tmpwLt2gv/pdisk_1.dat 2025-06-30T09:25:01.696258Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671004627781470:2080] 1751275501430663 != 1751275501430666 2025-06-30T09:25:01.700452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:01.700494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:01.702955Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:01.706170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11134, node 1 2025-06-30T09:25:01.994013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:01.994032Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:01.994035Z node 
1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:01.994099Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19702 TClient is connected to server localhost:19702 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:02.208769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:02.246915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2); 2025-06-30T09:25:02.283308Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671008922749394:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:02.283340Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:02.445500Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:02.703895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:25:02.725071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:02.725125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:25:02.725201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:25:02.725223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:25:02.725243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:25:02.725261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:25:02.725283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:25:02.725309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:25:02.725330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:25:02.725356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:25:02.725377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:25:02.725399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671008922749488:2296];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:25:02.726076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:25:02.726085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:25:02.726099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:25:02.726104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:25:02.726135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:25:02.726140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:25:02.726152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:25:02.726157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:25:02.726167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:25:02.726172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:25:02.726195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:25:02.726201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:25:02.726220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:25:02.726228Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:25:02.726243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-0 ... _COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:25:09.727453Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:25:09.727459Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:25:09.727483Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:25:09.727520Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:25:09.727537Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:25:09.727544Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:25:09.727552Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:25:09.727560Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:25:09.727566Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:25:09.727640Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:25:09.727654Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:25:09.727671Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:25:09.727680Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:25:09.762260Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[6:7521671041490946051:2295];ev=NActors::IEventHandle;tablet_id=72075186224037889;tx_id=281474976715658;this=18140798961056;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275509762;max=18446744073709551615;plan=0;src=[6:7521671041490945722:2171];cookie=22:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:25:09.765502Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:25:09.766835Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:25:09.767073Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:25:09.767887Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:25:09.776663Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671041490946137:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:09.776691Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:09.784523Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `SCAN_FIRST_LEVEL_ONLY`=`true`, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`false`, `COLUMNS_LIMIT`=`1`, `SPARSED_DETECTOR_KFF`=`10`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:25:09.787080Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:09.787246Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:09.800925Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671041490946171:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:09.800952Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:09.805850Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:09.809206Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:09.809461Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1", "d" : null, "e.v" : {"c" : 1, "e" : {"c.a" : 2}}}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3", "d" : "d3", "e" : ["a", {"v" : ["c", 5]}]}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}')) 2025-06-30T09:25:09.820315Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671041490946206:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:09.820351Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:09.820406Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671041490946211:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:09.821420Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:09.824569Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671041490946213:2324], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:25:09.912930Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671041490946264:2453] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:09.970164Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:25:09.970466Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":\"{\\\"c\\\":1,\\\"e\\\":{\\\"c.a\\\":2}}\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":\"[\\\"a\\\",{\\\"v\\\":[\\\"c\\\",5]}]\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] OUTPUT: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":\"{\\\"c\\\":1,\\\"e\\\":{\\\"c.a\\\":2}}\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":\"[\\\"a\\\",{\\\"v\\\":[\\\"c\\\",5]}]\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] INDEX:0/0/0 HEADER:0/0/0 2025-06-30T09:25:10.269941Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,1,0,0,0.5] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,1,0,100,0] >> KqpLimits::AffectedShardsLimit [GOOD] >> KqpLimits::CancelAfterRoTx >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPrivatePublic >> TYdbControlPlaneStorageModifyBinding::ShouldCheckModifyTheSame [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckSuperUser >> TYdbControlPlaneStoragePipeline::ShouldCheckNodesHealthCheck [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckResultSetMeta >> KqpExplain::PureExpr [GOOD] >> KqpExplain::ReadTableRangesFullScan >> TYdbControlPlaneStorageRateLimiter::ShouldDeleteRateLimiterResource [GOOD] >> TYdbControlPlaneStorageTest::ShouldCreateTable >> TYdbControlPlaneStorageCreateQuery::ShouldCheckAvailableConnections [GOOD] >> KqpLimits::OutOfSpaceBulkUpsertFail >> TYdbControlPlaneStorageListQueries::ShouldFilterVisibility [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldFilterAutomatic >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivateAfterModify >> KqpStats::SelfJoin [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,0,0.5,CATEGORY_BLOOM_FILTER] [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,0,0.5,BLOOM_FILTER] >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckPreviousRevisionSuccess [GOOD] >> 
TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionEmpty >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_purge_queue[tables_format_v1-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_purge_queue_batch[tables_format_v0] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPrivatePublic >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPrivatePublic >> TConsoleConfigSubscriptionTests::TestConfigNotificationRetries [GOOD] >> TConsoleConfigSubscriptionTests::TestConfigSubscriptionsCleanup >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPublic >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageTest::ShouldCreateTable [GOOD] >> TYdbControlPlaneStorageWriteResultData::ShouldValidateWrite >> KqpAnalyze::AnalyzeTable+ColumnStore |84.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |84.0%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> KqpStats::MultiTxStatsFullExpYql ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpStats::SelfJoin [GOOD] Test command err: Trying to start YDB, gRPC: 6118, MsgBus: 7172 2025-06-30T09:25:06.125193Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671024902527739:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:06.125997Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004256/r3tmp/tmpvD0laS/pdisk_1.dat 2025-06-30T09:25:06.221368Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:06.221477Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671024902527535:2080] 1751275506121851 != 1751275506121854 TServer::EnableGrpc on GrpcPort 6118, node 1 2025-06-30T09:25:06.245345Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:06.245361Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:06.245364Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:06.245453Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-30T09:25:06.291214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:06.291262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:06.295902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7172 TClient is connected to server localhost:7172 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:06.423086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:06.432340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:06.444246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:06.532008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:06.589639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:06.667856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:06.834305Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671024902529144:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:06.834350Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:06.916244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:06.941121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:06.965948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:06.983379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:07.008164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:07.061259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:07.081035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:07.108806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671029197497092:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:07.108839Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:07.109007Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671029197497097:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:07.110346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:07.115625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-30T09:25:07.115718Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671029197497099:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:07.129478Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:07.196601Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671029197497159:3401] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 4707, MsgBus: 18291 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004256/r3tmp/tmp31FARx/pdisk_1.dat 2025-06-30T09:25:07.940892Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:25:07.941711Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were n ... te: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:11.549500Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Plans":[{"PlanNodeId":7,"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"E-Size":"0","PlanNodeId":3,"LookupKeyColumns":["Key"],"Node Type":"TableLookupJoin","Path":"\/Root\/TwoShard","Columns":["Key"],"E-Rows":"0","Table":"TwoShard","Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["TwoShard"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/TwoShard","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"TwoShard","ReadColumns":["Key"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node 
Type":"Stage","Stats":{"UseLlvm":"undefined","Output":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"ActiveMessageMs":{"Count":2,"Max":2,"Min":1},"FirstMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12,"History":[1,36,2,48]}},"Name":"RESULT","Push":{"LastMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Chunks":{"Count":2,"Sum":6,"Max":3,"Min":3},"ResumeMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"FirstMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"ActiveMessageMs":{"Count":2,"Max":2,"Min":1},"WaitTimeUs":{"Count":2,"Sum":2848,"Max":2009,"Min":839,"History":[1,839,2,2848]},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1}}}],"MaxMemoryUsage":{"Count":2,"Sum":2097152,"Max":1048576,"Min":1048576,"History":[1,1048576,2,2097152]},"ResultRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Tasks":2,"ResultBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"OutputRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"FinishedTasks":2,"IngressRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"PhysicalStageId":0,"StageDurationUs":1000,"Table":[{"Path":"\/Root\/TwoShard","ReadRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"ReadBytes":{"Count":2,"Sum":48,"Max":24,"Min":24}}],"BaseTimeMs":1751275511735,"OutputBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"CpuTimeUs":{"Count":2,"Sum":339,"Max":197,"Min":142,"History":[1,142,2,339]},"Ingress":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"ActiveMessageMs":{"Count":2,"Max":2,"Min":1},"FirstMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"Bytes":{"Count":2,"Sum":96,"Max":48,"Min":48,"History":[1,48,2,96]}},"External":{},"Name":"KqpReadRangesSource","Ingress":{},"Push":{"LastMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"ResumeMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"FirstMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"ActiveMessageMs":{"Count":2,"Max":2,"Min":1},"Bytes":{"Count":2,"Sum":96,"Max":48,"Min":48,"History":[1,48,2,96]},"WaitTimeUs":{"Count":2,"Sum":2877,"Max":2034,"Min":843,"History":[1,843,2,2877]},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1}}}],"UpdateTimeMs":2}}],"PlanNodeType":"Connection","E-Cost":"0"}],"Node 
Type":"Collect","Stats":{"UseLlvm":"undefined","Output":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"ActiveMessageMs":{"Count":2,"Max":2,"Min":1},"FirstMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12}},"Name":"6","Push":{"LastMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Chunks":{"Count":2,"Sum":6,"Max":3,"Min":3},"ResumeMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"FirstMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"ActiveMessageMs":{"Count":2,"Max":2,"Min":1},"PauseMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"WaitTimeUs":{"Count":2,"Sum":3377,"Max":2203,"Min":1174},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1},"WaitMessageMs":{"Count":2,"Max":2,"Min":1}}}],"MaxMemoryUsage":{"Count":2,"Sum":2097152,"Max":1048576,"Min":1048576,"History":[2,1048576,3,2097152]},"InputBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"Tasks":2,"OutputRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"FinishedTasks":2,"InputRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"PhysicalStageId":1,"StageDurationUs":1000,"Table":[{"Path":"\/Root\/TwoShard","ReadRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"ReadBytes":{"Count":2,"Sum":24,"Max":12,"Min":12}}],"BaseTimeMs":1751275511735,"WaitInputTimeUs":{"Count":2,"Sum":2662,"Max":1832,"Min":830,"History":[2,830,3,2662]},"OutputBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"CpuTimeUs":{"Count":2,"Sum":163,"Max":83,"Min":80,"History":[2,83,3,163]},"UpdateTimeMs":2,"Input":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"ActiveMessageMs":{"Count":2,"Max":2,"Min":1},"FirstMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12}},"Name":"2","Push":{"LastMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"ResumeMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"FirstMessageMs":{"Count":2,"Sum":3,"Max":2,"Min":1},"ActiveMessageMs":{"Count":2,"Max":2,"Min":1},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"WaitTimeUs":{"Count":2,"Sum":2635,"Max":1908,"Min":727},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1}}}]}}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":5}],"Name":"Limit","Limit":"1001"}],"Node 
Type":"Limit","Stats":{"UseLlvm":"undefined","Output":[{"Pop":{"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"ActiveMessageMs":{"Count":1,"Max":2,"Min":1},"FirstMessageMs":{"Count":1,"Sum":1,"Max":1,"Min":1},"Bytes":{"Count":1,"Sum":27,"Max":27,"Min":27},"ActiveTimeUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000}},"Name":"8","Push":{"LastMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"Chunks":{"Count":1,"Sum":6,"Max":6,"Min":6},"ResumeMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"FirstMessageMs":{"Count":1,"Sum":1,"Max":1,"Min":1},"ActiveMessageMs":{"Count":1,"Max":2,"Min":1},"ActiveTimeUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000},"WaitTimeUs":{"Count":1,"Sum":2057,"Max":2057,"Min":2057},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1}}}],"MaxMemoryUsage":{"Count":1,"Sum":1048576,"Max":1048576,"Min":1048576,"History":[3,1048576]},"DurationUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000},"InputBytes":{"Count":1,"Sum":48,"Max":48,"Min":48},"Tasks":1,"OutputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"FinishedTasks":1,"InputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"PhysicalStageId":2,"StageDurationUs":1000,"BaseTimeMs":1751275511735,"WaitInputTimeUs":{"Count":1,"Sum":930,"Max":930,"Min":930,"History":[3,930]},"OutputBytes":{"Count":1,"Sum":27,"Max":27,"Min":27},"CpuTimeUs":{"Count":1,"Sum":227,"Max":227,"Min":227,"History":[3,227]},"UpdateTimeMs":2,"Input":[{"Pop":{"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"ActiveMessageMs":{"Count":1,"Max":2,"Min":1},"FirstMessageMs":{"Count":1,"Sum":1,"Max":1,"Min":1},"Bytes":{"Count":1,"Sum":48,"Max":48,"Min":48},"ActiveTimeUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000}},"Name":"4","Push":{"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"ResumeMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"FirstMessageMs":{"Count":1,"Sum":1,"Max":1,"Min":1},"ActiveMessageMs":{"Count":1,"Max":2,"Min":1},"Bytes":{"Count":1,"Sum":48,"Max":48,"Min":48},"ActiveTimeUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000},"WaitTimeUs":{"Count":1,"Sum":1549,"Max":1549,"Min":1549},"WaitPeriods":{"Count":1,"Sum":2,"Max":2,"Min":2}}}]}}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":7}],"Name":"Limit","Limit":"1001"}],"Node 
Type":"Limit","Stats":{"UseLlvm":"undefined","OutputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"PhysicalStageId":3,"FinishedTasks":1,"InputBytes":{"Count":1,"Sum":27,"Max":27,"Min":27},"DurationUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000},"MaxMemoryUsage":{"Count":1,"Sum":1048576,"Max":1048576,"Min":1048576,"History":[3,1048576]},"BaseTimeMs":1751275511735,"Output":[{"Pop":{"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"FirstMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"Bytes":{"Count":1,"Sum":27,"Max":27,"Min":27,"History":[3,27]}},"Name":"RESULT","Push":{"WaitTimeUs":{"Count":1,"Sum":2105,"Max":2105,"Min":2105,"History":[3,2105]},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1},"Chunks":{"Count":1,"Sum":6,"Max":6,"Min":6},"ResumeMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"FirstMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2}}}],"CpuTimeUs":{"Count":1,"Sum":130,"Max":130,"Min":130,"History":[3,130]},"StageDurationUs":1000,"WaitInputTimeUs":{"Count":1,"Sum":785,"Max":785,"Min":785,"History":[3,785]},"ResultRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"ResultBytes":{"Count":1,"Sum":27,"Max":27,"Min":27},"OutputBytes":{"Count":1,"Sum":27,"Max":27,"Min":27},"Input":[{"Pop":{"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"FirstMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"Bytes":{"Count":1,"Sum":27,"Max":27,"Min":27}},"Name":"6","Push":{"LastMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"ResumeMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"FirstMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"Bytes":{"Count":1,"Sum":27,"Max":27,"Min":27},"WaitTimeUs":{"Count":1,"Sum":2112,"Max":2112,"Min":2112},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1}}}],"UpdateTimeMs":3,"InputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"Tasks":1}}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"Compilation":{"FromCache":false,"DurationUs":143207,"CpuTimeUs":133873},"ProcessCpuTimeUs":142,"TotalDurationUs":151446,"ResourcePoolId":"default","QueuedTimeUs":282},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":7,"Plans":[{"PlanNodeId":9,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/TwoShard","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"TwoShard","ReadColumns":["Key"],"E-Cost":"0"}],"Node Type":"TableFullScan"},{"Operators":[{"E-Rows":"0","Columns":["Key"],"E-Size":"0","E-Cost":"0","Name":"TableLookup","Table":"TwoShard","LookupKeyColumns":["Key"]}],"Node Type":"TableLookup","PlanNodeType":"TableLookup"}],"Operators":[{"Name":"LookupJoin","LookupKeyColumns":["Key"]}],"Node Type":"LookupJoin","PlanNodeType":"Connection"}],"Operators":[{"A-Rows":6,"A-SelfCpu":0.227,"A-Cpu":0.227,"A-Size":27,"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Operators":[{"A-Rows":6,"A-SelfCpu":0.13,"A-Cpu":0.357,"A-Size":27,"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","PlanNodeType":"Query"}} >> 
GenericFederatedQuery::IcebergHiveSaSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectCount >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePrivatePublic [GOOD] >> TYdbControlPlaneStorageNodesHealthCheck::ShouldValidate >> KqpExplain::ReadTableRangesFullScan [GOOD] >> KqpExplain::ReadTableRanges >> TYdbControlPlaneStorageCreateConnection::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckLowerCaseName >> GenericFederatedQuery::IcebergHiveBasicSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageWriteResultData::ShouldValidateWrite [GOOD] >> TYdbControlPlaneStorageWriteResultData::ShouldValidateRead >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v1-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v1-std] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPublic >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink+UseDataQuery >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,1,0,100,0] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,1,0,100,0.5] >> TYdbControlPlaneStorageNodesHealthCheck::ShouldValidate [GOOD] >> TYdbControlPlaneStoragePingTask::ShouldValidate >> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePublic [GOOD] >> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePrivate >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v1-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_generates_event[tables_format_v0] >> TYdbControlPlaneStorageListQueries::ShouldFilterAutomatic [GOOD] >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionEmpty >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_generates_event[tables_format_v0] [SKIPPED] >> TYdbControlPlaneStorageWriteResultData::ShouldValidateRead [GOOD] >> TYdbControlPlaneStorageWriteResultData::ShouldSuccess >> KqpExplain::ExplainStream >> TYdbControlPlaneStoragePingTask::ShouldValidate [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckAbortInTerminatedState ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageCreateQuery::ShouldCheckAvailableConnections [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? I 08:27 0:54 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? 
S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? S 08:27 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/0] root 18 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/0:1-rcu_par_gp] root 19 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/1] root 22 0.0 0.0 0 0 ? S 08:27 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/2] root 28 0.0 0.0 0 0 ? S 08:27 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/3] root 34 0.0 0.0 0 0 ? S 08:27 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/4] root 40 0.0 0.0 0 0 ? S 08:27 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/5] root 46 0.0 0.0 0 0 ? S 08:27 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/6] root 52 0.0 0.0 0 0 ? S 08:27 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/6] root 55 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/7] root 58 0.0 0.0 0 0 ? S 08:27 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/7:0H-events_highpri] root 62 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/8] root 63 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/8] root 64 0.0 0.0 0 0 ? S 08:27 0:02 [migration/8] root 65 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/8] root 67 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/8:0H-events_highpri] root 68 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/9] root 69 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/9] root 70 0.0 0.0 0 0 ? S 08:27 0:02 [migration/9] root 71 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/9] root 73 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/9:0H-events_highpri] root 74 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/10] root 75 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/10] root 76 0.0 0.0 0 0 ? S 08:27 0:02 [migration/10] root 77 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/10] root 78 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/10:0-rcu_gp] root 79 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/10:0H-events_highpri] root 80 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/11] root 81 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/11] root 82 0.0 0.0 0 0 ? S 08:27 0:02 [migration/11] root 83 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/11] root 85 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/11:0H-events_highpri] root 86 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/12] root 87 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/12] root 88 0.0 0.0 0 0 ? S 08:27 0:02 [migration/12] root 89 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/12] root 91 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/12:0H-events_highpri] root 92 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/13] root 93 0.0 0.0 0 0 ? 
S 08:27 0:00 [idle_inject/13] root 94 0.0 0.0 0 0 ? S 08:27 0:02 [migration/13] root 95 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/13] root 97 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/13:0H-events_highpri] root 98 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/14] root 99 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/14] root 100 0.0 0.0 0 0 ? S 08:27 0:02 [migration/14] root 101 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/14] root 102 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/14:0-rcu_gp] root 103 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/14:0H-kblockd] root 104 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/15] root 105 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/15] root 106 0.0 0.0 0 0 ? S 08:27 0:02 [migration/15] root 107 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/15] root 109 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/15:0H-events_highpri] root 110 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/16] root 111 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/16] root 112 0.0 0.0 0 0 ? S 08:27 0:02 [migration/16] root 113 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/16] root 115 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/16:0H-events_highpri] root 116 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/17] root 117 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/17] root 118 0.0 0.0 0 0 ? S 08:27 0:02 [migration/17] root 119 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/17] root 121 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/17:0H-events_highpri] root 122 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/18] root 123 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/18] root 124 0.0 0.0 0 0 ? S 08:27 0:02 [migration/18] root 125 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/19] root 130 0.0 0.0 0 0 ? S 08:27 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/20] root 136 0.0 0.0 0 0 ? S 08:27 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/20] root 138 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/20:0-rcu_gp] root 139 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/21] root 142 0.0 0.0 0 0 ? S 08:27 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/22] root 148 0.0 0.0 0 0 ? S 08:27 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/22] root 151 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/23] root 154 0.0 0.0 0 0 ? S 08:27 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/24] root 160 0.0 0.0 0 0 ? S 08:27 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/25] ... NE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries". 
Create session OK 2025-06-30T09:25:09.602807Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:09.602810Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:09.603117Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants". Create session OK 2025-06-30T09:25:09.603130Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:09.603131Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:09.603211Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets". Create session OK 2025-06-30T09:25:09.603219Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:09.603220Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:09.603273Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-30T09:25:09.603279Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:09.603281Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:09.607898Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/mappings". 
Create session OK 2025-06-30T09:25:09.607911Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:09.607913Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:09.658828Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:09.658854Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:09.735508Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:09.735537Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:09.771598Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:09.771631Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:09.771713Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:09.771722Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:09.771949Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:09.771965Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:09.772327Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:09.772342Z node 17 :YQ_CONTROL_PLANE_STORAGE 
DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:09.772513Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:09.772522Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:09.772614Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:09.772625Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:09.772720Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:09.772729Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:09.772814Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:09.772816Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:09.772842Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:09.772848Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:09.772884Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:09.772886Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:09.772943Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:09.772944Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:09.772984Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:09.772986Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:09.773029Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:09.773033Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants": >> KqpStats::MultiTxStatsFullExpYql [GOOD] >> KqpStats::MultiTxStatsFullExpScan >> KqpExplain::ReadTableRanges [GOOD] >> KqpExplain::Predicates >> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPrivatePublic >> TYdbControlPlaneStorageCreateConnection::ShouldCheckLowerCaseName [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckMaxLengthName >> TYdbControlPlaneStorageWriteResultData::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckWithoutIdempotencyKey >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPublic >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,0,0.5,BLOOM_FILTER] [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,1000,0,CATEGORY_BLOOM_FILTER] >> TYdbControlPlaneStoragePipeline::ShouldCheckResultSetMeta [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckRemovingOldResultSet >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPrivatePublic >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivateAfterModify [GOOD] >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivatePublic >> KqpExplain::ExplainStream [GOOD] >> KqpExplain::ExplainScanQueryWithParams >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,1,0,100,0.5] [GOOD] >> KqpExplain::Predicates [GOOD] >> KqpExplain::Explain >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPrivate >> KqpStats::MultiTxStatsFullExpScan 
[GOOD] >> KqpStats::JoinStatsBasicYql+StreamLookupJoin >> TYdbControlPlaneStorageCreateConnection::ShouldCheckMaxLengthName [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckMultipleDotsName >> GenericFederatedQuery::IcebergHiveSaSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveSaFilterPushdown ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? I 08:27 0:54 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? S 08:27 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/0] root 18 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/0:1-rcu_par_gp] root 19 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/1] root 22 0.0 0.0 0 0 ? S 08:27 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/2] root 28 0.0 0.0 0 0 ? S 08:27 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/3] root 34 0.0 0.0 0 0 ? S 08:27 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/4] root 40 0.0 0.0 0 0 ? S 08:27 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/5] root 46 0.0 0.0 0 0 ? S 08:27 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/6] root 52 0.0 0.0 0 0 ? S 08:27 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/6] root 55 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/7] root 58 0.0 0.0 0 0 ? S 08:27 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/7:0H-events_highpri] root 62 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/8] root 63 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/8] root 64 0.0 0.0 0 0 ? S 08:27 0:02 [migration/8] root 65 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/8] root 67 0.0 0.0 0 0 ? 
I< 08:27 0:00 [kworker/8:0H-events_highpri] root 68 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/9] root 69 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/9] root 70 0.0 0.0 0 0 ? S 08:27 0:02 [migration/9] root 71 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/9] root 73 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/9:0H-events_highpri] root 74 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/10] root 75 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/10] root 76 0.0 0.0 0 0 ? S 08:27 0:02 [migration/10] root 77 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/10] root 78 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/10:0-rcu_gp] root 79 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/10:0H-events_highpri] root 80 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/11] root 81 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/11] root 82 0.0 0.0 0 0 ? S 08:27 0:02 [migration/11] root 83 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/11] root 85 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/11:0H-events_highpri] root 86 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/12] root 87 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/12] root 88 0.0 0.0 0 0 ? S 08:27 0:02 [migration/12] root 89 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/12] root 91 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/12:0H-events_highpri] root 92 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/13] root 93 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/13] root 94 0.0 0.0 0 0 ? S 08:27 0:02 [migration/13] root 95 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/13] root 97 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/13:0H-events_highpri] root 98 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/14] root 99 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/14] root 100 0.0 0.0 0 0 ? S 08:27 0:02 [migration/14] root 101 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/14] root 102 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/14:0-rcu_gp] root 103 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/14:0H-kblockd] root 104 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/15] root 105 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/15] root 106 0.0 0.0 0 0 ? S 08:27 0:02 [migration/15] root 107 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/15] root 109 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/15:0H-events_highpri] root 110 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/16] root 111 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/16] root 112 0.0 0.0 0 0 ? S 08:27 0:02 [migration/16] root 113 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/16] root 115 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/16:0H-events_highpri] root 116 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/17] root 117 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/17] root 118 0.0 0.0 0 0 ? S 08:27 0:02 [migration/17] root 119 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/17] root 121 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/17:0H-events_highpri] root 122 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/18] root 123 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/18] root 124 0.0 0.0 0 0 ? S 08:27 0:02 [migration/18] root 125 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/19] root 130 0.0 0.0 0 0 ? S 08:27 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/20] root 136 0.0 0.0 0 0 ? S 08:27 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/20] root 138 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/20:0-rcu_gp] root 139 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/21] root 142 0.0 0.0 0 0 ? 
S 08:27 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/22] root 148 0.0 0.0 0 0 ? S 08:27 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/22] root 151 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/23] root 154 0.0 0.0 0 0 ? S 08:27 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/24] root 160 0.0 0.0 0 0 ? S 08:27 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/25] ... TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks". Create session OK 2025-06-30T09:25:11.816444Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:11.816445Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:11.817569Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys". Create session OK 2025-06-30T09:25:11.817578Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:11.817581Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:11.817660Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases". 
Create session OK 2025-06-30T09:25:11.817665Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:11.817666Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:11.818135Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections". Create session OK 2025-06-30T09:25:11.818142Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:11.818144Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:11.868518Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:11.868531Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:11.993128Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:11.993147Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:11.993426Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:11.993430Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:11.998212Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:11.998228Z node 17 
:YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:11.999725Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:11.999737Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:11.999958Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:11.999963Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:12.000038Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:12.000041Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:12.000088Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:12.000090Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:12.000147Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:12.000149Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:12.000207Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:12.000209Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:12.000314Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:12.000317Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:12.000375Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:12.000378Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:12.000425Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:12.000427Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:12.000529Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:12.000532Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:12.000615Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:12.000617Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas": >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPrivate ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> 
GenericFederatedQuery::IcebergHiveBasicFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 29563, MsgBus: 20743 2025-06-30T09:25:03.199590Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671013966067419:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:03.199628Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042d4/r3tmp/tmpcC8xIN/pdisk_1.dat 2025-06-30T09:25:03.306094Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:03.315130Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:03.315178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:03.323281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29563, node 1 2025-06-30T09:25:03.416221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:03.416235Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:03.416238Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:03.416308Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20743 TClient is connected to server localhost:20743 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:03.581868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:03.767427Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671013966067989:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:03.767456Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:04.204015Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:04.210939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:04.300755Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671018261035417:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:04.300787Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:04.300877Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671018261035422:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:04.307489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:04.313718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-30T09:25:04.313818Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671018261035424:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-30T09:25:04.387196Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671018261035464:2393] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:05.042325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:05.122951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:05.227160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:05.333871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:05.425495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:05.519946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:05.539561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:05.928890Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:25:05.945387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:05.946049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:05.946391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710694:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" ... 
480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:14.517831Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:14.630996Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:14.744370Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:14.854465Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:14.966618Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:15.069978Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.084805Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:15.535196Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::SwitchAccessorCompactionVariants[1,true,1,0,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 8060, MsgBus: 17004 2025-06-30T09:25:01.513885Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671006786585775:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:01.513917Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003daa/r3tmp/tmpHIVWV6/pdisk_1.dat 2025-06-30T09:25:01.693282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:01.693329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:01.700849Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671006786585755:2080] 1751275501513751 != 1751275501513754 2025-06-30T09:25:01.702943Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:01.705607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8060, node 1 2025-06-30T09:25:01.987373Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:01.987387Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:01.987391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:01.987453Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17004 TClient is connected to server localhost:17004 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
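[Editor's note] For readability, the three YQL statements this KqpOlapJson::SwitchAccessorCompactionVariants test executes (they appear inline in the EXECUTE: records below) are restated here verbatim from the log, with only line breaks and indentation added:

    -- Copied verbatim from the EXECUTE: log records below; formatting only.
    CREATE TABLE `/Root/ColumnTable` (
        Col1 Uint64 NOT NULL,
        Col2 JsonDocument,
        PRIMARY KEY (Col1)
    )
    PARTITION BY HASH(Col1)
    WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1);

    ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE)
        SET (ACTION=UPSERT_OPTIONS, `COMPACTION_PLANNER.CLASS_NAME`=`l-buckets`);

    REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES
        (1u, JsonDocument('{"a" : "a1"}')),
        (2u, JsonDocument('{"a" : "a2"}')),
        (3u, JsonDocument('{"b" : "b3"}')),
        (4u, JsonDocument('{"b" : "b4", "a" : "a4"}'));

Judging from the log alone, the ALTER OBJECT step appears to switch the column table's compaction planner to the l-buckets strategy before the four JsonDocument rows are written.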
2025-06-30T09:25:02.208765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:02.239045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:25:02.280263Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671011081553684:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:02.280300Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:02.516571Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:02.703692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:25:02.722443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:02.722507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:25:02.722577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:25:02.722601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:25:02.722627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:25:02.722648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:25:02.722671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:25:02.722692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:25:02.722713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:25:02.722835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:25:02.722869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:25:02.722893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671011081553753:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:25:02.724132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:25:02.724156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:25:02.724172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:25:02.724177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:25:02.724218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:25:02.724237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:25:02.724249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:25:02.724254Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:25:02.724263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:25:02.724269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:25:02.724299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:25:02.724305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:25:02.724327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:25:02.724342Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:25:02.724357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06- ... nt=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:25:14.365727Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:25:14.365752Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:25:14.365761Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:25:14.365781Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:25:14.365789Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:25:14.365801Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:25:14.365811Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:25:14.365818Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:25:14.365915Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:25:14.365923Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:25:14.365939Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:25:14.365943Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:25:14.368247Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[6:7521671062332080932:2294];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=126844264568128;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275514368;max=18446744073709551615;plan=0;src=[6:7521671058037113322:2164];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:25:14.373364Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:25:14.374763Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `COMPACTION_PLANNER.CLASS_NAME`=`l-buckets`) 2025-06-30T09:25:14.389764Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671062332081005:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:14.389795Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:14.390440Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:14.394016Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3"}')), (4u, JsonDocument('{"b" : "b4", "a" : "a4"}')) 2025-06-30T09:25:14.410841Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671062332081036:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:14.410898Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:14.411142Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671062332081041:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:14.412393Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:14.416557Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671062332081043:2315], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:25:14.516068Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671062332081094:2407] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:14.539169Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=6;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:14.543258Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:25:14.551765Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:14.555171Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`, `COLUMNS_LIMIT`=`1`, `SPARSED_DETECTOR_KFF`=`0`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:25:14.567126Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:14.569513Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715665; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(11u, JsonDocument('{"a" : "1a1"}')), (12u, JsonDocument('{"a" : "1a2"}')), (13u, JsonDocument('{"b" : "1b3"}')), (14u, JsonDocument('{"b" : "1b4", "a" : "a4"}')) 2025-06-30T09:25:14.602646Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=16;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1) VALUES(10u) 2025-06-30T09:25:14.611300Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667;
2025-06-30T09:25:14.641060Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=22;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled;
2025-06-30T09:25:14.645002Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669;
WAIT_COMPACTION: 0
2025-06-30T09:25:14.745346Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
WAIT_COMPACTION: 0
WAIT_COMPACTION: 0
COMPACTION_HAPPENED: 0 -> 1
EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1;
COMPARE: [[1u;["{\"a\":\"a1\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4\"}"]];[10u;#];[11u;["{\"a\":\"1a1\"}"]];[12u;["{\"a\":\"1a2\"}"]];[13u;["{\"b\":\"1b3\"}"]];[14u;["{\"a\":\"a4\",\"b\":\"1b4\"}"]]]
OUTPUT: [[1u;["{\"a\":\"a1\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4\"}"]];[10u;#];[11u;["{\"a\":\"1a1\"}"]];[12u;["{\"a\":\"1a2\"}"]];[13u;["{\"b\":\"1b3\"}"]];[14u;["{\"a\":\"a4\",\"b\":\"1b4\"}"]]]
INDEX:0/0/0 HEADER:0/0/0
>> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePrivate [GOOD]
>> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePrivatePublic
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpExplain::Predicates [GOOD]
Test command err:
Trying to start YDB, gRPC: 8123, MsgBus: 30027
2025-06-30T09:25:08.444068Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671035721559192:2071];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:25:08.444554Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004250/r3tmp/tmpZmc7s9/pdisk_1.dat
2025-06-30T09:25:08.594815Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:25:08.595486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:25:08.595513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:25:08.601974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 8123, node 1
2025-06-30T09:25:08.623345Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:25:08.623362Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:25:08.623365Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:25:08.623416Z node 1
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30027 TClient is connected to server localhost:30027 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:08.726391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:08.732994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:08.743279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:08.833052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:08.892282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:08.919837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
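For readability, here is the full YQL sequence the ColumnTable test further above executed against `/Root/ColumnTable`, collected verbatim from the EXECUTE: entries in this log (an editorial consolidation: the initial CREATE TABLE is outside this excerpt, and the grouping into one script is an assumption):

-- Switch the compaction planner, then write the first batch of JSON documents.
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `COMPACTION_PLANNER.CLASS_NAME`=`l-buckets`);
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES
    (1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')),
    (3u, JsonDocument('{"b" : "b3"}')), (4u, JsonDocument('{"b" : "b4", "a" : "a4"}'));
-- Switch the scan reader policy and move Col2 onto the SUB_COLUMNS data accessor.
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`);
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2,
    `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`,
    `COLUMNS_LIMIT`=`1`, `SPARSED_DETECTOR_KFF`=`0`, `MEM_LIMIT_CHUNK`=`100`,
    `OTHERS_ALLOWED_FRACTION`=`0.5`);
-- Second batch, written under the new accessor, plus one row with no Col2 value.
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES
    (11u, JsonDocument('{"a" : "1a1"}')), (12u, JsonDocument('{"a" : "1a2"}')),
    (13u, JsonDocument('{"b" : "1b3"}')), (14u, JsonDocument('{"b" : "1b4", "a" : "a4"}'));
REPLACE INTO `/Root/ColumnTable` (Col1) VALUES (10u);
-- Read-back that produced the COMPARE/OUTPUT lines above; the two match,
-- so the data survived the accessor change and compaction (row 10 prints as #, i.e. NULL Col2).
PRAGMA OptimizeSimpleILIKE;
PRAGMA AnsiLike;
SELECT * FROM `/Root/ColumnTable` ORDER BY Col1;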
2025-06-30T09:25:09.096875Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671040016528072:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:09.096910Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:09.160920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:09.176345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:09.199347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:09.209900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:09.224596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:09.239933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:09.257451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:09.280146Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671040016528726:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:09.280196Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:09.280350Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671040016528731:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:09.281412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:09.285884Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671040016528733:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:09.342658Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671040016528785:3402] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:09.446298Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"TopSort","Limit":"4","TopSortBy":"row.Data"},{"Scan":"Parallel","ReadRange":["Key [150, 266]"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"1","Table":"EightShard","ReadColumns":["Data","Key","Text"],"E-Cost":"0"}],"Node Type":"TopSort-TableRangeScan"}],"Node Type":"Merge","SortColumns":["Data (Asc)"],"PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"Name":"Limit","Limit":"4"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Eig ... lumns":["Key","Value"],"E-Cost":"0","ReadRangesExpectedSize":2}],"Node Type":"TableRangeScan"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 62263, MsgBus: 22154 2025-06-30T09:25:14.636569Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521671061077411047:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:14.636598Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004250/r3tmp/tmpHzZOR6/pdisk_1.dat 2025-06-30T09:25:14.662792Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:14.666850Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7521671061077411013:2080] 1751275514636311 != 1751275514636314 TServer::EnableGrpc on GrpcPort 62263, node 5 2025-06-30T09:25:14.691370Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:14.691393Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:14.691395Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:14.691457Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22154 2025-06-30T09:25:14.747797Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:14.747840Z node 
5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:22154 2025-06-30T09:25:14.749920Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:14.758887Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:14.761235Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:14.766605Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:14.780223Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:14.816199Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
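The first explain plan printed above (the single-line JSON from the KqpExplain::Predicates run) reads, innermost node first: a parallel TableRangeScan of `/Root/EightShard` over the range Key [150, 266] returning Data, Key and Text; a TopSort on row.Data with limit 4; a sorted Merge on Data (Asc); and a final Limit 4. A query of roughly this shape would produce such a plan; this is a hypothetical reconstruction, since the query text itself does not appear in this excerpt:

-- Hypothetical query matching the plan above (not taken from the log):
SELECT Data, Key, Text
FROM `/Root/EightShard`
WHERE Key >= 150 AND Key <= 266
ORDER BY Data
LIMIT 4;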
2025-06-30T09:25:14.833774Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:15.179142Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671065372379906:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:15.179176Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:15.191481Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.203200Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.217905Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.231641Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.244481Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.259320Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.275665Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.291462Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671065372380559:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:15.291505Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:15.291602Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671065372380564:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:15.292791Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:15.298576Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7521671065372380566:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:15.384052Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521671065372380617:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:15.589093Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.638590Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPrivate |84.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them [GOOD] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckPermission >> KqpExplain::ExplainScanQueryWithParams [GOOD] >> KqpExplain::FewEffects+UseSink >> TYdbControlPlaneStoragePipeline::ShouldCheckAbortInTerminatedState [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckAst >> KqpExplain::Explain [GOOD] >> KqpExplain::ExplainDataQuery >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageGetTask::ShouldValidate >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPrivatePublic >> KqpStats::JoinStatsBasicYql+StreamLookupJoin [GOOD] >> KqpStats::JoinStatsBasicYql-StreamLookupJoin >> TYdbControlPlaneStorageCreateConnection::ShouldCheckMultipleDotsName [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckAllowedSymbolsName >> KqpQuery::YqlSyntaxV0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageWriteResultData::ShouldSuccess [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? I 08:27 0:54 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? 
S 08:27 0:00 [rcu_tasks_trace]
[~150 further Process stat rows elided: idle per-CPU kernel threads (ksoftirqd/N, migration/N, idle_inject/N, cpuhp/N, kworker/N:*) up to cpuhp/25, all at or near 0.0 %CPU / 0.0 %MEM; the dump breaks off mid-listing in the original log.]
... :TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/quotas".
Create session OK 2025-06-30T09:25:14.414233Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:14.414235Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:14.414396Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenant_acks". Create session OK 2025-06-30T09:25:14.414405Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:14.414406Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:14.414440Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/idempotency_keys". Create session OK 2025-06-30T09:25:14.414444Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:14.414446Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:14.414534Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenants". Create session OK 2025-06-30T09:25:14.414537Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:14.414538Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:14.417197Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/connections". 
Create session OK 2025-06-30T09:25:14.417210Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:14.417213Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:14.417479Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/queries". Create session OK 2025-06-30T09:25:14.417486Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:14.417487Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:14.439254Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:14.439273Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:14.475700Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:14.475722Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:14.481679Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:14.481701Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:14.483370Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:14.483405Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:14.483708Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/nodes" 
2025-06-30T09:25:14.483717Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:14.483823Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:14.483829Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:14.483929Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:14.483936Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:14.488932Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:14.488951Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:14.489153Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:14.489168Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:14.489172Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:14.489174Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:14.489237Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:14.489239Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:14.489272Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:14.489276Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:14.489407Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:14.489411Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:14.489435Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:14.489442Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:14.489542Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:14.489549Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/connections": >> KqpLimits::StreamWrite+Allowed >> KqpLimits::KqpMkqlMemoryLimitException >> TYdbControlPlaneStorageModifyQuery::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckExist >> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionEmpty >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,1000,0,CATEGORY_BLOOM_FILTER] [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,1000,0,BLOOM_FILTER] >> KqpExplain::UpdateConditional+UseSink >> TYdbControlPlaneStorageGetTask::ShouldValidate [GOOD] >> TYdbControlPlaneStorageGetTask::ShouldWorkWithEmptyPending >> KqpExplain::FewEffects+UseSink [GOOD] >> KqpExplain::FewEffects-UseSink |84.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |84.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator >> TPersQueueTest::CreateTopicWithMeteringMode [GOOD] >> TPersQueueTest::DefaultMeteringMode >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckAllowedSymbolsName >> 
TYdbControlPlaneStorageModifyQuery::ShouldCheckExist [GOOD]
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckIdempotencyKey
>> GenericFederatedQuery::IcebergHiveSaFilterPushdown [GOOD]
>> KqpExplain::ExplainDataQuery [GOOD]
>> KqpExplain::ExplainDataQueryWithParams
>> KqpStats::JoinStatsBasicYql-StreamLookupJoin [GOOD]
>> TYdbControlPlaneStorageModifyBinding::ShouldCheckWithoutIdempotencyKey [GOOD]
>> TYdbControlPlaneStorageModifyBinding::ShouldCheckPreviousRevisionFailed
>> KqpQuery::YqlSyntaxV0 [GOOD]
>> KqpQuery::YqlTableSample
>> KqpOlapJson::FilterVariantsCount[10,true,1024,0,1000000,0]
>> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_purge_queue_batch[tables_format_v0] [GOOD]
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveSaFilterPushdown [GOOD]
Test command err:
Trying to start YDB, gRPC: 25636, MsgBus: 16146
2025-06-30T09:25:06.595149Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671028383261057:2094];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:25:06.597435Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0042c8/r3tmp/tmpKhaPcD/pdisk_1.dat
2025-06-30T09:25:06.714569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:25:06.714605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:25:06.716505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-30T09:25:06.725870Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 25636, node 1
2025-06-30T09:25:06.755029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:25:06.755048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:25:06.755051Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:25:06.755104Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:16146
TClient is connected to server localhost:16146
WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:06.866620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:06.871469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:07.259593Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671032678228883:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:07.259626Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:07.597065Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:07.600384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:07.640926Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671032678229010:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:07.640980Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:07.641185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671032678229016:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:07.642212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:07.644292Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671032678229018:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:07.715877Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671032678229058:2391] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:07.892503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:07.994540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:08.106895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:08.204202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:08.286107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:08.372428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:08.384269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:08.761725Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:25:08.783300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:08.783750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:08.783999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715694:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } ... 
t\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:17.548849Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:17.682823Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:17.793839Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:17.917405Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:17.995035Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:18.090543Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.111415Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:18.564722Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 |84.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> TYdbControlPlaneStorageGetTask::ShouldWorkWithEmptyPending [GOOD] >> TYdbControlPlaneStorageGetTask::ShouldBatchingGetTasks >> TYdbControlPlaneStoragePipeline::ShouldCheckRemovingOldResultSet [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckPrioritySelectionEntities >> TYdbControlPlaneStorageCreateConnection::ShouldCheckAllowedSymbolsName [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckCommitTransactionWrite |84.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |84.0%| [LD] {RESULT} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |84.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |84.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpStats::JoinStatsBasicYql-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 2313, MsgBus: 16805 2025-06-30T09:25:13.064823Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671058463096682:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:13.064849Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00424b/r3tmp/tmpdjdIS1/pdisk_1.dat 2025-06-30T09:25:13.207594Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2313, node 1 2025-06-30T09:25:13.251033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:13.251051Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:13.251055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:13.251112Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:13.279023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:13.279057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:13.280617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16805 TClient is connected to server localhost:16805 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:13.388859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:13.392463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:25:13.399047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:13.471139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:13.501899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:13.519052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:13.776864Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671058463098242:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:13.776900Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:13.833227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:13.846324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:13.865336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:13.925673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:13.936589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:13.958891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:13.971561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:13.995931Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671058463098895:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:13.995995Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:13.996120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671058463098900:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:13.997277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:14.000883Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671058463098902:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:14.072325Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:14.092576Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671062758066258:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:14.319916Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521671062758066570:2475] TxId: 281474976715673. Ctx: { TraceId: 01jz02facv45jjz74hgy9xd6j4, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmQ2ZTJiZDEtY2NhODM5NWEtYThiMDAzNjAtZTdiNWRlYzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-30T09:25:14.348071Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275514359, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 3731, MsgBus: 29078 2025-06-30T09:25:14.667535Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671060859163088:2243];sen ... RVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521671071511190278:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:16.856631Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671071511190329:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:17.089674Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 20393, MsgBus: 28534 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00424b/r3tmp/tmpU3Uqim/pdisk_1.dat 2025-06-30T09:25:17.606895Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:25:17.611386Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:17.614839Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671071914446841:2080] 1751275517580493 != 1751275517580496 TServer::EnableGrpc on GrpcPort 20393, node 4 2025-06-30T09:25:17.630493Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:17.630510Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:17.630512Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:17.630567Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28534 2025-06-30T09:25:17.687416Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:17.687473Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:17.688241Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28534 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:17.777965Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:17.780472Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:25:17.788136Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:17.823873Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:17.857948Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:17.880670Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:18.152278Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671076209415739:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.152315Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.165189Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.194608Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.207702Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.233357Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.255499Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.275162Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.291156Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.314479Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671076209416389:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.314518Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.314626Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671076209416394:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.315876Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:18.319800Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671076209416396:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:18.390103Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671076209416447:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:18.580997Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpExplain::ExplainDataQueryWithParams [GOOD] >> KqpExplain::CreateTableAs+Stats >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPrivatePublic >> KqpExplain::FewEffects-UseSink [GOOD] >> KqpExplain::FullOuterJoin >> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePrivatePublic [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckLowerCaseName >> TYdbControlPlaneStoragePipeline::ShouldCheckAst [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckAstClear >> KqpExplain::UpdateConditional+UseSink [GOOD] >> KqpExplain::UpdateConditional-UseSink >> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPublic >> TYdbControlPlaneStorageModifyQuery::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckPreviousRevisionFailed |84.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |84.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPrivatePublic >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPrivatePublic |84.0%| [TA] $(B)/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpLimits::KqpMkqlMemoryLimitException [GOOD] >> KqpLimits::LargeParametersAndMkqlFailure >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPublic >> KqpQuery::YqlTableSample [GOOD] >> KqpQuery::UpdateWhereInSubquery >> KqpOlapJson::FilterVariantsCount[10,true,1024,0,1000000,0] [GOOD] >> KqpOlapJson::FilterVariantsCount[10,true,1024,0,1000000,0.5] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPrivatePublic >> TYdbControlPlaneStorageModifyBinding::ShouldCheckAllowedSymbolsName [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckExist >> KqpExplain::CreateTableAs+Stats [GOOD] >> KqpExplain::CreateTableAs-Stats >> TYdbControlPlaneStorageGetTask::ShouldBatchingGetTasks [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldEmptyPageToken >> KqpQuery::QueryCacheTtl >> KqpExplain::FullOuterJoin [GOOD] >> KqpExplain::UpdateConditional-UseSink [GOOD] >> KqpExplain::UpdateConditionalKey+UseSink >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,1000,0,BLOOM_FILTER] [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,1000,0.5,BLOOM_FILTER] >> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPrivate >> KqpQuery::UpdateWhereInSubquery [GOOD] >> KqpQuery::UpdateThenDelete-UseSink >> TYdbControlPlaneStorageModifyQuery::ShouldCheckPreviousRevisionFailed [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckPreviousRevisionSuccess >> KqpExplain::CreateTableAs-Stats [GOOD] >> KqpOlapJson::FilterVariantsCount[10,true,1024,0,1000000,0.5] [GOOD] >> KqpOlapJson::FilterVariantsCount[10,true,1024,10,0,0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpExplain::FullOuterJoin [GOOD] Test command err: Trying to start YDB, gRPC: 19857, MsgBus: 27984 2025-06-30T09:25:14.623943Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671059996555748:2187];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00424a/r3tmp/tmpe4XnyK/pdisk_1.dat 2025-06-30T09:25:14.667615Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:14.684797Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:14.685005Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671059996555572:2080] 1751275514609192 != 1751275514609195 TServer::EnableGrpc on GrpcPort 19857, node 1 2025-06-30T09:25:14.711367Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:14.711382Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:14.711384Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:14.711436Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is 
connected to server localhost:27984 2025-06-30T09:25:14.760075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:14.760116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:14.761125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27984 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:14.790597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:14.804447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:14.870444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:14.898602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:14.957081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:15.138644Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671064291524468:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:15.138688Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:15.193219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.203087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.215854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.229216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.244106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.257244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.272163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:15.288754Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671064291525119:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:15.288796Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:15.288845Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671064291525124:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:15.289913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:15.300494Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671064291525126:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:15.363399Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671064291525177:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[],"Iterator":"precompute_0_0","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_0"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"},{"PlanNodeId":6,"Subplan Name":"CTE precompute_0_0","Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"Tables":["EightShard"],"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/KeyValue","E-Rows":"0","Table":"KeyValue","ReadColumns":["Key"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Broadcast","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Aggregate","Phase":"Intermediate"},{"Inputs":[{"InternalOperatorId":2},{"ExternalPlanNodeId":2}],"E-Rows":"0","Condition":"t.Data = kv.Key","Name":"InnerJoin (MapJoin)","E-Size":"0","E-Cost":"0"},{"Inputs":[{ ... 1671084200738954:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:19.959906Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00424a/r3tmp/tmptnhASt/pdisk_1.dat 2025-06-30T09:25:19.977530Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4750, node 5 2025-06-30T09:25:19.987398Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:19.987411Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:19.987413Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:19.987465Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62839 TClient is connected to server localhost:62839 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:20.060952Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:20.060993Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:20.061913Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:25:20.067498Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:20.078260Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:20.104048Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:20.169553Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:20.186813Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:20.431391Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671088495707826:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:20.431523Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:20.457210Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:20.471350Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:20.502789Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:20.520561Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:20.534986Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:20.550956Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:20.568502Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:20.587688Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671088495708477:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:20.587724Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:20.588095Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671088495708482:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:20.589270Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:20.592328Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7521671088495708484:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:20.684119Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521671088495708535:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:20.907917Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:20.961706Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:21.038604Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:21.053715Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TYdbControlPlaneStorageModifyBinding::ShouldCheckPreviousRevisionFailed [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckPreviousRevisionSuccess >> KqpLimits::LargeParametersAndMkqlFailure [GOOD] >> KqpLimits::ManyPartitions >> TYdbControlPlaneStorageCreateBinding::ShouldCheckLowerCaseName [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckAllowedSymbolsName >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckPermission >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_action_which_does_not_requere_existing_queue ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpExplain::CreateTableAs-Stats [GOOD] Test command err: Trying to start YDB, gRPC: 3239, MsgBus: 27406 2025-06-30T09:25:16.108324Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671070939484528:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:16.108808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004246/r3tmp/tmpi8x7jM/pdisk_1.dat 2025-06-30T09:25:16.209595Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:16.212906Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:16.212947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 3239, node 1 2025-06-30T09:25:16.231220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:16.240376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:16.240391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:16.240394Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:16.240449Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27406 TClient is connected to server localhost:27406 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:16.363451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:16.376842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:16.401525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:16.427580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:16.445822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:16.684028Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671070939486084:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:16.684072Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:16.739732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:16.749683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:16.762048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:16.776453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:16.790573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:16.804128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:16.818322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:16.835133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671070939486737:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:16.835160Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:16.835171Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671070939486742:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:16.836133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:16.845184Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671070939486744:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:16.939263Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671070939486796:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:17.109994Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[],"Iterator":"precompute_0_0","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_0"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"},{"PlanNodeId":6,"Subplan Name":"CTE precompute_0_0","Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"Tables":["EightShard"],"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/KeyValue","E-Rows":"0","Table":"KeyValue","ReadColumns":["Key"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Broadcast","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Aggregate","Phase":"Intermediate"},{"Inputs":[{"InternalOperatorId":2},{"ExternalPlanNodeId":2}],"E-Rows":"0","Condition":"t.Data = kv.Key","Name":"InnerJoin (MapJoin)","E-Size":"0","E-Cost":"0"},{"Inputs":[{"InternalOperatorId":3}],"E-Rows":"0", ... }} PLAN::{"Plan":{"Plans":[{"Tables":["test\/test2\/Destination3"],"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, +∞)"],"type":"FullScan"}]},{"name":"\/Root\/test\/test2\/Destination3","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 4536, MsgBus: 25985 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004246/r3tmp/tmpcxdODp/pdisk_1.dat 2025-06-30T09:25:20.957136Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: 
LookupError; 2025-06-30T09:25:20.962455Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:20.962578Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7521671088976149199:2080] 1751275520932763 != 1751275520932766 TServer::EnableGrpc on GrpcPort 4536, node 5 2025-06-30T09:25:20.970980Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:20.970997Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:20.971000Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:20.971064Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25985 2025-06-30T09:25:21.039123Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:21.039166Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:21.039956Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25985 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:25:21.071657Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:21.075830Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:21.542671Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671093271117117:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:21.542705Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:21.542822Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671093271117129:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:21.543897Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:21.548183Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:25:21.548338Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7521671093271117131:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:25:21.610936Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521671093271117182:2329] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:21.624703Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) PLAN::{"Plan":{"Plans":[{"Tables":["Destination"],"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/Destination","Name":"FillTable","Table":"Destination","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Destination","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]},{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/Destination","Name":"FillTable","Table":"Destination","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} PLAN::{"Plan":{"Plans":[{"Tables":["test\/Destination2"],"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/test\/Destination2","Name":"FillTable","Table":"test\/Destination2","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, +∞)"],"type":"FullScan"}]},{"name":"\/Root\/test\/Destination2","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/test\/Destination2","Name":"FillTable","Table":"test\/Destination2","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node 
Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} PLAN::{"Plan":{"Plans":[{"Tables":["test\/test2\/Destination3"],"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, +∞)"],"type":"FullScan"}]},{"name":"\/Root\/test\/test2\/Destination3","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} 2025-06-30T09:25:21.940870Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPrivate >> KqpQuery::UpdateThenDelete-UseSink [GOOD] >> KqpExplain::UpdateConditionalKey+UseSink [GOOD] >> KqpExplain::UpdateConditionalKey-UseSink >> TYdbControlPlaneStorageModifyBinding::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckIdempotencyKey >> TYdbControlPlaneStoragePipeline::ShouldCheckAstClear [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckAutomaticTtl >> KqpLimits::ManyPartitions [GOOD] >> KqpLimits::ManyPartitionsSorting >> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPrivatePublic |84.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> TYdbControlPlaneStorageModifyQuery::ShouldCheckPreviousRevisionSuccess [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckQueryName >> KqpOlapJson::FilterVariantsCount[10,true,1024,10,0,0] [GOOD] >> KqpOlapJson::FilterVariantsCount[10,true,1024,10,0,0.5] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::UpdateThenDelete-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 30833, MsgBus: 18618 2025-06-30T09:25:17.911082Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671074764755953:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:17.911190Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/004245/r3tmp/tmpuJzmIj/pdisk_1.dat 2025-06-30T09:25:17.995358Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671074764755742:2080] 1751275517905311 != 1751275517905314 2025-06-30T09:25:17.997144Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30833, node 1 2025-06-30T09:25:18.034946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:18.034960Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:18.034963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:18.035009Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18618 2025-06-30T09:25:18.076278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:18.076315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:18.077482Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18618 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:18.119679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:18.122774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:18.137207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:18.204062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:18.234078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:18.246795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:18.403266Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671079059724649:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.403301Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.451006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.466162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.477422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.494224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.505863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.519468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.533050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.549848Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671079059725300:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.549877Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671079059725305:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.549881Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.550630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:18.559905Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671079059725307:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:18.653265Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671079059725358:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:18.874387Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521671079059725647:2478], status: GENERIC_ERROR, issues:
:3:26: Error: mismatched input '[' expecting {'*', '(', '@', '$', ABORT, ACTION, ADD, AFTER, ALL, ALTER, ANALYZE, AND, ANSI, ANY, ARRAY, AS, ASC, ASSUME, ASYMMETRIC, ASYNC, AT, ATTACH, ATTRIBUTES, AUTOINCREMENT, BACKUP, BATCH, COLLECTION, BEFORE, BEGIN, BERNOULLI, BETWEEN, BITCAST, BY, CALLABLE, CASCADE, CASE, CAST, CHANGEFEED, CHECK, CLASSIFIER, COLLATE, COLUMN, COLUMNS, COMMIT, COMPACT, CONDITIONAL, CONFLICT, CONNECT, CONSTRAINT, CONSUMER, COVER, CREATE, CROSS, CUBE, CURRENT, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DATA, DATABASE, DECIMAL, DECLARE, DEFAULT, DEFERRABLE, DEFERRED, DEFINE, DELETE, DESC, DESCRIBE, DETACH, DICT, DIRECTORY, ... schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:21.346292Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521671093217562878:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:21.416671Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671093217562929:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 2623, MsgBus: 25796 2025-06-30T09:25:21.888044Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671088990631544:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:21.888355Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004245/r3tmp/tmp87f0tp/pdisk_1.dat 2025-06-30T09:25:21.905262Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:21.905574Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671088990631431:2080] 1751275521887414 != 1751275521887417 TServer::EnableGrpc on GrpcPort 2623, node 4 2025-06-30T09:25:21.916863Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:21.916877Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:21.916880Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:21.916941Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25796 TClient is connected to server localhost:25796 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:21.994790Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:21.994833Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:21.995206Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:21.995947Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:25:22.003226Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:25:22.008893Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.022707Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:22.053146Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:22.072862Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:22.309196Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671093285600323:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.309238Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.321089Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.332509Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.340754Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.354775Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.369030Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.383276Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.397065Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.415580Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671093285600972:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.415628Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.415671Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671093285600977:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.416567Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:22.425083Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671093285600979:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:22.496200Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671093285601033:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } [] >> TYdbControlPlaneStoragePipeline::ShouldCheckPrioritySelectionEntities [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckResultSetLimit |84.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |84.0%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> KqpQuery::PreparedQueryInvalidate |84.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckLimit >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckExist >> KqpExplain::UpdateConditionalKey-UseSink [GOOD] >> KqpLimits::ManyPartitionsSorting [GOOD] >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPermission >> TYdbControlPlaneStorageCreateBinding::ShouldCheckAllowedSymbolsName [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckMaxCountBindings >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,1000,0.5,BLOOM_FILTER] [GOOD] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPrivatePublic >> KqpOlapJson::FilterVariantsCount[10,true,1024,10,0,0.5] [GOOD] >> KqpOlapJson::FilterVariantsCount[10,true,1024,10,100,0] >> KqpQuery::QueryTimeout ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateConditionalKey-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 25671, MsgBus: 10079 2025-06-30T09:25:18.541338Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671076567209986:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:18.541476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00423f/r3tmp/tmpzEykHO/pdisk_1.dat TServer::EnableGrpc on GrpcPort 25671, node 1 2025-06-30T09:25:18.666354Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:18.674883Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7521671076567209801:2080] 1751275518536957 != 1751275518536960 2025-06-30T09:25:18.690642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:18.690656Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:18.690660Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:18.690715Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:18.720589Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:18.720627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:18.721732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10079 TClient is connected to server localhost:10079 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:18.835639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:18.842075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:18.852144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:18.906090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:18.975986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:19.047164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:19.258247Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671080862178704:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:19.258358Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:19.314922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:19.324195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:19.338137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:19.352282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:19.408598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:19.421725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:19.436328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:19.456024Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671080862179358:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:19.456054Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:19.456100Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671080862179363:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:19.457201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:19.462948Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671080862179365:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:19.537032Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:19.557832Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671080862179425:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } {"Plan":{"Plans":[{"Tables":["EightShard"],"PlanNodeId":5,"Operators":[{"Inputs":[],"Path":"\/Root\/EightShard","Name":"Upsert","SinkType":"KqpTableSink","Table":"EightShard"}],"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Operators":[{"E-Rows":"0","Inputs":[{"ExternalPlanNodeId":1}],"Predicate":"item.Data \u003E 0","E-Cost":"0","E-Size":"0","Name":"Filter"}],"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"0","ReadRangesPointPrefixLen":"0","Table":"EightShard","ReadColumns":["Data", ... utdated, will use file: (empty maybe) 2025-06-30T09:25:23.129631Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:23.129646Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:23.129702Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3867 TClient is connected to server localhost:3867 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:23.205271Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:23.205310Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:23.205704Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
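Note: the plan JSON truncated above is the KqpExplain::UpdateConditionalKey output: a parallel TableFullScan of /Root/EightShard reading Data and Key, a Filter on item.Data > 0, and an Upsert sink back into EightShard (the second, range-scan variant appears further below with point ranges Key [100, 100], [200, 200], [300, 300]). The EightShard schema itself is a KQP test fixture that this excerpt never prints; a hypothetical reconstruction of the fixture and of the two update shapes that would yield these plans (column set inferred from the plan's ReadColumns ["Data","Key"]):

-- Hypothetical fixture: EightShard as used by the KQP test suite.
CREATE TABLE EightShard (
    Key Uint64,
    Text String,
    Data Int32,
    PRIMARY KEY (Key)
) WITH (UNIFORM_PARTITIONS = 8);

-- Shape matching the full-scan plan: filter on Data, upsert back.
UPDATE EightShard SET Data = Data + 1 WHERE Data > 0;

-- Shape matching the range-scan plan: point lookups on the key column.
UPDATE EightShard SET Data = Data + 1 WHERE Key IN (100, 200, 300);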
2025-06-30T09:25:23.206218Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:23.219918Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:23.233562Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:23.255616Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:23.269009Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:23.606957Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671101857713760:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:23.607002Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:23.616586Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:23.628359Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:23.641174Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:23.653349Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:23.663948Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:23.721588Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:23.734309Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:23.756861Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671101857714411:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:23.756902Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:23.756958Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671101857714416:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:23.758143Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:23.760885Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671101857714418:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:23.817349Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671101857714472:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } {"Plan":{"Plans":[{"PlanNodeId":8,"Plans":[{"Tables":["EightShard"],"PlanNodeId":7,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/EightShard","Name":"Upsert","Table":"EightShard"},{"Inputs":[],"Iterator":"precompute_1_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_1_0"}],"Node Type":"Effect"},{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key [100, 100]","Key [200, 200]","Key [300, 300]"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"0","ReadRangesPointPrefixLen":"1","ReadRangesKeys":["Key"],"Table":"EightShard","ReadColumns":["Data","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Stage"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","reads":[{"columns":["Data","Key"],"scan_by":["Key [100, 100]","Key [200, 200]","Key [300, 300]"],"type":"Scan"}],"writes":[{"columns":["Data","Key"],"type":"MultiUpsert"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/EightShard","Name":"Upsert","Table":"EightShard"}],"Plans":[{"PlanNodeId":8,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key [100, 100]","Key [200, 200]","Key [300, 300]"],"Name":"TableRangeScan","Path":"\/Root\/EightShard","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Key"],"Table":"EightShard","ReadColumns":["Data","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> KqpStats::StreamLookupStats+StreamLookupJoin >> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCheckLimit ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpLimits::ManyPartitionsSorting [GOOD] Test command err: Trying to start YDB, gRPC: 5079, MsgBus: 31606 2025-06-30T09:25:18.110231Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671076864820351:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:18.110870Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/004243/r3tmp/tmplvfdmR/pdisk_1.dat 2025-06-30T09:25:18.208019Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:18.208443Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671076864820250:2080] 1751275518103297 != 1751275518103300 TServer::EnableGrpc on GrpcPort 5079, node 1 2025-06-30T09:25:18.237602Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:18.237616Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:18.237617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:18.237676Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31606 2025-06-30T09:25:18.275749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:18.275797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:18.276718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31606 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:18.351952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:18.363169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:18.367335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:25:18.445548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.508474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:18.526332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.832465Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671076864821850:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.832501Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:18.896074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.910759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.926095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.982985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:18.996898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:19.017753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:19.029720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:19.090606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671081159789806:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:19.090640Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:19.090765Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671081159789811:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:19.091853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:19.099294Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671081159789813:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:19.111777Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:19.153330Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671081159789873:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:19.320295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:20.140732Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [1:7521671085454758199:2576] TxId: 281474976715673. Ctx: { TraceId: 01jz02fg3f6vc68kppqc712gwq, Database: /Root, DatabaseId: /Root, SessionId: ydb:// ... 0T09:25:23.886883Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671101701726317:2596], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:23.941328Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671101701726370:5081] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } query_phases { duration_us: 14409 table_access { name: "/Root/ManyShardsTable" reads { rows: 1100 bytes: 8800 } partitions_count: 100 } cpu_time_us: 14093 affected_shards: 100 } compilation { duration_us: 18383 cpu_time_us: 17524 } process_cpu_time_us: 154 query_plan: "{\"Plan\":{\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":3,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"Tables\":[\"ManyShardsTable\"],\"PlanNodeId\":1,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"ReadRanges\":[\"Key (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/ManyShardsTable\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"0\",\"Table\":\"ManyShardsTable\",\"ReadColumns\":[\"Data\",\"Key\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Node Type\":\"Stage\",\"Stats\":{\"UseLlvm\":\"undefined\",\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":4,\"Sum\":100,\"Max\":25,\"Min\":25},\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"LastMessageMs\":{\"Count\":4,\"Sum\":39,\"Max\":11,\"Min\":9},\"ActiveMessageMs\":{\"Count\":4,\"Max\":11,\"Min\":1},\"FirstMessageMs\":{\"Count\":4,\"Sum\":4,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":4,\"Sum\":8168,\"Max\":2075,\"Min\":2004},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":35000,\"Max\":10000,\"Min\":8000}},\"Name\":\"4\",\"Push\":{\"LastMessageMs\":{\"Count\":4,\"Sum\":39,\"Max\":11,\"Min\":9},\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"Chunks\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"ResumeMessageMs\":{\"Count\":4,\"Sum\":39,\"Max\":11,\"Min\":9},\"FirstMessageMs\":{\"Count\":4,\"Sum\":4,\"Max\":1,\"Min\":1},\"ActiveMessageMs\":{\"Count\":4,\"Max\":11,\"Min\":1},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":35000,\"Max\":10000,\"Min\":8000},\"WaitTimeUs\":{\"Count\":4,\"Sum\":35414,\"Max\":9730,\"Min\":8153},\"WaitPeriods\":{\"Count\":4,\"Sum\":7,\"Max\":4,\"Min\":1}}}],\"DurationUs\":{\"Count\":4,\"Sum\":37000,\"Max\":10000,\"Min\":9000},\"MaxMemoryUsage\":{\"Count\":4,\"Sum\":4194304,\"Max\":1048576,\"Min\":1048576,\"History\":[0,4194304,11,4194304]},\"Tasks\":4,\"OutputRows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"FinishedTasks\":4,\"IngressRows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"PhysicalStageId\":0,\"StageDurationUs\":10000,\"Table\":[{\"Path\":\"\\/Root\\/ManyShardsTable\",\"ReadRows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"ReadBytes\":{\"Count\":4,\"Sum\":8800,\"Max\":2208,\"Min\":2192}}],\"BaseTimeMs\":1751275523962,\"WaitInputTimeUs\":{\"Count\":4,\"Sum\":30639,\"Max\":8344,\"Min\":7022,\"History\":[10,23617,11,30639]},\"OutputBytes\":{\"Count\":4,\"Sum\":8168,\"Max\":2075,\"Min\":2004},\"CpuTimeUs\":{\"Count\":4,\"Sum\":3614,\"Max\":2328,\"Min\":349,\"History\":[0,357,10,1458,11,3614]},\"Ingress\":[{\"Pop\":{\"Chunks\":{\"Count\":4,\"Sum\":100,\"Max\":25,\"Min\":25},\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"LastMessageMs\":{\"Count
\":4,\"Sum\":39,\"Max\":11,\"Min\":9},\"ActiveMessageMs\":{\"Count\":4,\"Max\":11,\"Min\":1},\"FirstMessageMs\":{\"Count\":4,\"Sum\":4,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":4,\"Sum\":35200,\"Max\":8832,\"Min\":8768,\"History\":[10,26368,11,35200]},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":35000,\"Max\":10000,\"Min\":8000}},\"External\":{},\"Name\":\"KqpReadRangesSource\",\"Ingress\":{},\"Push\":{\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"LastMessageMs\":{\"Count\":4,\"Sum\":39,\"Max\":11,\"Min\":9},\"Chunks\":{\"Count\":4,\"Sum\":100,\"Max\":25,\"Min\":25},\"ResumeMessageMs\":{\"Count\":4,\"Sum\":39,\"Max\":11,\"Min\":9},\"FirstMessageMs\":{\"Count\":4,\"Sum\":4,\"Max\":1,\"Min\":1},\"ActiveMessageMs\":{\"Count\":4,\"Max\":11,\"Min\":1},\"Bytes\":{\"Count\":4,\"Sum\":35200,\"Max\":8832,\"Min\":8768,\"History\":[10,26368,11,35200]},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":35000,\"Max\":10000,\"Min\":8000},\"WaitTimeUs\":{\"Count\":4,\"Sum\":36966,\"Max\":10143,\"Min\":8550,\"History\":[10,28416,11,36966]},\"WaitPeriods\":{\"Count\":4,\"Sum\":7,\"Max\":4,\"Min\":1}}}],\"UpdateTimeMs\":11}}],\"Node Type\":\"Merge\",\"SortColumns\":[\"Key (Asc)\"],\"PlanNodeType\":\"Connection\"}],\"Node Type\":\"Stage\",\"Stats\":{\"UseLlvm\":\"undefined\",\"OutputRows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"PhysicalStageId\":1,\"FinishedTasks\":1,\"InputBytes\":{\"Count\":1,\"Sum\":8168,\"Max\":8168,\"Min\":8168},\"DurationUs\":{\"Count\":1,\"Sum\":11000,\"Max\":11000,\"Min\":11000},\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[0,1048576,12,1048576]},\"BaseTimeMs\":1751275523962,\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":22,\"Max\":22,\"Min\":22},\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":12,\"Max\":12,\"Min\":12},\"ActiveMessageMs\":{\"Count\":1,\"Max\":12,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":7701,\"Max\":7701,\"Min\":7701,\"History\":[12,7701]},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":11000,\"Max\":11000,\"Min\":11000}},\"Name\":\"RESULT\",\"Push\":{\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":12,\"Max\":12,\"Min\":12},\"Chunks\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":11,\"Max\":11,\"Min\":11},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ActiveMessageMs\":{\"Count\":1,\"Max\":12,\"Min\":1},\"PauseMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":11000,\"Max\":11000,\"Min\":11000},\"WaitTimeUs\":{\"Count\":1,\"Sum\":10419,\"Max\":10419,\"Min\":10419,\"History\":[12,10419]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"WaitMessageMs\":{\"Count\":1,\"Max\":11,\"Min\":1}}}],\"CpuTimeUs\":{\"Count\":1,\"Sum\":1869,\"Max\":1869,\"Min\":1869,\"History\":[0,88,12,1869]},\"StageDurationUs\":11000,\"ResultRows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"ResultBytes\":{\"Count\":1,\"Sum\":7701,\"Max\":7701,\"Min\":7701},\"OutputBytes\":{\"Count\":1,\"Sum\":7701,\"Max\":7701,\"Min\":7701},\"Input\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":29,\"Max\":29,\"Min\":29},\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":12,\"Max\":12,\"Min\":12},\"ActiveMessageMs\":{\"Count\":1,\"Max\":12,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"
Bytes\":{\"Count\":1,\"Sum\":8168,\"Max\":8168,\"Min\":8168},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":11000,\"Max\":11000,\"Min\":11000}},\"Name\":\"2\",\"Push\":{\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":11,\"Max\":11,\"Min\":11},\"Chunks\":{\"Count\":1,\"Sum\":100,\"Max\":100,\"Min\":100},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":11,\"Max\":11,\"Min\":11},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ActiveMessageMs\":{\"Count\":1,\"Max\":11,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":8168,\"Max\":8168,\"Min\":8168},\"PauseMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":10000,\"Max\":10000,\"Min\":10000},\"WaitTimeUs\":{\"Count\":1,\"Sum\":2510,\"Max\":2510,\"Min\":2510},\"WaitPeriods\":{\"Count\":1,\"Sum\":5,\"Max\":5,\"Min\":5},\"WaitMessageMs\":{\"Count\":1,\"Max\":11,\"Min\":1}}}],\"UpdateTimeMs\":12,\"InputRows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"Tasks\":1}}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"Compilation\":{\"FromCache\":false,\"DurationUs\":18383,\"CpuTimeUs\":17524},\"ProcessCpuTimeUs\":154,\"TotalDurationUs\":94010,\"ResourcePoolId\":\"default\",\"QueuedTimeUs\":59985},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Plans\":[{\"PlanNodeId\":5,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"ReadRanges\":[\"Key (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Path\":\"\\/Root\\/ManyShardsTable\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"0\",\"Table\":\"ManyShardsTable\",\"ReadColumns\":[\"Data\",\"Key\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"PlanNodeType\":\"Query\"}}" query_ast: "(\n(let $1 (KqpTable \'\"/Root/ManyShardsTable\" \'\"72057594046644480:2\" \'\"\" \'1))\n(let $2 (KqpRowsSourceSettings $1 \'(\'\"Data\" \'\"Key\") \'(\'(\'\"Sorted\")) (Void) \'()))\n(let $3 (StructType \'(\'\"Data\" (OptionalType (DataType \'Int32))) \'(\'\"Key\" (OptionalType (DataType \'Uint32)))))\n(let $4 \'(\'(\'\"_logical_id\" \'367) \'(\'\"_id\" \'\"a5ce5088-a187b880-130947f9-9634ef9a\") \'(\'\"_wide_channels\" $3)))\n(let $5 (DqPhyStage \'((DqSource (DataSource \'\"KqpReadRangesSource\") $2)) (lambda \'($9) (block \'(\n (let $10 (lambda \'($11) (Member $11 \'\"Data\") (Member $11 \'\"Key\")))\n (return (FromFlow (ExpandMap (ToFlow $9) $10)))\n))) $4))\n(let $6 (DqCnMerge (TDqOutput $5 \'\"0\") \'(\'(\'1 \'\"Asc\"))))\n(let $7 (DqPhyStage \'($6) (lambda \'($12) (FromFlow (NarrowMap (ToFlow $12) (lambda \'($13 $14) (AsStruct \'(\'\"Data\" $13) \'(\'\"Key\" $14)))))) \'(\'(\'\"_logical_id\" \'379) \'(\'\"_id\" \'\"9f825494-221d2f8f-b01c6f17-378702a1\"))))\n(let $8 (DqCnResult (TDqOutput $7 \'\"0\") \'(\'\"Key\" \'\"Data\")))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($5 $7) \'($8) \'() \'(\'(\'\"type\" \'\"generic\")))) \'((KqpTxResultBinding (ListType $3) \'\"0\" \'\"0\")) \'(\'(\'\"type\" \'\"query\"))))\n)\n" total_duration_us: 94010 total_cpu_time_us: 31771 query_meta: 
"{\"query_database\":\"/Root\",\"query_parameter_types\":{},\"table_metadata\":[\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/ManyShardsTable\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":2},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"Data\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"Int32\\\",\\\"TypeId\\\":1,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Key\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"Uint32\\\",\\\"TypeId\\\":2,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"Key\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\"],\"table_meta_serialization_type\":2,\"created_at\":\"1751275523\",\"query_type\":\"QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY\",\"query_syntax\":\"1\",\"query_cluster\":\"db\",\"query_id\":\"7e9baae9-a273b9c5-3451270f-76510614\",\"version\":\"1.0\"}" >> KqpQuery::PreparedQueryInvalidate [GOOD] >> KqpQuery::QueryCache ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::BloomCategoryIndexesVariants[true,false,1024,0,1000,0.5,BLOOM_FILTER] [GOOD] Test command err: Trying to start YDB, gRPC: 1182, MsgBus: 23164 2025-06-30T09:25:01.444518Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671005262215466:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:01.444595Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003dab/r3tmp/tmpkHDOmJ/pdisk_1.dat 2025-06-30T09:25:01.698912Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671005262215274:2080] 1751275501430656 != 1751275501430659 2025-06-30T09:25:01.702919Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1182, node 1 2025-06-30T09:25:01.829360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:01.829412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:01.830541Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:01.991289Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:01.991306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:01.991310Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:01.991388Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
TClient is connected to server localhost:23164 TClient is connected to server localhost:23164 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:02.218229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:02.244376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2); 2025-06-30T09:25:02.277796Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671009557183202:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:02.277833Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:02.445761Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:02.703948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:25:02.724502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:02.724563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:25:02.724627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:25:02.724658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:25:02.724689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:25:02.724716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:25:02.724744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:25:02.724776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:25:02.724795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:25:02.724824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:25:02.724855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:25:02.724888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671009557183294:2296];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:25:02.725630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:25:02.725663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:25:02.725680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:25:02.725700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:25:02.725732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:25:02.725738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:25:02.725749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:25:02.725754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:25:02.725762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:25:02.725766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:25:02.725792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:25:02.725806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:25:02.725827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:25:02.725844Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:25:02.725858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06- ... pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.423286Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671093940038193:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.424482Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:22.433156Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671093940038195:2324], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:25:22.504134Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671093940038246:2454] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:22.531548Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=8;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:22.531625Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=8;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:22.536620Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:25:22.536716Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(11u, JsonDocument('{"a.b.c" : "1a1"}')), (12u, JsonDocument('{"a.b.c" : "1a2"}')), (13u, JsonDocument('{"b.c.d" : "1b3"}')), (14u, JsonDocument('{"b.c.d" : "1b4", "a" : "a4"}')) 2025-06-30T09:25:22.591699Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=16;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:22.591992Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=16;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:22.602782Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715666;tx_id=281474976715666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715666; 2025-06-30T09:25:22.602972Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715666;tx_id=281474976715666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715666; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_INDEX, NAME=a_index, TYPE=BLOOM_FILTER, FEATURES=`{"column_name" : "Col2", "false_positive_probability" : 0.01}`) 2025-06-30T09:25:22.615103Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:22.617900Z node 7 :TX_COLUMNSHARD_TX WARN: 
log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-30T09:25:22.618090Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1) VALUES(10u) 2025-06-30T09:25:22.651221Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=26;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:22.655362Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-30T09:25:22.666838Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:22.670694Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715670;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715670; 2025-06-30T09:25:22.670942Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715670;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715670; waiting actualization: 5/0.000015s 2025-06-30T09:25:22.743275Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:23.310630Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521671093940038015:2294];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037888;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:23.311462Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521671093940038015:2294];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:23.314792Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;task_id=26366e18-559411f0-905b7a66-b5c4fdee;tablet_id=72075186224037888;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:23.319541Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[7:7521671093940038033:2295];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037889;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:23.319942Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[7:7521671093940038033:2295];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:23.322609Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;task_id=2637c92a-559411f0-8ab5a841-5781154b;tablet_id=72075186224037889;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"a.b.c\"") = "a1" ORDER BY Col1; COMPARE: [[1u;["{\"a.b.c\":\"a1\"}"]]] OUTPUT: [[1u;["{\"a.b.c\":\"a1\"}"]]] INDEX:0/4/1 HEADER:0/0/0 EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=DROP_INDEX, NAME=a_index) 2025-06-30T09:25:23.796943Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:23.800669Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:23.800893Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"a.b.c\"") = "1a1" ORDER BY Col1; COMPARE: [[11u;["{\"a.b.c\":\"1a1\"}"]]] OUTPUT: [[11u;["{\"a.b.c\":\"1a1\"}"]]] INDEX:0/4/1 HEADER:0/0/0 EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_INDEX, NAME=b_index, TYPE=CATEGORY_BLOOM_FILTER, FEATURES=`{"column_name" : "Col2", "false_positive_probability" : 0.01}`) 2025-06-30T09:25:23.909314Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:23.912265Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715674; 2025-06-30T09:25:23.912486Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715674; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") = "1b4" ORDER BY Col1; COMPARE: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]] OUTPUT: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]] INDEX:0/4/1 HEADER:0/0/0 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "1b5" ORDER BY Col1; COMPARE: [] OUTPUT: [] INDEX:0/5/0 HEADER:0/0/0 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "a4" ORDER BY Col1; COMPARE: [[4u;["{\"a\":\"a4\",\"b.c.d\":\"b4\"}"]];[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]] OUTPUT: [[4u;["{\"a\":\"a4\",\"b.c.d\":\"b4\"}"]];[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]] INDEX:0/3/2 
HEADER:0/0/0 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d111\"") = "1b5" ORDER BY Col1; COMPARE: [] OUTPUT: [] INDEX:0/5/0 HEADER:0/0/0 >> KqpLimits::StreamWrite+Allowed [GOOD] >> KqpLimits::StreamWrite-Allowed >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[with_queues-tables_format_v0] >> TYdbControlPlaneStoragePipeline::ShouldCheckAutomaticTtl [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckPreviousRevisionSuccess [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMoveToScopeWithPrivateConnection >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckExist >> TYdbControlPlaneStorageModifyConnection::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckNotExistOldName >> TYdbControlPlaneStorageModifyQuery::ShouldCheckQueryName [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckAvailableConnections >> TYdbControlPlaneStoragePipeline::ShouldCheckResultSetLimit [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckGetResultDataRequest >> KqpOlapJson::FilterVariantsCount[10,true,1024,10,100,0] [GOOD] >> KqpOlapJson::FilterVariantsCount[10,true,1024,10,100,0.5] >> KqpExplain::UpdateOn+UseSink >> TYdbControlPlaneStorageModifyBinding::ShouldCheckIdempotencyKey [GOOD] >> KqpQuery::QueryTimeout [GOOD] >> KqpQuery::QueryResultsTruncated >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink+UseDataQuery >> KqpStats::StreamLookupStats+StreamLookupJoin [GOOD] >> KqpStats::StreamLookupStats-StreamLookupJoin >> KqpQuery::RewriteIfPresentToMap >> KqpQuery::QueryCache [GOOD] >> KqpQuery::QueryCacheInvalidate >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckIdempotencyKey >> KqpExplain::UpdateSecondaryConditional+UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? I 08:27 0:54 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? S 08:27 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/0] root 18 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/0:1-rcu_par_gp] root 19 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/1] root 22 0.0 0.0 0 0 ? S 08:27 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? 
... DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes".
Create session OK 2025-06-30T09:25:20.382209Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:20.382210Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:20.382301Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries". Create session OK 2025-06-30T09:25:20.382303Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:20.382305Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:20.382418Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas". Create session OK 2025-06-30T09:25:20.382419Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:20.382420Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:20.382493Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets". 
Create session OK 2025-06-30T09:25:20.382495Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:20.382496Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:20.399380Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:20.399407Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:20.444404Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:20.444420Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:20.457555Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:20.457572Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:20.467102Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:20.467124Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:20.467462Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:20.467472Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:20.467548Z node 17 
:YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:20.467550Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:20.467607Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:20.467609Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:20.467660Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:20.467662Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:20.467716Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:20.467725Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:20.479331Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:20.479354Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:20.486821Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:20.486856Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:20.487340Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:20.487355Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:20.487898Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:20.487913Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:20.487920Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:20.487926Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:20.488097Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:20.488109Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants": >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_action_which_does_not_requere_existing_queue [GOOD] >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_that_queue_can_be_created_despite_lack_of_throttling_budget >> TYdbControlPlaneStorageCreateConnection::ShouldCheckCommitTransactionWrite [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckCommitTransactionReadWrite >> KqpQuery::QueryCacheTtl [GOOD] >> KqpQuery::QueryCachePermissionsLoss >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckPermission >> TYdbControlPlaneStorageCreateBinding::ShouldCheckMaxCountBindings [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckIdempotencyKey |84.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_generates_event[tables_format_v0] [SKIPPED] >> KqpStats::StreamLookupStats-StreamLookupJoin [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME 
COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init ...
S 08:27 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/24] root 160 0.0 0.0 0 0 ? S 08:27 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/25] ... scribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:20.982034Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:20.982038Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:20.982264Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes". Create session OK 2025-06-30T09:25:20.982267Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:20.982269Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:20.982932Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-30T09:25:20.982948Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:20.982950Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:20.984294Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants". 
Create session OK 2025-06-30T09:25:20.984297Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:20.984299Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:20.984807Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:20.984812Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:21.130815Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:21.130840Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:21.148204Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:21.148223Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:21.148243Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:21.148249Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:21.148433Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:21.148436Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:21.148472Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: 
Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:21.148475Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:21.163065Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:21.163082Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:21.163306Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:21.163308Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:21.163496Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:21.163508Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:21.165317Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:21.165334Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:21.165526Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:21.165535Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:21.165649Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:21.165658Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:21.165789Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:21.165800Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:21.165892Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:21.165896Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:21.165984Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:21.165987Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small": >> KqpStats::SysViewClientLost >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_that_queue_can_be_created_despite_lack_of_throttling_budget [GOOD] >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_throttling_on_nonexistent_queue >> KqpOlapJson::FilterVariantsCount[10,true,1024,10,100,0.5] [GOOD] >> KqpQuery::QueryResultsTruncated [GOOD] >> KqpQuery::QueryStats+UseSink >> KqpExplain::UpdateOn+UseSink [GOOD] >> KqpExplain::UpdateOn-UseSink >> KqpQuery::QueryCacheInvalidate [GOOD] >> KqpQuery::Pure >> KqpQuery::RewriteIfPresentToMap [GOOD] >> KqpQuery::RowsLimit >> TYdbControlPlaneStorageModifyConnection::ShouldCheckNotExistOldName [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckLowerCaseName >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_throttling_on_nonexistent_queue [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::FilterVariantsCount[10,true,1024,10,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 32350, MsgBus: 12763 2025-06-30T09:25:19.493349Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671081152844193:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:19.493380Z node 1 :METADATA_PROVIDER 
ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003da9/r3tmp/tmp3rc8Nc/pdisk_1.dat 2025-06-30T09:25:19.553365Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:19.553463Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671081152844174:2080] 1751275519493242 != 1751275519493245 TServer::EnableGrpc on GrpcPort 32350, node 1 2025-06-30T09:25:19.567042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:19.567065Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:19.567068Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:19.567113Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12763 TClient is connected to server localhost:12763 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:19.635468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:19.635498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:19.636336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:19.636473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 10); 2025-06-30T09:25:19.962423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671081152844798:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:19.962466Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:20.006382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:25:20.031069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:20.031120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:25:20.031159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:25:20.031177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:25:20.031190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:25:20.031207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:25:20.031223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:25:20.031237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:25:20.031249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:25:20.031263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:25:20.031276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:25:20.031296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7521671085447812231:2295];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:25:20.033112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:20.033153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:25:20.033221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:25:20.033248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:25:20.033270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:25:20.033291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:25:20.033317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:25:20.033338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:25:20.033361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:25:20.033380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:25:20.033401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:25:20.033419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671085447812230:2294];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:25:20.037223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521671085447812236:2298];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:20.037250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521671085447812236:2298];tablet_id=72075186224037892;process=TTxInitSchema::Execute; ...
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.846886Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.847486Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:26.851527Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:26.851697Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:26.851843Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:26.851999Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:26.852135Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:26.852256Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:26.852400Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:26.852521Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:26.852659Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:26.852775Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`, `COLUMNS_LIMIT`=`1024`, `SPARSED_DETECTOR_KFF`=`10`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:25:26.862413Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671112534261848:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.862440Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.864635Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:26.868300Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:26.868308Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:26.868504Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:26.868507Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:26.868669Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:26.868690Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:26.868801Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:26.868838Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:26.868959Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:26.868997Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1"}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3", "d" : "d3"}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}')) 2025-06-30T09:25:26.875763Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671112534261915:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.875808Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.875829Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671112534261920:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.877183Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:26.883248Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671112534261922:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:25:26.943232Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671112534261973:2675] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:26.970806Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:25:26.970807Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:25:26.970932Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:25:26.970992Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521671112534261577:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=18;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894;receive=72075186224037897; 2025-06-30T09:25:26.971026Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521671112534261577:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=19;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894;receive=72075186224037889; 2025-06-30T09:25:26.971043Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521671112534261577:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=20;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894;receive=72075186224037897; 2025-06-30T09:25:26.971053Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521671112534261577:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=21;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894;receive=72075186224037889; 2025-06-30T09:25:26.971127Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT COUNT(*) FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "a2"; COMPARE: [[1u]] OUTPUT: [[1u]] INDEX:4/0/0 HEADER:0/0/0 2025-06-30T09:25:27.130238Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStoragePipeline::ShouldCheckAutomaticTtl [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT 
START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init [~160 kernel threads (kthreadd, rcu_*, kworker/*, ksoftirqd/*, migration/*, idle_inject/*, cpuhp/*) trimmed from ps listing] ... E DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:23.724924Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:23.724987Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/connections". Create session OK 2025-06-30T09:25:23.724998Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:23.725000Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:23.725055Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenant_acks". Create session OK 2025-06-30T09:25:23.725057Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:23.725059Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:23.725098Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/result_sets". Create session OK 2025-06-30T09:25:23.725100Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:23.725101Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:23.725151Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/pending_small".
Create session OK 2025-06-30T09:25:23.725153Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:23.725154Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:23.725220Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK 2025-06-30T09:25:23.725222Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:23.725223Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:23.755215Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:23.755235Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:23.789566Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:23.789584Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:23.801918Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:23.801947Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:23.804061Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:23.804077Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:23.804296Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:23.804317Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:23.804498Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:23.804503Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:23.804625Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:23.804631Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:23.804658Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:23.804661Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:23.804746Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:23.804750Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:23.804758Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:23.804761Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:23.806400Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:23.806427Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:23.813435Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: 
Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:23.813453Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:23.813453Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:23.813460Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:23.813897Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:23.813906Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:23.814016Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:23.814019Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenants": >> TYdbControlPlaneStorageListQueries::ShouldCheckLimit [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCheckScopeVisibility >> TYdbControlPlaneStoragePipeline::ShouldCheckGetResultDataRequest [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldRetryQuery >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckExist >> KqpQuery::UdfTerminate >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPreviousRevisionFailed >> KqpParams::ExplicitSameParameterTypesQueryCacheCheck >> KqpExplain::UpdateSecondaryConditional+UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditional-UseSink >> KqpQuery::QueryCachePermissionsLoss [GOOD] >> KqpQuery::QueryCancelWrite >> KqpStats::RequestUnitForBadRequestExecute >> TYdbControlPlaneStorageModifyQuery::ShouldCheckAvailableConnections [GOOD] >> KqpExplain::UpdateOn-UseSink [GOOD] >> KqpQuery::Pure [GOOD] >> KqpExplain::UpdateOnSecondary+UseSink >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMoveToScopeWithPrivateConnection [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldNotCreateScopeeBindingWithUnavailableConnection >> KqpQuery::QueryStats+UseSink [GOOD] >> KqpQuery::QueryStats-UseSink >> TYdbControlPlaneStorageListConnections::ShouldCheckLimit [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckScopeVisibility >> KqpQuery::RowsLimit [GOOD] >> KqpQuery::RowsLimitServiceOverride >> TYdbControlPlaneStorageModifyConnection::ShouldCheckLowerCaseName [GOOD] >> 
TYdbControlPlaneStorageModifyConnection::ShouldCheckMaxLengthName ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageModifyBinding::ShouldCheckIdempotencyKey [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: [ps listing identical to the one above; duplicate trimmed] ... ontrolPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:23.551669Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets". Create session OK 2025-06-30T09:25:23.551693Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:23.551696Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:23.551963Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-30T09:25:23.551967Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:23.551969Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:23.552082Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks". Create session OK 2025-06-30T09:25:23.552085Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:23.552086Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:23.552166Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries".
Create session OK 2025-06-30T09:25:23.552169Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:23.552170Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:23.552282Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK 2025-06-30T09:25:23.552284Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:23.552286Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:23.579403Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:23.579427Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:23.641368Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:23.641387Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:23.658400Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:23.658425Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:23.665886Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:23.665909Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:23.665968Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:23.665977Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:23.666191Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:23.666197Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:23.666250Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:23.666267Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:23.675188Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:23.675204Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:23.675798Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:23.675807Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:23.676000Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:23.676005Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:23.676493Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:23.676501Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:23.676653Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:23.676656Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:23.676750Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:23.676753Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:23.676828Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:23.676831Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:23.678960Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:23.678974Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/jobs": ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::Pure [GOOD] Test command err: Trying to start YDB, gRPC: 61835, MsgBus: 24293 2025-06-30T09:25:23.851277Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671099786501495:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:23.851577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00423a/r3tmp/tmpP6lnGT/pdisk_1.dat 2025-06-30T09:25:23.925385Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61835, node 1 2025-06-30T09:25:23.942279Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:23.942294Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:23.942296Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-30T09:25:23.942344Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:23.953859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:23.953901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:23.954954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24293 TClient is connected to server localhost:24293 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:24.009455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:24.021946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:24.051456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:25:24.120314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:24.137543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:24.364884Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671104081470385:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:24.364959Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:24.435256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:24.492779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:24.549356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:24.559712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:24.574469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:24.589733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:24.603337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:24.619099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671104081471039:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:24.619138Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:24.619177Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671104081471044:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:24.620090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:24.629072Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671104081471046:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:24.704035Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671104081471097:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:24.855349Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:24.939119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) Trying to start YDB, gRPC: 9383, MsgBus: 11695 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00423a/r3tmp/tmpCalai5/pdisk_1.dat 2025-06-30T09:25:25.217133Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671109512404492:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:25.217149Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:25.237800Z node 2 :IMPORT ... :2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:27.120319Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671115329909029:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:27.304414Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) Trying to start YDB, gRPC: 16377, MsgBus: 18853 2025-06-30T09:25:27.561213Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671117648806252:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:27.562037Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00423a/r3tmp/tmpG9IpvG/pdisk_1.dat 2025-06-30T09:25:27.580310Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16377, node 4 2025-06-30T09:25:27.593435Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:27.593456Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:27.593459Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:27.593515Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18853 TClient is connected to server localhost:18853 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:27.667886Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:27.667923Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:27.668350Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:27.668885Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:27.679009Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:27.687939Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:27.746968Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:27.778824Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:27.807293Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:27.964099Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671117648807818:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:27.964147Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:27.973871Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.981970Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.990097Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:28.004320Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:28.018146Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:28.034517Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:28.046713Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:28.062698Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671121943775769:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:28.062750Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:28.062758Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671121943775774:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:28.063812Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:28.072787Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671121943775776:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:28.142228Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671121943775827:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> LocalPartitionReader::Simple >> TYdbControlPlaneStorageCreateBinding::ShouldCheckIdempotencyKey [GOOD] >> KqpLimits::StreamWrite-Allowed [GOOD] >> KqpLimits::TooBigColumn+useSink >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckExist [GOOD] >> KqpStats::RequestUnitForBadRequestExecute [GOOD] >> KqpStats::RequestUnitForBadRequestExplicitPrepare >> LocalPartitionReader::Simple [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPreviousRevisionFailed [GOOD] >> KqpQuery::UdfTerminate [GOOD] >> KqpQuery::UdfMemoryLimit >> KqpQuery::QueryCancelWrite [GOOD] >> KqpQuery::QueryCancelWriteImmediate >> KqpQuery::QueryStats-UseSink [GOOD] >> KqpQuery::RowsLimitServiceOverride [GOOD] >> KqpQuery::SelectCountAsteriskFromVar >> KqpParams::ExplicitSameParameterTypesQueryCacheCheck [GOOD] >> KqpParams::ImplicitDifferentParameterTypesQueryCacheCheck >> KqpExplain::UpdateSecondaryConditional-UseSink [GOOD] >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey+UseSink |84.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::Simple [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckMaxLengthName [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckMultipleDotsName >> KqpExplain::UpdateOnSecondary+UseSink [GOOD] >> KqpExplain::UpdateOnSecondary-UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::QueryStats-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 5844, MsgBus: 9599 2025-06-30T09:25:24.830277Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671103881992336:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:24.830500Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004239/r3tmp/tmpvAO0Gn/pdisk_1.dat 2025-06-30T09:25:24.916182Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:24.916307Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671103881992308:2080] 1751275524829354 != 1751275524829357 TServer::EnableGrpc on GrpcPort 5844, node 1 2025-06-30T09:25:24.931754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:24.931769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:24.931772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:24.931818Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to 
server localhost:9599 2025-06-30T09:25:24.977899Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:24.977924Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:24.978995Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9599 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:25.012076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:25.025326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:25.048071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:25.087266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:25.103691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:25.366234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671108176961211:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:25.366278Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:25.424320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.434228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.491613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.509038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.520672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.534671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.552423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.575569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671108176961865:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:25.575618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671108176961870:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:25.575639Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:25.576447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:25.579218Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671108176961872:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:25.656962Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671108176961923:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:25.833340Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:25.922549Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:2024: ActorId: [1:7521671108176962210:2470] TxId: 281474976715672. Ctx: { TraceId: 01jz02fnqg836jtkc25t5mh6ps, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTFmZTc1MjktNzQ2YzcwNTQtNTYwOTcwZGUtYjgzNWQyZTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Abort execution: TIMEOUT, [ {
: Error: Request timeout 50ms exceeded } {
: Error: Cancelling after 49ms during execution } ] 2025-06-30T09:25:25.922579Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:2038: ActorId: [1:7521671108176962210:2470] TxId: 281474976715672. Ctx: { TraceId: 01jz02fnqg836jtkc25t5mh6ps, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTFmZTc1MjktNzQ2YzcwNTQtNTYwOTcwZGUtYjgzNWQyZTM=, CurrentExecutionId: , CustomerSuppl ... } compilation { duration_us: 46310 cpu_time_us: 45031 } process_cpu_time_us: 195 total_duration_us: 68785 total_cpu_time_us: 46646 Trying to start YDB, gRPC: 27201, MsgBus: 8519 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004239/r3tmp/tmpDoNsFo/pdisk_1.dat 2025-06-30T09:25:28.941577Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671119921326966:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:28.941598Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:28.967979Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:28.968284Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671119921326949:2080] 1751275528941443 != 1751275528941446 TServer::EnableGrpc on GrpcPort 27201, node 4 2025-06-30T09:25:28.977314Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:28.977329Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:28.977331Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:28.977401Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8519 TClient is connected to server localhost:8519 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:29.053673Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:29.053718Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:29.054199Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:29.055685Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:25:29.058042Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:29.066570Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:29.092010Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:29.128620Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:29.153723Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:29.439353Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671124216295840:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.439383Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.450651Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.459994Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.473970Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.487956Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.502464Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.516387Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.532031Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.546796Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671124216296493:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.546827Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.546828Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671124216296498:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.547670Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:29.557336Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671124216296500:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:29.618943Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671124216296551:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } query_phases { duration_us: 1568 table_access { name: "/Root/TwoShard" reads { rows: 3 bytes: 35 } partitions_count: 1 } cpu_time_us: 1190 affected_shards: 1 } query_phases { duration_us: 2394 table_access { name: "/Root/EightShard" updates { rows: 3 bytes: 47 } partitions_count: 1 } cpu_time_us: 1008 affected_shards: 2 } compilation { duration_us: 33659 cpu_time_us: 32758 } process_cpu_time_us: 225 total_duration_us: 39505 total_cpu_time_us: 35181 2025-06-30T09:25:29.943525Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpExplain::UpdateSecondaryConditionalPrimaryKey+UseSink >> TYdbControlPlaneStoragePipeline::ShouldRetryQuery [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckNotAutomaticTtl >> KqpStats::RequestUnitForBadRequestExplicitPrepare [GOOD] >> KqpStats::OneShardNonLocalExec+UseSink >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_delete_queue[tables_format_v0-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_delete_queue[tables_format_v0-std] >> TPersQueueTest::DirectReadRestartTablet [GOOD] >> TPersQueueTest::EachMessageGetsExactlyOneAcknowledgementInCorrectOrder >> KqpLimits::TooBigColumn+useSink [GOOD] >> KqpLimits::ReplySizeExceeded |84.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/config/ut/ydb-services-config-ut |84.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/config/ut/ydb-services-config-ut |84.1%| [LD] {RESULT} $(B)/ydb/services/config/ut/ydb-services-config-ut >> KqpQuery::QueryCancelWriteImmediate [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCheckScopeVisibility [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCheckPrivateVisibility >> KqpQuery::SelectCountAsteriskFromVar [GOOD] >> KqpParams::ImplicitDifferentParameterTypesQueryCacheCheck [GOOD] >> KqpParams::DefaultParameterValue ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_throttling_on_nonexistent_queue [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::QueryCancelWriteImmediate [GOOD] Test command err: Trying to start YDB, gRPC: 1740, MsgBus: 26877 2025-06-30T09:25:21.604574Z node 1 
:METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671091526002516:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:21.604626Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00423c/r3tmp/tmpiw4YSe/pdisk_1.dat 2025-06-30T09:25:21.715355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:21.715384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:21.716468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:21.724364Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:21.726723Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671091526002498:2080] 1751275521604357 != 1751275521604360 TServer::EnableGrpc on GrpcPort 1740, node 1 2025-06-30T09:25:21.746958Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:21.746973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:21.746976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:21.747019Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26877 TClient is connected to server localhost:26877 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:21.911783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:25:21.915951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:21.920599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:21.943070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:21.964706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:21.977464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:22.228538Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671095820971385:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.228569Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.324498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.386022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.397081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.414252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.425781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.439742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.453852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:22.472171Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671095820972038:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.472204Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671095820972043:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.472207Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.473164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:22.480238Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671095820972045:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:22.550991Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671095820972096:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:22.599956Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:26.608291Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521671091526002516:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:26.608341Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 27927, MsgBus: 20438 2025-06-30T09:25:26.964083Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671111533724333:2064];send_to=[0:730719 ... oolCreatorActor] ActorId: [3:7521671125098222937:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:29.451730Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671125098222988:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:29.633457Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 23072, MsgBus: 8735 2025-06-30T09:25:30.021101Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671130843044301:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:30.021146Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00423c/r3tmp/tmptu7s2u/pdisk_1.dat 2025-06-30T09:25:30.041773Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:30.042161Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671130843044277:2080] 1751275530020868 != 1751275530020871 TServer::EnableGrpc on GrpcPort 23072, node 4 2025-06-30T09:25:30.052998Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:30.053012Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:30.053015Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:30.053068Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8735 TClient is connected to server localhost:8735 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:30.128082Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:30.128124Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:30.128546Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:30.129195Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:25:30.130985Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:30.139606Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:30.158203Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:30.186792Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:30.211460Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:30.496315Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671130843045891:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:30.496350Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:30.507372Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.519356Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.534677Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.546197Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.559261Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.573377Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.588703Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.603922Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671130843046543:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:30.603983Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:30.604012Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671130843046548:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:30.604927Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:30.614863Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671130843046550:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:30.697219Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671130843046601:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageModifyQuery::ShouldCheckAvailableConnections [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? D 08:27 0:54 [kworker/u128:0+ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? S 08:27 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/0] root 18 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/0:1-rcu_par_gp] root 19 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/1] root 22 0.0 0.0 0 0 ? S 08:27 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/2] root 28 0.0 0.0 0 0 ? S 08:27 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/3] root 34 0.0 0.0 0 0 ? S 08:27 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/4] root 40 0.0 0.0 0 0 ? S 08:27 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/5] root 46 0.0 0.0 0 0 ? S 08:27 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/6] root 52 0.0 0.0 0 0 ? S 08:27 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/6] root 55 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/7] root 58 0.0 0.0 0 0 ? S 08:27 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? 
I< 08:27 0:00 [kworker/7:0H-events_highpri] root 62 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/8] root 63 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/8] root 64 0.0 0.0 0 0 ? S 08:27 0:02 [migration/8] root 65 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/8] root 67 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/8:0H-events_highpri] root 68 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/9] root 69 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/9] root 70 0.0 0.0 0 0 ? S 08:27 0:02 [migration/9] root 71 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/9] root 73 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/9:0H-events_highpri] root 74 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/10] root 75 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/10] root 76 0.0 0.0 0 0 ? S 08:27 0:02 [migration/10] root 77 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/10] root 78 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/10:0-rcu_gp] root 79 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/10:0H-events_highpri] root 80 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/11] root 81 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/11] root 82 0.0 0.0 0 0 ? S 08:27 0:02 [migration/11] root 83 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/11] root 85 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/11:0H-events_highpri] root 86 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/12] root 87 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/12] root 88 0.0 0.0 0 0 ? S 08:27 0:02 [migration/12] root 89 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/12] root 91 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/12:0H-events_highpri] root 92 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/13] root 93 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/13] root 94 0.0 0.0 0 0 ? S 08:27 0:02 [migration/13] root 95 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/13] root 97 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/13:0H-events_highpri] root 98 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/14] root 99 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/14] root 100 0.0 0.0 0 0 ? S 08:27 0:02 [migration/14] root 101 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/14] root 102 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/14:0-rcu_gp] root 103 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/14:0H-kblockd] root 104 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/15] root 105 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/15] root 106 0.0 0.0 0 0 ? S 08:27 0:02 [migration/15] root 107 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/15] root 109 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/15:0H-events_highpri] root 110 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/16] root 111 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/16] root 112 0.0 0.0 0 0 ? S 08:27 0:02 [migration/16] root 113 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/16] root 115 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/16:0H-events_highpri] root 116 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/17] root 117 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/17] root 118 0.0 0.0 0 0 ? S 08:27 0:02 [migration/17] root 119 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/17] root 121 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/17:0H-events_highpri] root 122 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/18] root 123 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/18] root 124 0.0 0.0 0 0 ? S 08:27 0:02 [migration/18] root 125 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/19] root 130 0.0 0.0 0 0 ? S 08:27 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/20] root 136 0.0 0.0 0 0 ? S 08:27 0:02 [migration/20] root 137 0.0 0.0 0 0 ? 
S 08:27 0:00 [ksoftirqd/20] root 138 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/20:0-rcu_gp] root 139 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/21] root 142 0.0 0.0 0 0 ? S 08:27 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/22] root 148 0.0 0.0 0 0 ? S 08:27 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/22] root 151 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/23] root 154 0.0 0.0 0 0 ? S 08:27 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/24] root 160 0.0 0.0 0 0 ? S 08:27 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/25] ... PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK 2025-06-30T09:25:26.360418Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:26.360419Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:26.360484Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-30T09:25:26.360486Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:26.360488Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:26.360542Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants". 
Create session OK 2025-06-30T09:25:26.360545Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:26.360546Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:26.361557Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections". Create session OK 2025-06-30T09:25:26.361571Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:26.361573Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:26.361624Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/quotas". Create session OK 2025-06-30T09:25:26.361646Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:26.361648Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:26.374628Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:26.374643Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:26.410239Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:26.410261Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:26.421301Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:26.421320Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: 
Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:26.427417Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:26.427435Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:26.427924Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:26.427942Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:26.427947Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:26.427952Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:26.428106Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:26.428109Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:26.436502Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:26.436515Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:26.436713Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:26.436716Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:26.436810Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:26.436812Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:26.436957Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:26.436963Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:26.437131Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:26.437134Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:26.437233Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:26.437236Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:26.437329Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:26.437331Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:26.437410Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:26.437412Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings": >> TYdbControlPlaneStorageModifyConnection::ShouldCheckMultipleDotsName [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckAllowedSymbolsName >> KqpParams::CheckQueryCacheForPreparedQuery |84.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> TYdbControlPlaneStorageListBindings::ShouldEmptyPageToken [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCheckLimit >> 
KqpExplain::UpdateOnSecondaryWithoutSecondaryKey+UseSink [GOOD] >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink >> TYdbControlPlaneStorageModifyBinding::ShouldNotCreateScopeeBindingWithUnavailableConnection [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldNotCreatePrivateBindingWithUnavailableConnection >> TPersQueueTest::DefaultMeteringMode [GOOD] >> KqpQuery::SelectWhereInSubquery ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::SelectCountAsteriskFromVar [GOOD] Test command err: Trying to start YDB, gRPC: 14660, MsgBus: 5935 2025-06-30T09:25:26.405113Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671114033370899:2150];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:26.406022Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004230/r3tmp/tmpDe8oyS/pdisk_1.dat 2025-06-30T09:25:26.466968Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14660, node 1 2025-06-30T09:25:26.486930Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:26.486943Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:26.486945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:26.486992Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5935 2025-06-30T09:25:26.504023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:26.504059Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:26.505305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5935 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:26.560397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:26.570457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:26.595248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:26.622820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:26.641673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:26.920646Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671114033372365:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.920674Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.975119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:26.987892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.003196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.066698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.080920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.093675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.109096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.128900Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671118328340316:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:27.128933Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:27.128938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671118328340321:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:27.129842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:27.134669Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671118328340323:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:27.201064Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671118328340374:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:27.406523Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 19161, MsgBus: 10032 2025-06-30T09:25:27.725249Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671118798646072:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:27.725275Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004230/r3tmp/tmpCReptg/pdisk_1.dat 2025-06-30T09:25:27.745207Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:27.745502Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521671118798646045:2080] 1751275527725072 != 1751275527725075 TServer::EnableGrpc on GrpcPort 19161, node 2 2025-06-30T09:25:27.757880Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config ... CreatorActor] ActorId: [3:7521671125466604724:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:29.681232Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671125466604775:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 21582, MsgBus: 62029 2025-06-30T09:25:30.198179Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671128284933962:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:30.198210Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004230/r3tmp/tmp1PVQ0n/pdisk_1.dat 2025-06-30T09:25:30.225439Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:30.225702Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671128284933937:2080] 1751275530198034 != 1751275530198037 TServer::EnableGrpc on GrpcPort 21582, node 4 2025-06-30T09:25:30.235421Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:30.235434Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:30.235437Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:30.235492Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62029 TClient is connected to server localhost:62029 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:30.306392Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:30.306427Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:30.306788Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:30.307312Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:25:30.313624Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:30.372566Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:30.399516Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:30.412326Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:30.622720Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671128284935531:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:30.622767Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:30.639667Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.655147Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.668222Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.677946Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.694003Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.711335Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.727677Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:30.753199Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671128284936181:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:30.753232Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:30.753349Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671128284936186:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:30.754634Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:30.760765Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:30.760863Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671128284936188:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:30.847349Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671128284936239:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:31.200004Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TPersQueueTest::InflightLimit [GOOD] |84.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageCreateBinding::ShouldCheckIdempotencyKey [GOOD] Test command err: 2025-06-30T09:24:50.105836Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670957068715044:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:50.105881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a88/r3tmp/tmp01RJlG/pdisk_1.dat 2025-06-30T09:24:50.243517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:50.243552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:50.244538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:24:50.251864Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:50.253594Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670957068715024:2081] 1751275490105627 != 1751275490105631 TServer::EnableGrpc on GrpcPort 64318, node 1 2025-06-30T09:24:50.285909Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:50.285924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:50.285926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:50.285986Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24852 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:24:50.349459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:24:50.355615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:50.380496Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvNodesHealthCheckRequest 2025-06-30T09:24:50.878843Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvGetTaskRequest 2025-06-30T09:24:50.880237Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvCreateQueryRequest 2025-06-30T09:24:50.880780Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvDescribeQueryRequest Wait query execution 0.000364s: STARTING 2025-06-30T09:24:51.108348Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:24:51.880520Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvGetTaskRequest 2025-06-30T09:24:51.881001Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:367: QueryId: utque775j7cl6vvnrv3u Start run actor. Compute state: STARTING 2025-06-30T09:24:51.881010Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:459: QueryId: utque775j7cl6vvnrv3u FillConnections 2025-06-30T09:24:51.881062Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:2305: QueryId: utque775j7cl6vvnrv3u Run actors params: { QueryId: utque775j7cl6vvnrv3u CloudId: mock_cloud UserId: root@builtin Owner: b04f57d9-6a793867-5feb1889-a9c933c12 PreviousQueryRevision: 1 Connections: 0 Bindings: 0 AccountIdSignatures: 0 QueryType: STREAMING ExecuteMode: RUN ResultId: utrue775j6e43t6r4jkt StateLoadMode: EMPTY StreamingDisposition: { } Status: STARTING DqGraphs: 0 DqGraphIndex: 0 Resource.TopicConsumers: 0 } 2025-06-30T09:24:51.881098Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:1951: QueryId: utque775j7cl6vvnrv3u Compiling query ... 
2025-06-30T09:24:51.887145Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvGetTaskRequest 2025-06-30T09:24:51.887602Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-30T09:24:51.887820Z node 1 :FQ_RUN_ACTOR TRACE: run_actor.cpp:847: QueryId: utque775j7cl6vvnrv3u Forward ping response. Success: 1. Cookie: 2 2025-06-30T09:24:51.887915Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-30T09:24:51.887978Z node 1 :FQ_RUN_ACTOR TRACE: run_actor.cpp:847: QueryId: utque775j7cl6vvnrv3u Forward ping response. Success: 1. Cookie: 0 2025-06-30T09:24:51.888044Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvDescribeQueryRequest Wait query execution 1.007663s: RUNNING 2025-06-30T09:24:51.948578Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:914: QueryId: utque775j7cl6vvnrv3u Graph (execution) with tasks: 1 2025-06-30T09:24:51.949434Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:897: QueryId: utque775j7cl6vvnrv3u Overall dq tasks: 1 2025-06-30T09:24:51.949500Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:744: QueryId: utque775j7cl6vvnrv3u Graph 0 2025-06-30T09:24:51.949758Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-30T09:24:51.949873Z node 1 :FQ_RUN_ACTOR TRACE: run_actor.cpp:847: QueryId: utque775j7cl6vvnrv3u Forward ping response. Success: 1. Cookie: 0 2025-06-30T09:24:51.949949Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-30T09:24:51.949983Z node 1 :FQ_RUN_ACTOR TRACE: run_actor.cpp:847: QueryId: utque775j7cl6vvnrv3u Forward ping response. Success: 1. Cookie: 1 2025-06-30T09:24:51.949988Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:897: QueryId: utque775j7cl6vvnrv3u Overall dq tasks: 1 2025-06-30T09:24:51.950243Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:1646: QueryId: utque775j7cl6vvnrv3u Executer: [1:7521670961363682986:2318], Controller: [1:7521670961363682988:2320], ResultIdActor: [1:7521670961363682987:2319] 2025-06-30T09:24:51.950325Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-30T09:24:51.950364Z node 1 :FQ_RUN_ACTOR TRACE: run_actor.cpp:847: QueryId: utque775j7cl6vvnrv3u Forward ping response. Success: 1. Cookie: 0 2025-06-30T09:24:51.955400Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvWriteResultDataRequest 2025-06-30T09:24:51.956959Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:1219: QueryId: utque775j7cl6vvnrv3u Query response SUCCESS. Result set index: 0. Issues count: 0. 
Rows count: 1 2025-06-30T09:24:51.957412Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:1812: QueryId: utque775j7cl6vvnrv3u Is about to finish query with status COMPLETED 2025-06-30T09:24:51.957418Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:1805: QueryId: utque775j7cl6vvnrv3u Write finalizing status: COMPLETING 2025-06-30T09:24:51.957427Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:1904: QueryId: utque775j7cl6vvnrv3u SendPingAndPassAway, FinalQueryStatus COMPLETED 2025-06-30T09:24:51.957440Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:522: QueryId: utque775j7cl6vvnrv3u PassAway 2025-06-30T09:24:51.958067Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-30T09:24:51.958267Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-30T09:24:52.886335Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvGetTaskRequest 2025-06-30T09:24:52.890269Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvDescribeQueryRequest 2025-06-30T09:24:52.890859Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvGetResultDataRequest test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003a88/r3tmp/tmptvJ2uO/pdisk_1.dat 2025-06-30T09:24:53.370879Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:24:53.372354Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521670969102051499:2081] 1751275493343575 != 1751275493343579 2025-06-30T09:24:53.374128Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25452, node 2 2025-06-30T09:24:53.391020Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:53.391038Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:53.391040Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:53.391092Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7987 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:24:53.447610Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:53.447647Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2 ... ::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:27.668590Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets". Create session OK 2025-06-30T09:25:27.668593Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:27.668595Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:27.668651Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-30T09:25:27.668653Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:27.668655Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:27.668674Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenants". Create session OK 2025-06-30T09:25:27.668675Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:27.668676Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:27.668712Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections". 
Create session OK 2025-06-30T09:25:27.668714Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:27.668716Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:27.668735Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/jobs". Create session OK 2025-06-30T09:25:27.668737Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:27.668738Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:27.701788Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:27.701817Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:27.751519Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:27.751546Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:27.798887Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:27.798912Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:27.799301Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:27.799313Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:27.799413Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:27.799424Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:27.799484Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:27.799492Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:27.799561Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:27.799573Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:27.799627Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:27.799635Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:27.799693Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:27.799701Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:27.799811Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:27.799821Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:27.799885Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:27.799898Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:27.799928Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:27.799941Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:27.800025Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:27.800039Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:27.800111Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:27.800119Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:27.800180Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:27.800204Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/nodes": >> KqpExplain::UpdateOnSecondary-UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalPrimaryKey+UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalPrimaryKey-UseSink >> KqpParams::DefaultParameterValue [GOOD] >> KqpParams::Decimal-QueryService-UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckExist [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? R 08:27 0:54 [kworker/u128:0+ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? 
...
bControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:28.825111Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/quotas". Create session OK 2025-06-30T09:25:28.825127Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:28.825129Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:28.825385Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-30T09:25:28.825389Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:28.825390Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:28.825473Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/idempotency_keys". Create session OK 2025-06-30T09:25:28.825475Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:28.825476Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:28.825547Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/mappings". Create session OK 2025-06-30T09:25:28.825549Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:28.825551Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:28.825607Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/queries". 
Create session OK 2025-06-30T09:25:28.825608Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:28.825610Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:28.825751Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:28.825755Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:28.897320Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:28.897342Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:28.906060Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:28.906076Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:28.909394Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:28.909414Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:28.909624Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:28.909645Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:28.921726Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:28.921752Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:28.922303Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:28.922327Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:28.922514Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:28.922517Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:28.922622Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:28.922625Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:28.923435Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:28.923443Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:28.923644Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:28.923649Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:28.923770Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:28.923772Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:28.923836Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:28.923837Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: 
schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:28.923877Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:28.923879Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:28.924102Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:28.924105Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:29.393767Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: DescribeQueryRequest - DescribeQueryResult: {query_id: "abra" } ERROR: {
: Error: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp:664: Query does not exist or permission denied. Please check the id of the query or your access rights, code: 1000 } >> KqpQuery::UdfMemoryLimit [GOOD] >> KqpQuery::TryToUpdateNonExistentColumn >> KqpQuery::Now ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPreviousRevisionFailed [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? I 08:27 0:54 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? S 08:27 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/0] root 18 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/0:1-rcu_par_gp] root 19 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/1] root 22 0.0 0.0 0 0 ? S 08:27 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/2] root 28 0.0 0.0 0 0 ? S 08:27 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/3] root 34 0.0 0.0 0 0 ? S 08:27 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/4] root 40 0.0 0.0 0 0 ? S 08:27 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/5] root 46 0.0 0.0 0 0 ? S 08:27 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/6] root 52 0.0 0.0 0 0 ? S 08:27 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/6] root 55 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/7] root 58 0.0 0.0 0 0 ? S 08:27 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/7:0H-events_highpri] root 62 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/8] root 63 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/8] root 64 0.0 0.0 0 0 ? S 08:27 0:02 [migration/8] root 65 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/8] root 67 0.0 0.0 0 0 ? 
I< 08:27 0:00 [kworker/8:0H-events_highpri] root 68 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/9] root 69 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/9] root 70 0.0 0.0 0 0 ? S 08:27 0:02 [migration/9] root 71 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/9] root 73 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/9:0H-events_highpri] root 74 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/10] root 75 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/10] root 76 0.0 0.0 0 0 ? S 08:27 0:02 [migration/10] root 77 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/10] root 78 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/10:0-rcu_gp] root 79 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/10:0H-events_highpri] root 80 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/11] root 81 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/11] root 82 0.0 0.0 0 0 ? S 08:27 0:02 [migration/11] root 83 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/11] root 85 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/11:0H-events_highpri] root 86 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/12] root 87 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/12] root 88 0.0 0.0 0 0 ? S 08:27 0:02 [migration/12] root 89 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/12] root 91 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/12:0H-events_highpri] root 92 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/13] root 93 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/13] root 94 0.0 0.0 0 0 ? S 08:27 0:02 [migration/13] root 95 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/13] root 97 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/13:0H-events_highpri] root 98 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/14] root 99 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/14] root 100 0.0 0.0 0 0 ? S 08:27 0:02 [migration/14] root 101 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/14] root 102 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/14:0-rcu_gp] root 103 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/14:0H-kblockd] root 104 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/15] root 105 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/15] root 106 0.0 0.0 0 0 ? S 08:27 0:02 [migration/15] root 107 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/15] root 109 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/15:0H-events_highpri] root 110 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/16] root 111 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/16] root 112 0.0 0.0 0 0 ? S 08:27 0:02 [migration/16] root 113 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/16] root 115 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/16:0H-events_highpri] root 116 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/17] root 117 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/17] root 118 0.0 0.0 0 0 ? S 08:27 0:02 [migration/17] root 119 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/17] root 121 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/17:0H-events_highpri] root 122 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/18] root 123 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/18] root 124 0.0 0.0 0 0 ? S 08:27 0:02 [migration/18] root 125 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/19] root 130 0.0 0.0 0 0 ? S 08:27 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/20] root 136 0.0 0.0 0 0 ? S 08:27 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/20] root 138 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/20:0-rcu_gp] root 139 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/21] root 142 0.0 0.0 0 0 ? 
S 08:27 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/22] root 148 0.0 0.0 0 0 ? S 08:27 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/22] root 151 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/23] root 154 0.0 0.0 0 0 ? S 08:27 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/24] root 160 0.0 0.0 0 0 ? S 08:27 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/25] ... tContext&)/bindings" 2025-06-30T09:25:29.147323Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/nodes". Create session OK 2025-06-30T09:25:29.147334Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:29.147335Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:29.147353Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-30T09:25:29.147354Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:29.147355Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:29.147430Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/pending_small". 
Create session OK 2025-06-30T09:25:29.147442Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:29.147443Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:29.147759Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants". Create session OK 2025-06-30T09:25:29.147767Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:29.147768Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:29.179018Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:29.179037Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:29.242807Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:29.242830Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:29.251009Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:29.251026Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:29.269371Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:29.269393Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:29.269416Z 
node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:29.269424Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:29.269778Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:29.269789Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:29.269865Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:29.269871Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:29.275376Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:29.275392Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:29.275662Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:29.275668Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:29.275775Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:29.275777Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:29.276369Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:29.276378Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: 
schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:29.276515Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:29.276517Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:29.276602Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:29.276604Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:29.276669Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:29.276671Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:29.276990Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:29.276997Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:29.746951Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:185: Revision of the query has been changed already. Please restart the request with a new revision 2025-06-30T09:25:29.747196Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: DeleteQueryRequest - DeleteQueryResult: {query_id: "utque775i2mrtmdr268f" previous_revision: 100 } ERROR: {
: Error: Revision of the query has been changed already. Please restart the request with a new revision, code: 1003 } >> KqpStats::JoinNoStatsYql ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateOnSecondary-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 31104, MsgBus: 12573 2025-06-30T09:25:26.241114Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671111482228591:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:26.241136Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004233/r3tmp/tmprul1uF/pdisk_1.dat 2025-06-30T09:25:26.306319Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:26.312951Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671111482228568:2080] 1751275526240928 != 1751275526240931 TServer::EnableGrpc on GrpcPort 31104, node 1 2025-06-30T09:25:26.340730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:26.340752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:26.340754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:26.340815Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12573 2025-06-30T09:25:26.379635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:26.379673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:26.386271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12573 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
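[editor note] The DeleteQuery failure above is the optimistic-concurrency check working as intended: the request pinned previous_revision: 100, the stored revision had already moved on, and validators.cpp rejected it with code 1003 plus the hint to restart with a new revision. A minimal, self-contained sketch of that check and of the suggested restart loop — Store, RevisionMismatch, and delete_with_retry are illustrative stand-ins, not the YDB control-plane API:

    class RevisionMismatch(Exception):
        """Stands in for error code 1003: revision changed since the client read it."""

    class Store:
        def __init__(self):
            self.queries = {}  # query_id -> current revision

        def describe(self, query_id):
            return self.queries[query_id]

        def delete(self, query_id, previous_revision):
            current = self.queries[query_id]
            if current != previous_revision:
                # The server-side validation seen in the log.
                raise RevisionMismatch(
                    f"Revision of the query has been changed already "
                    f"(expected {previous_revision}, actual {current})")
            del self.queries[query_id]

    def delete_with_retry(store, query_id, attempts=3):
        # "Please restart the request with a new revision": re-read the
        # current revision and retry the delete on a mismatch.
        for _ in range(attempts):
            revision = store.describe(query_id)
            try:
                store.delete(query_id, previous_revision=revision)
                return
            except RevisionMismatch:
                continue
        raise RuntimeError("gave up after repeated revision conflicts")

    store = Store()
    store.queries["utque775i2mrtmdr268f"] = 101   # revision already moved past 100
    delete_with_retry(store, "utque775i2mrtmdr268f")

The test itself (ShouldCheckPreviousRevisionFailed) asserts the failure branch: a stale previous_revision must be refused rather than silently applied.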
2025-06-30T09:25:26.434429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:26.446397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:26.472360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:26.516160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:26.537173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:26.784745Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671111482230163:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.784784Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.855783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:26.866284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:26.876941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:26.891459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:26.905568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:26.919959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:26.934605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:26.954159Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671111482230815:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.954209Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.954367Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671111482230820:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:26.955475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:26.958099Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671111482230822:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:27.032745Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671115777198169:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:27.242790Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; {"Plan":{"Plans":[{"Tables":["EightShard"],"PlanNodeId":2,"Operators":[{"Inputs":[],"Path":"\/Root\/EightShard","Name":"Update","SinkType":"KqpTableSink","Table":"EightShard"}],"Plans":[{"PlanNodeId":1,"Operators":[{"Inputs":[],"Iterator":"[{Data: 0,Key: 100}]","Name":"Iterator"}],"Node Type":"ConstantExpr"}],"Node Type":"Sink"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","writes":[{"columns":["Data","Key"],"type":"MultiUpdate"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/EightShard","Name":"Update","SinkType":"KqpTableSink","Table":"EightShard"}],"Node Type":"Update"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} Trying to start ... fault not found or you don't have access permissions } 2025-06-30T09:25:31.235297Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.251751Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.267323Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.281999Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.299847Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.321996Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.342146Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.357615Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.419879Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671136039943723:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.419914Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.419972Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671136039943728:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.421090Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:31.425859Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671136039943730:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:31.505309Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671136039943784:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:31.629221Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:31.721005Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.733311Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.747435Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {"Plan":{"Plans":[{"PlanNodeId":29,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":28,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"},{"Inputs":[],"Iterator":"precompute_2_1","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_2_1"}],"Node Type":"Effect"},{"PlanNodeId":27,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":26,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_3_1","Name":"Iterator"}],"Node Type":"Delete-ConstantExpr","CTE Name":"precompute_3_1"}],"Node Type":"Effect"},{"PlanNodeId":25,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":24,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_3_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_3_0"}],"Node Type":"Effect"},{"PlanNodeId":22,"Plans":[{"PlanNodeId":21,"Plans":[{"PlanNodeId":20,"Plans":[{"PlanNodeId":19,"Operators":[{"Inputs":[{"Other":"ConstantExpression"},{"Other":"ConstantExpression"},{"Other":"ConstantExpression"}],"Iterator":"FlatMap","Name":"Iterator"}],"Node Type":"ConstantExpr"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_3_0","Node 
Type":"Precompute_3_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":18,"Plans":[{"PlanNodeId":17,"Plans":[{"PlanNodeId":16,"Plans":[{"PlanNodeId":15,"Operators":[{"Inputs":[{"InternalOperatorId":1},{"InternalOperatorId":1}],"Iterator":"Map","Name":"Iterator"},{"E-Rows":"1","Inputs":[],"Predicate":"","E-Cost":"0","E-Size":"5","Name":"Filter"}],"Node Type":"ConstantExpr-Filter"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_3_1","Node Type":"Precompute_3_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":13,"Plans":[{"PlanNodeId":12,"Operators":[{"Inputs":[{"Other":"ConstantExpression"}],"Iterator":"[ToDict]","Name":"Iterator"}],"Node Type":"ConstantExpr"}],"Subplan Name":"CTE precompute_2_0","Node Type":"Precompute_2_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Iterator":"Filter","Name":"Iterator"},{"E-Rows":"2","Inputs":[],"Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"ConstantExpr-Filter"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_2_1","Node Type":"Precompute_2_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"E-Size":"0","LookupKeyColumns":["Key"],"Node Type":"TableLookup","PlanNodeId":2,"Path":"\/Root\/SecondaryKeys","Columns":["Fk","Key"],"E-Rows":"2","Plans":[{"PlanNodeId":1,"Operators":[{"Inputs":[],"Iterator":"precompute_0_1","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_1"}],"Table":"SecondaryKeys","PlanNodeType":"Connection","E-Cost":"0"}],"Node Type":"Stage"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","reads":[{"lookup_by":["Key"],"columns":["Fk","Key"],"type":"Lookup"}],"writes":[{"columns":["Fk","Key","Value"],"type":"MultiUpsert"}]},{"name":"\/Root\/SecondaryKeys\/Index\/indexImplTable","writes":[{"columns":["Fk","Key"],"type":"MultiUpsert"},{"type":"MultiErase"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":8,"Operators":[{"E-Rows":"2","Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"Filter"}],"Node Type":"Upsert"}],"Node Type":"Effect"},{"PlanNodeId":9,"Plans":[{"PlanNodeId":10,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":16,"Operators":[{"E-Rows":"1","Predicate":"","E-Cost":"0","E-Size":"5","Name":"Filter"}],"Node Type":"Filter"}],"Node Type":"Delete"}],"Node Type":"Effect"},{"PlanNodeId":17,"Plans":[{"PlanNodeId":18,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node 
Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> KqpLimits::TooBigQuery-useSink >> KqpLimits::ReplySizeExceeded [GOOD] >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[with_queues-tables_format_v0] [GOOD] >> KqpLimits::DatashardProgramSize+useSink >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[with_queues-tables_format_v1] >> KqpParams::CheckQueryCacheForPreparedQuery [GOOD] >> KqpParams::CheckQueryCacheForExecuteAndPreparedQueries >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckNotAutomaticTtl [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckChangeAutomaticTtl >> TYdbControlPlaneStorageModifyConnection::ShouldCheckAllowedSymbolsName [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckMoveToScope >> KqpQuery::SelectWhereInSubquery [GOOD] >> KqpParams::RowsList >> KqpQuery::TableSink_ReplaceDataShardDataQuery+UseSink >> KqpStats::OneShardNonLocalExec+UseSink [GOOD] >> KqpStats::OneShardNonLocalExec-UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::InflightLimit [GOOD] Test command err: 2025-06-30T09:22:52.962034Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670452288127119:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:52.962070Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:52.970486Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670451252308063:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:52.970515Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002ff7/r3tmp/tmpZN9Bea/pdisk_1.dat 2025-06-30T09:22:53.018412Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:22:53.026907Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:22:53.042731Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4150, node 1 2025-06-30T09:22:53.062607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:53.062641Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:53.064658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:53.071372Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002ff7/r3tmp/yandexTwZFis.tmp 2025-06-30T09:22:53.071383Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002ff7/r3tmp/yandexTwZFis.tmp 2025-06-30T09:22:53.071461Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002ff7/r3tmp/yandexTwZFis.tmp 2025-06-30T09:22:53.071508Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:53.077535Z INFO: TTestServer started on Port 2590 GrpcPort 4150 TClient is connected to server localhost:2590 PQClient connected to localhost:4150 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:53.111526Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:53.111548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:53.113123Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:22:53.113821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:53.113842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:22:53.133424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-30T09:22:53.429697Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670455547275698:2275], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:53.429697Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670455547275650:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:53.429741Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:53.431786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:53.437869Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670455547275703:2276], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-30T09:22:53.532940Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670455547275730:2174] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:53.542012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:53.542018Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670456583095434:2303], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:22:53.542648Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NDRmZTRjNTktYWM1NDRmMmMtZWE3MTBhMjctZTQwYzhlOTg=, ActorId: [1:7521670456583095416:2296], ActorState: ExecuteState, TraceId: 01jz02b0x9393zbj6q94sthbw1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:22:53.542928Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521670455547275737:2280], status: SCHEME_ERROR, issues:
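[editor note] The SCHEME_ERROR compile failures above ("Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]'", issue code 2003) are expected startup noise: the cluster tracker polls the V2 config table before the test's "=== Init DC" UPSERT has created it, logs "failed to list clusters", and retries. A minimal, self-contained sketch of that bootstrap pattern, seeded with the exact rows from the Init DC statement — SchemeError, ConfigStore, and list_clusters_with_retry are illustrations, not the YDB API:

    import time

    class SchemeError(Exception):
        """Stands in for SCHEME_ERROR / issue code 2003: table not found."""

    class ConfigStore:
        def __init__(self):
            self.tables = {}                    # path -> rows

        def select(self, path):
            if path not in self.tables:
                raise SchemeError(f"Cannot find table 'db.[{path}]'")
            return self.tables[path]

        def upsert(self, path, rows):
            self.tables.setdefault(path, []).extend(rows)

    def list_clusters_with_retry(store, path, delay=0.1, attempts=50):
        # Mirrors the cluster tracker's behaviour in the log: report the
        # failure, then poll again until the config table appears.
        for _ in range(attempts):
            try:
                return store.select(path)
            except SchemeError as err:
                print(f"failed to list clusters: {err}")
                time.sleep(delay)
        raise TimeoutError("config table never appeared")

    store = ConfigStore()
    store.upsert("/Root/PQ/Config/V2/Cluster",
                 [("dc1", "localhost", True, True, 1000),
                  ("dc2", "dc2.logbroker.yandex.net", False, True, 1000)])
    print(list_clusters_with_retry(store, "/Root/PQ/Config/V2/Cluster"))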
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:22:53.543199Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:22:53.543086Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=ZTMyNzZlM2MtN2IwNTAzNjctZGExMWU1Ni1kYTlhNWVm, ActorId: [2:7521670455547275648:2270], ActorState: ExecuteState, TraceId: 01jz02b0vm27gwd8y6m2ghp66c, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:22:53.543203Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:22:53.565150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:53.613002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-30T09:22:53.668143Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720665. Ctx: ... 
40429Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 2 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-30T09:25:26.840446Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-30T09:25:26.840512Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--topic1' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:25:26.841055Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer session _29_2_115977798006795926_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 4 Result { Offset: 0 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 2 WriteTimestampMS: 1751275519017 CreateTimestampMS: 1751275519003 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 3 WriteTimestampMS: 1751275519046 CreateTimestampMS: 1751275519036 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 4 WriteTimestampMS: 1751275519066 CreateTimestampMS: 1751275519063 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 3 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 5 WriteTimestampMS: 1751275519079 CreateTimestampMS: 1751275519078 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 4 SizeLag: 88 RealReadOffset: 3 WaitQuotaTimeMs: 3905 EndOffset: 4 StartOffset: 0 } Cookie: 0 } 2025-06-30T09:25:26.841156Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 2 consumer session _29_2_115977798006795926_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 4 2025-06-30T09:25:26.841175Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 2 consumer session _29_2_115977798006795926_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 4 ReadOffset 4 ReadGuid ef0a8c31-5d656915-613e4144-3afdbe57 has messages 1 2025-06-30T09:25:26.841279Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 2 consumer session _29_2_115977798006795926_v1 read done: guid# ef0a8c31-5d656915-613e4144-3afdbe57, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 82608 2025-06-30T09:25:26.841305Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 2 consumer session _29_2_115977798006795926_v1 response to read: guid# ef0a8c31-5d656915-613e4144-3afdbe57 Bytes readed: 82608 Offset: 0 from session 1 Offset: 1 from session 1 Offset: 2 from session 1 Offset: 3 from session 1 2025-06-30T09:25:26.841669Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 2 consumer session _29_2_115977798006795926_v1 Process answer. 
Aval parts: 0 2025-06-30T09:25:26.842698Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer session _29_2_115977798006795926_v1 grpc read done: success# 0, data# { } 2025-06-30T09:25:26.842704Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer session _29_2_115977798006795926_v1 grpc read failed 2025-06-30T09:25:26.842712Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer session _29_2_115977798006795926_v1 grpc closed 2025-06-30T09:25:26.842773Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer session _29_2_115977798006795926_v1 is DEAD 2025-06-30T09:25:26.842987Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session _29_2_115977798006795926_v1 2025-06-30T09:25:26.843015Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [29:7521671097114288401:2539] destroyed 2025-06-30T09:25:26.843051Z node 30 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _29_2_115977798006795926_v1 2025-06-30T09:25:27.830815Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=82536, count=4, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:25:29.842887Z node 28 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=82536, count=4, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-30T09:25:30.854915Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 5 Topic 'rt3.dc1--topic1' partition 0 user $without_consumer offset 0 count 4 size 99043 endOffset 4 max time lag 0ms effective offset 0 2025-06-30T09:25:30.855015Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 5 added 4 blobs, size 82536 count 4 last offset 3, current partition end offset: 4 2025-06-30T09:25:30.855022Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 5. Send blob request. 2025-06-30T09:25:30.855051Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 3 times before, last time 2025-06-30T09:25:26.000000Z 2025-06-30T09:25:30.855056Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 1 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 2 times before, last time 2025-06-30T09:25:26.000000Z 2025-06-30T09:25:30.855061Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 2 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 2 times before, last time 2025-06-30T09:25:26.000000Z 2025-06-30T09:25:30.855067Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 3 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 2 times before, last time 2025-06-30T09:25:26.000000Z 2025-06-30T09:25:30.855091Z node 30 :PERSQUEUE DEBUG: read.h:121: Reading cookie 5. All 4 blobs are from cache. 
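[Editor's note] The repeated "need more data for compaction" lines above are the partition checking its accumulated blob state against both compaction thresholds it logs. With the figures shown here neither limit is reached:

    cumulativeSize = 82536  <  cumulativeSizeLimit = 8388608
    count          = 4      <  bodyKeysCountLimit  = 300

so compaction is deferred until more data arrives; the message is informational, not an error.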
2025-06-30T09:25:30.855128Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 4 blobs 2025-06-30T09:25:30.855252Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-30T09:25:30.855337Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 1 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-30T09:25:30.855386Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 2 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-30T09:25:30.855427Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-30T09:25:30.855517Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--topic1' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:25:30.856498Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-30T09:25:30.856517Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 1 partno 0 count 1 parts 0 suffix '63' 2025-06-30T09:25:30.856522Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' 2025-06-30T09:25:30.856527Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 3 partno 0 count 1 parts 0 suffix '63' 2025-06-30T09:25:30.857267Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 3 consumer session _29_3_1139414874400700853_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 4 Result { Offset: 0 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 2 WriteTimestampMS: 1751275519017 CreateTimestampMS: 1751275519003 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 3 WriteTimestampMS: 1751275519046 CreateTimestampMS: 1751275519036 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 4 WriteTimestampMS: 1751275519066 CreateTimestampMS: 1751275519063 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 3 Data: "... 20570 bytes ..." 
SourceId: "\000source" SeqNo: 5 WriteTimestampMS: 1751275519079 CreateTimestampMS: 1751275519078 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 4 SizeLag: 88 RealReadOffset: 3 WaitQuotaTimeMs: 7912 EndOffset: 4 StartOffset: 0 } Cookie: 0 } Bytes readed: 82608 Offset: 0 from session 1 Offset: 1 from session 1 Offset: 2 from session 1 Offset: 3 from session 1 2025-06-30T09:25:30.857502Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 3 consumer session _29_3_1139414874400700853_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 4 2025-06-30T09:25:30.857543Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 3 consumer session _29_3_1139414874400700853_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 4 ReadOffset 4 ReadGuid f572552e-c5f311c-5c6abfef-5aae997f has messages 1 2025-06-30T09:25:30.857645Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 3 consumer session _29_3_1139414874400700853_v1 read done: guid# f572552e-c5f311c-5c6abfef-5aae997f, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 82608 2025-06-30T09:25:30.857657Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 3 consumer session _29_3_1139414874400700853_v1 response to read: guid# f572552e-c5f311c-5c6abfef-5aae997f 2025-06-30T09:25:30.859884Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 3 consumer session _29_3_1139414874400700853_v1 Process answer. Aval parts: 0 2025-06-30T09:25:30.861759Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer session _29_3_1139414874400700853_v1 grpc read done: success# 0, data# { } 2025-06-30T09:25:30.861765Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer session _29_3_1139414874400700853_v1 grpc read failed 2025-06-30T09:25:30.861772Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer session _29_3_1139414874400700853_v1 grpc closed 2025-06-30T09:25:30.861793Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer session _29_3_1139414874400700853_v1 is DEAD 2025-06-30T09:25:30.862834Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session _29_3_1139414874400700853_v1 2025-06-30T09:25:30.862857Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [29:7521671097114288404:2541] destroyed 2025-06-30T09:25:30.862896Z node 30 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _29_3_1139414874400700853_v1 >> KqpLimits::ComputeActorMemoryAllocationFailure+useSink >> TYdbControlPlaneStorageListQueries::ShouldCheckPrivateVisibility [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCheckSuperUser >> KqpQuery::ExecuteDataQueryCollectMeta ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 10089, MsgBus: 27019 2025-06-30T09:25:26.736709Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671110911163957:2140];send_to=[0:7307199536658146131:7762515]; 
2025-06-30T09:25:26.736856Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00422f/r3tmp/tmpvoez5e/pdisk_1.dat 2025-06-30T09:25:26.817394Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:26.817503Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671110911163853:2080] 1751275526733066 != 1751275526733069 TServer::EnableGrpc on GrpcPort 10089, node 1 2025-06-30T09:25:26.851040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:26.851076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:26.851080Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:26.851154Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27019 2025-06-30T09:25:26.888912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:26.888967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:26.890007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27019 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:26.932956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:25:26.938144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:27.012010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:27.042518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:27.060989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:27.276429Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671115206132743:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:27.276461Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:27.327014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.335477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.350459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.360885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.373726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.388303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.444500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.461577Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671115206133401:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:27.461608Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671115206133406:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:27.461608Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:27.462461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:27.470769Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671115206133408:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:27.527767Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671115206133459:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:27.708394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.728971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:27.732396Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:27.7 ... CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:32.278982Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:32.291542Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:32.307453Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:32.342056Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
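[Editor's note] The NOT_FOUND / "Scheduled retry ... doublechecking" / "path exist, request accepts it" sequence above is the workload service lazily creating the default resource pool: several actors race to create `/Root/.metadata/workload_manager/pools/default`, the loser sees "path exist", and the request is accepted anyway, so the TX_PROXY ERROR line is benign. The same create/race/retry pattern repeats for every fresh YDB instance started later in this log. A hedged YQL sketch of the DDL involved — the setting names follow YDB's resource pool syntax, but the concrete values are illustrative assumptions, not what the service actually issues:

    -- Illustrative only: values invented for the example.
    CREATE RESOURCE POOL default WITH (
        CONCURRENT_QUERY_LIMIT = 10,
        QUEUE_SIZE = 100
    );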
2025-06-30T09:25:32.379224Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:32.417226Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.609970Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671137309220463:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.610006Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.620834Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.644035Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.656528Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.672221Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.698903Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.716458Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.730636Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.742784Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671137309221118:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.742815Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671137309221123:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.742819Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.743644Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:32.749615Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671137309221125:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:32.812960Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671137309221176:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:33.086462Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.086583Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:33.097588Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.108889Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {"Plan":{"Plans":[{"PlanNodeId":14,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":13,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"},{"Inputs":[],"Iterator":"precompute_2_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_2_0"}],"Node Type":"Effect"},{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Iterator":"Filter","Name":"Iterator"},{"E-Rows":"2","Inputs":[],"Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"ConstantExpr-Filter"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_2_0","Node Type":"Precompute_2","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"E-Size":"0","LookupKeyColumns":["Key"],"Node Type":"TableLookup","PlanNodeId":2,"Path":"\/Root\/SecondaryKeys","Columns":["Key"],"E-Rows":"2","Plans":[{"PlanNodeId":1,"Operators":[{"Inputs":[],"Iterator":"precompute_0_1","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_1"}],"Table":"SecondaryKeys","PlanNodeType":"Connection","E-Cost":"0"}],"Node Type":"Stage"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node 
Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","reads":[{"lookup_by":["Key"],"columns":["Key"],"type":"Lookup"}],"writes":[{"columns":["Key","Value"],"type":"MultiUpsert"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":8,"Operators":[{"E-Rows":"2","Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"Filter"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> KqpQuery::TryToUpdateNonExistentColumn [GOOD] >> KqpQuery::UpdateThenDelete+UseSink >> KqpParams::CheckQueryCacheForUnpreparedQuery >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[with_queues-tables_format_v1] [GOOD] >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[without_queues-tables_format_v0] >> KqpParams::Decimal-QueryService-UseSink [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpLimits::ReplySizeExceeded [GOOD] Test command err: Trying to start YDB, gRPC: 17781, MsgBus: 30036 2025-06-30T09:25:18.131981Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671077339377759:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:18.132975Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004240/r3tmp/tmpGw8ttU/pdisk_1.dat 2025-06-30T09:25:18.265683Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:18.267517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:18.267550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:18.268838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17781, node 1 2025-06-30T09:25:18.295021Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:18.295038Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:18.295041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:18.295109Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30036 TClient is connected to server localhost:30036 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:18.366328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:25:18.381204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:19.132865Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:22.041569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671094519248414:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.041549Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671094519248406:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.041689Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:22.043142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:22.046363Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671094519248420:2404], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:22.111663Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671094519248471:2949] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:22.207828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:23.134255Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521671077339377759:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:23.134367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 24515, MsgBus: 3924 2025-06-30T09:25:25.215561Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671107335517307:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:25.216858Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004240/r3tmp/tmpKVm6MD/pdisk_1.dat 2025-06-30T09:25:25.247848Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:25.248281Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521671107335517284:2080] 1751275525215299 != 1751275525215302 TServer::EnableGrpc on GrpcPort 24515, node 2 2025-06-30T09:25:25.258211Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:25.258234Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:25.258237Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:25.258295Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3924 TClient is connected to server localhost:3924 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:25:25.328185Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:25.328908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:25.330664Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:25.332308Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:25.332669Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:25.344582Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:26.218873Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:28.573852Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671120220420686:2399], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:28.573890Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:28.574086Z node 2 :KQP_WORKLOAD_SERVICE WAR ... 4Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671131744396937:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:30.950592Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004240/r3tmp/tmpU8f2qB/pdisk_1.dat 2025-06-30T09:25:31.009601Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:31.010893Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671131744396909:2080] 1751275530948248 != 1751275530948251 TServer::EnableGrpc on GrpcPort 6484, node 4 2025-06-30T09:25:31.039422Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:31.039440Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:31.039442Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:31.039492Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:31.047941Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:31.047981Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:31.048990Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25436 TClient is connected to server localhost:25436 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:25:31.219405Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:31.221849Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:31.231778Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:31.294683Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:31.348210Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:31.412123Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:31.580983Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671136039365812:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.581015Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.595641Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.609221Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.621369Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.636222Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.647114Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.658579Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.672929Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.703908Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671136039366465:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.703945Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.704062Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671136039366470:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.705166Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:31.709361Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671136039366472:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:31.782855Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671136039366523:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:31.947705Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:31.967299Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.181947Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=OTAxYzBmZGYtNWUyMjcxYzgtMmFjMzA4ZmItODlhODY3MGM=, ActorId: [4:7521671136039366796:2470], ActorState: ExecuteState, TraceId: 01jz02fwqy4yeh8y88s87wyden, Create QueryResponse for error on request, msg: >> KqpTypes::QuerySpecialTypes >> KqpLimits::CancelAfterRoTx [GOOD] >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookup >> KqpQuery::Now [GOOD] >> KqpQuery::GenericQueryNoRowsLimit >> TYdbControlPlaneStorageListConnections::ShouldCheckScopeVisibility [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckPrivateVisibility >> KqpExplain::UpdateSecondaryConditionalPrimaryKey-UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalSecondaryKey+UseSink >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[without_queues-tables_format_v0] [GOOD] >> KqpLimits::QSReplySizeEnsureMemoryLimits+useSink >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[without_queues-tables_format_v1] >> KqpLimits::ComputeActorMemoryAllocationFailure+useSink [GOOD] >> KqpLimits::CancelAfterRwTx+useSink >> KqpParams::CheckQueryCacheForExecuteAndPreparedQueries [GOOD] >> KqpParams::CheckCacheByAst >> KqpStats::JoinNoStatsYql [GOOD] >> KqpStats::JoinNoStatsScan >> KqpParams::RowsList [GOOD] >> KqpParams::MissingParameter |84.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |84.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |84.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpParams::Decimal-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 8811, MsgBus: 15932 2025-06-30T09:25:28.659679Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671122557230050:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:28.666983Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/00422c/r3tmp/tmpZZEbve/pdisk_1.dat 2025-06-30T09:25:28.750687Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:28.763928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:28.763965Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:28.769938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8811, node 1 2025-06-30T09:25:28.811049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:28.811066Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:28.811070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:28.811125Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15932 TClient is connected to server localhost:15932 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:28.984278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:28.988197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:28.995389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
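[editor note] The KQP_WORKLOAD_SERVICE warnings above are the workload manager's normal bootstrap sequence on a fresh test cluster: fetching the built-in pool first fails with NOT_FOUND, the service then creates /Root/.metadata/workload_manager/pools/default itself, and the scheduled "doublechecking" retry finds the path already present, which is why the subsequent TX_PROXY ERROR reports "path exist, request accepts it"; the error is benign. For reference, a minimal sketch of declaring such a pool explicitly, assuming the CREATE RESOURCE POOL syntax of recent YDB releases; the pool name and limits below are illustrative and not taken from the test:

    -- Illustrative only: these tests rely on automatic creation of the
    -- `default` pool rather than issuing this statement themselves.
    CREATE RESOURCE POOL test_pool WITH (
        CONCURRENT_QUERY_LIMIT = 10,  -- hypothetical limit
        QUEUE_SIZE = 100              -- hypothetical queue depth
    );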
2025-06-30T09:25:29.070868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:29.105130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.127236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:29.318548Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671126852198807:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.318590Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.373225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.391593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.404480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.460965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.475854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.489495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.502545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.519648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671126852199466:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.519681Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.519792Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671126852199471:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.521124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:29.530553Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671126852199473:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:29.594055Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671126852199524:3400] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:29.647745Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 63654, MsgBus: 6533 2025-06-30T09:25:30.185788Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671128488854414:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:30.185818Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00422c/r3tmp/tmpZSCy1f/pdisk_1.dat 2025-06-30T09:25:30.211516Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:30.211759Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521671128488854386:20 ... 311) 2025-06-30T09:25:32.884480Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:25:32.885588Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:32.891399Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:32.915998Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:25:32.965500Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.980027Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:33.232583Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671140655615927:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.232615Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.245547Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.256709Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.273045Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.286826Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.344728Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.359622Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.374789Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.403614Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671140655616581:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.403674Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.403880Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671140655616586:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.405164Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:33.410988Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671140655616588:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:33.485021Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671140655616639:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:33.708135Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.781943Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:33.981441Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7521671140655617047:2502], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:4:17: Error: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At tuple, At function: SqlProjectItem, At lambda
:3:25: Error: At function: Parameter, At function: DataType
:3:25: Error: Invalid decimal precision: 99 2025-06-30T09:25:33.981964Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=4&id=ZGY3NDBkYTktYmVkYjM0NjItZTY4OWNiMzUtZDllY2U0MzA=, ActorId: [4:7521671140655616862:2460], ActorState: ExecuteState, TraceId: 01jz02fxmq9zwmrsrs6bm58j1h, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:25:34.037123Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZGY3NDBkYTktYmVkYjM0NjItZTY4OWNiMzUtZDllY2U0MzA=, ActorId: [4:7521671140655616862:2460], ActorState: ExecuteState, TraceId: 01jz02fxn7amxymkac6wdwrek9, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1417: ydb/core/kqp/query_data/kqp_query_data.cpp:271: Parameter $value22 type mismatch, expected: { Kind: Data Data { Scheme: 4865 DecimalParams { Precision: 22 Scale: 9 } } }, actual: Type (Data), schemeType: Decimal(35,10), schemeTypeId: 4865 2025-06-30T09:25:34.048398Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7521671144950584359:2508], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:7:29: Error: At function: KiWriteTable!
:7:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:4:25: Error: Implicit decimal cast would lose precision
:7:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:7:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-30T09:25:34.048822Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=4&id=ZGY3NDBkYTktYmVkYjM0NjItZTY4OWNiMzUtZDllY2U0MzA=, ActorId: [4:7521671140655616862:2460], ActorState: ExecuteState, TraceId: 01jz02fxpt0xnvwspa2prn9yrz, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:25:34.057715Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7521671144950584368:2512], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:29: Error: At function: KiWriteTable!
:3:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:0:14: Error: Implicit decimal cast would lose precision
:3:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:3:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-30T09:25:34.058612Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=4&id=ZGY3NDBkYTktYmVkYjM0NjItZTY4OWNiMzUtZDllY2U0MzA=, ActorId: [4:7521671140655616862:2460], ActorState: ExecuteState, TraceId: 01jz02fxq5f0xg53r841h5f9sn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[without_queues-tables_format_v1] [GOOD] >> KqpQuery::TableSink_ReplaceDataShardDataQuery+UseSink [GOOD] >> KqpQuery::TableSink_ReplaceDataShardDataQuery-UseSink >> KqpExplain::SortStage |84.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |84.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |84.1%| [LD] {RESULT} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut >> KqpQuery::ExecuteDataQueryCollectMeta [GOOD] >> KqpQuery::DeleteWhereInSubquery >> TYdbControlPlaneStorageModifyBinding::ShouldNotCreatePrivateBindingWithUnavailableConnection [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldNotCreatePrivateConnectionWithDesctructionBinding |84.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |84.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |84.1%| [LD] {RESULT} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut >> KqpQuery::RandomNumber >> KqpQuery::UpdateThenDelete+UseSink [GOOD] >> KqpParams::CheckQueryCacheForUnpreparedQuery [GOOD] >> KqpParams::Decimal+QueryService-UseSink >> KqpLimits::DatashardProgramSize+useSink [GOOD] >> KqpLimits::DatashardProgramSize-useSink >> KqpParams::MissingParameter [GOOD] >> KqpParams::MissingOptionalParameter-UseSink >> TYdbControlPlaneStorageListQueries::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCombineFilters >> KqpTypes::QuerySpecialTypes [GOOD] >> KqpTypes::DyNumberCompare >> KqpLimits::ComputeActorMemoryAllocationFailure-useSink >> KqpQuery::GenericQueryNoRowsLimit [GOOD] >> KqpQuery::GenericQueryNoRowsLimitLotsOfRows >> TYdbControlPlaneStorageModifyConnection::ShouldCheckMoveToScope [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckIdempotencyKey ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::UpdateThenDelete+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 16935, MsgBus: 61900 2025-06-30T09:25:28.476109Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671119555009872:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:28.476425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00422e/r3tmp/tmp1YFFy4/pdisk_1.dat 2025-06-30T09:25:28.576346Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16935, node 1 2025-06-30T09:25:28.618297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:28.618312Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
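[editor note] The compilation failures above show the two compile-time guards that KqpParams::Decimal exercises: a declared decimal precision outside the supported range (YDB decimals allow precision up to 35) fails type annotation with "Invalid decimal precision: 99", and writing Decimal(35,10) values into a Decimal(22,9) column is rejected with "Implicit decimal cast would lose precision" (code 2031) instead of being silently truncated. A hedged YQL sketch of both failing shapes; the table name is an illustrative stand-in, since the log only reveals columns Key Int32, Value22 Decimal(22,9) and Value35 Decimal(35,10):

    -- 1) Precision above the supported maximum fails type annotation (code 1030):
    DECLARE $bad AS Decimal(99, 9);

    -- 2) Hypothetical table with a Decimal(22,9) column; upserting a wider
    --    Decimal(35,10) value fails to convert (code 2031) rather than truncate:
    UPSERT INTO `/Root/DecimalTest` (Key, Value22)
    VALUES (1, CAST("1.2345678901" AS Decimal(35, 10)));

Both statements are rejected at compile time, which matches the GENERIC_ERROR / ReplyQueryCompileError responses recorded above.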
2025-06-30T09:25:28.618314Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:28.618360Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:28.637033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:28.637065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:28.638520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61900 TClient is connected to server localhost:61900 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:28.725797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:28.731235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:28.736812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:28.774426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:28.815344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:28.871699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:28.987379Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671119555011323:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:28.987413Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.064169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.075830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.087551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.146707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.159896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.176593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.243124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.282710Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671123849979273:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.282807Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.282963Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671123849979281:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.284114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:29.290255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:29.290371Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671123849979283:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:29.345332Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671123849979334:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:29.474637Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:29.721190Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7521671123849979625:2478], TxId: 281474976715672, task: 1. Ctx: { TraceId : 01jz02fsb46fh0jd780yn1hge7. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=MjE5NmY1NmMtYTMwYjJmOGUtOTg1NzE2ZS02ZGE2OTI0. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED DEFAULT_ERROR: {
: Error: Terminate was called, reason(17): Bad filter value. }. 2025-06-30T09:25:29.721445Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7521671123849979626:2479], TxId: 281474976715672, task: 2. Ctx: { ...
:3:84: Error: At function: KiUpdateTable!
:3:84: Error: Column 'NonExistentColumn' does not exist in table '/Root/KeyValue'., code: 2017 2025-06-30T09:25:34.116123Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=3&id=NzhmNTdhZC04ODhmNDBiYy1kY2EyNDUwOC05NDU4N2ZmNA==, ActorId: [3:7521671147164010336:2469], ActorState: ExecuteState, TraceId: 01jz02fxrv14c75h3g7pxm2yam, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-30T09:25:34.170501Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 3910, MsgBus: 29882 2025-06-30T09:25:34.364665Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671147584464711:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:34.364702Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00422e/r3tmp/tmpDU39Rs/pdisk_1.dat 2025-06-30T09:25:34.387972Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:34.388152Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671147584464685:2080] 1751275534364513 != 1751275534364516 TServer::EnableGrpc on GrpcPort 3910, node 4 2025-06-30T09:25:34.396066Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:34.396085Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:34.396087Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:34.396136Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29882 2025-06-30T09:25:34.473376Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:34.473419Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:34.474673Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29882 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:34.526816Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:34.533988Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.545141Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.568536Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.583355Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.907246Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671147584466282:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.907278Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.917014Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.928875Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.947390Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.966609Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.984381Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.997254Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.011709Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.050761Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671151879434230:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.050800Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.053272Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671151879434235:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.054528Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:35.058788Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:35.058865Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671151879434237:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:35.111788Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671151879434288:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:35.370826Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; [] >> KqpExplain::UpdateSecondaryConditionalSecondaryKey+UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalSecondaryKey-UseSink >> KqpLimits::QSReplySizeEnsureMemoryLimits+useSink [GOOD] >> KqpLimits::QSReplySizeEnsureMemoryLimits-useSink >> TYdbControlPlaneStorageCreateConnection::ShouldCheckCommitTransactionReadWrite [GOOD] >> KqpExplain::SortStage [GOOD] >> KqpExplain::SelfJoin3xSameLabels |84.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut >> KqpQuery::DeleteWhereInSubquery [GOOD] >> KqpQuery::DictJoin >> KqpStats::OneShardNonLocalExec-UseSink [GOOD] |84.1%| [LD] {RESULT} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |84.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut >> KqpQuery::TableSink_ReplaceDataShardDataQuery-UseSink [GOOD] >> KqpQuery::TableSinkWithSubquery >> KqpExplain::LimitOffset >> KqpLimits::ComputeActorMemoryAllocationFailure-useSink [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService+useSink |84.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_purge_queue_batch[tables_format_v0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpStats::OneShardNonLocalExec-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 19150, MsgBus: 6269 2025-06-30T09:25:28.724810Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671123276469270:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:28.724840Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00422a/r3tmp/tmpjVdj4c/pdisk_1.dat 2025-06-30T09:25:28.795137Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671123276469249:2080] 1751275528724677 != 1751275528724680 2025-06-30T09:25:28.803741Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19150, node 1 2025-06-30T09:25:28.816372Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:28.816387Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:28.816391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:28.816439Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6269 2025-06-30T09:25:28.867193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:28.867226Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:6269 2025-06-30T09:25:28.868449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:28.903298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:28.911081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:28.914340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:28.950499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:28.979688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
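[editor note] The KiUpdateTable failure recorded earlier in the KqpQuery::UpdateThenDelete output, "Column 'NonExistentColumn' does not exist in table '/Root/KeyValue'." (code 2017), is the negative case that test drives deliberately. A rough reconstruction of the rejected statement, inferred from the error positions only; the key and value literals are illustrative:

    UPDATE `/Root/KeyValue`
    SET NonExistentColumn = "value"  -- column deliberately absent; rejected with code 2017
    WHERE Key = 1;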
2025-06-30T09:25:28.996921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.205303Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671127571438140:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.205347Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.268369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.310592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.328381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.340514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.352145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.367564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.378050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:29.399725Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671127571438791:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.399767Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.399774Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671127571438796:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:29.400761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:29.403046Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671127571438798:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:29.456180Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671127571438849:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:29.609792Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521671127571439121:2474], status: GENERIC_ERROR, issues:
:2:12: Error: mismatched input 'INCORRECT_STMT' expecting {';', '(', '$', ALTER, ANALYZE, BACKUP, BATCH, COMMIT, CREATE, DECLARE, DEFINE, DELETE, DISCARD, DO, DROP, EVALUATE, EXPLAIN, EXPORT, FOR, FROM, GRANT, IF, IMPORT, INSERT, PARALLEL, PRAGMA, PROCESS, REDUCE, REPLACE, RESTORE, REVOKE, ROLLBACK, SELECT, SHOW, UPDATE, UPSERT, USE, VALUES} 2025-06-30T09:25:29.609890Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=MmY5ODhhNDYtNjQ3YzgwMDktMTIwMWYwM2QtNjg0OGE2MTE=, ActorId: [1:7521671127571439113:2469], ActorState: ExecuteState, TraceId: 01jz02fsc7a1sg0p2kabmdgfv1, ReplyQueryCompileError, status GENERI ... _snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00422a/r3tmp/tmpwGDol4/pdisk_1.dat 2025-06-30T09:25:33.886537Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7521671141604788579:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:33.886572Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:33.919018Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:33.919519Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7521671141157296381:2072] 1751275533887201 != 1751275533887198 2025-06-30T09:25:33.924976Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:33.925009Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:33.937740Z node 5 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-06-30T09:25:33.938048Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16194, node 5 2025-06-30T09:25:33.945268Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:33.945277Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:33.945280Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:33.945332Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26724 2025-06-30T09:25:33.987057Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:33.987090Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:33.991421Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26724 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:34.021076Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:34.107936Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.188229Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.243746Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:34.275917Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.428695Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671145899757798:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.428726Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.441235Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.528970Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.558761Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.625591Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.675637Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.793589Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.889117Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:34.889360Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:34.897315Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.939549Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671145899758729:2383], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.939588Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.939706Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671145899758734:2386], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.940762Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:34.950561Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7521671145899758736:2387], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720669 completed, doublechecking } 2025-06-30T09:25:35.039727Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521671150194726124:4297] txid# 281474976720670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> KqpQuery::RandomNumber [GOOD] >> KqpQuery::RandomUuid >> KqpParams::Decimal+QueryService-UseSink [GOOD] >> KqpParams::Decimal-QueryService+UseSink >> KqpStats::JoinNoStatsScan [GOOD] >> KqpStats::JoinStatsBasicScan >> KqpParams::MissingOptionalParameter-UseSink [GOOD] >> KqpParams::ParameterTypes >> KqpTypes::DyNumberCompare [GOOD] >> KqpTypes::SelectNull >> KqpLimits::DatashardProgramSize-useSink [GOOD] >> KqpLimits::DatashardReplySize >> TYdbControlPlaneStorageListQueries::ShouldCombineFilters [GOOD] >> KqpExplain::SelfJoin3xSameLabels [GOOD] >> KqpExplain::SqlIn >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService+useSink [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService-useSink >> KqpQuery::TableSinkWithSubquery [GOOD] >> TSubscriberCombinationsTest::CombinationsMigratedPath [GOOD] >> KqpQuery::GenericQueryNoRowsLimitLotsOfRows [GOOD] >> KqpQuery::NoEvaluate >> KqpLimits::TooBigQuery+useSink >> KqpQuery::DictJoin [GOOD] >> KqpQuery::ExecuteWriteQuery >> KqpLimits::QSReplySizeEnsureMemoryLimits-useSink [GOOD] >> KqpLimits::QSReplySize+useSink |84.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |84.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |84.1%| [LD] {RESULT} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load >> KqpExplain::UpdateSecondaryConditionalSecondaryKey-UseSink [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldNotCreatePrivateConnectionWithDesctructionBinding [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckObjectStorageProjectionByTypes >> KqpExplain::LimitOffset [GOOD] >> KqpExplain::MultiUsedStage >> TYdbControlPlaneStorageModifyConnection::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckPreviousRevisionFailed >> KqpStats::DeferredEffects+UseSink >> KqpQuery::RandomUuid [GOOD] >> KqpQuery::ReadOverloaded+StreamLookup >> KqpQuery::QueryClientTimeout ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberCombinationsTest::CombinationsMigratedPath [GOOD] Test command err: =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2025-06-30T09:24:17.207844Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2025-06-30T09:24:17.207867Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:3:2050] Successful handshake: owner# 800, generation# 1 2025-06-30T09:24:17.207896Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:36:2066] 
2025-06-30T09:24:17.207900Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:3:2050] Commit generation: owner# 800, generation# 1 2025-06-30T09:24:17.207906Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:37:2067] 2025-06-30T09:24:17.207908Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 800, generation# 1 2025-06-30T09:24:17.207939Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:37:2067] 2025-06-30T09:24:17.207942Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:6:2053] Commit generation: owner# 800, generation# 1 2025-06-30T09:24:17.207958Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:39:2069][/root/tenant] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.208010Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:43:2069] 2025-06-30T09:24:17.208014Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:3:2050] Upsert description: path# /root/tenant 2025-06-30T09:24:17.208038Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:3:2050] Subscribe: subscriber# [1:43:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:24:17.208057Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:44:2069] 2025-06-30T09:24:17.208059Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert description: path# /root/tenant 2025-06-30T09:24:17.208063Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:44:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:24:17.208073Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:45:2069] 2025-06-30T09:24:17.208076Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:9:2056] Upsert description: path# /root/tenant 2025-06-30T09:24:17.208079Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:9:2056] Subscribe: subscriber# [1:45:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:24:17.208089Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:3:2050] 2025-06-30T09:24:17.208096Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:43:2069] 2025-06-30T09:24:17.208100Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:44:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:6:2053] 2025-06-30T09:24:17.208104Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:44:2069] 2025-06-30T09:24:17.208109Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:45:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:9:2056] 2025-06-30T09:24:17.208113Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:45:2069] 2025-06-30T09:24:17.208121Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:40:2069] 2025-06-30T09:24:17.208134Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:41:2069] 2025-06-30T09:24:17.208143Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:39:2069][/root/tenant] Set up state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.208148Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:42:2069] 2025-06-30T09:24:17.208153Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:39:2069][/root/tenant] Ignore empty state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== !argsLeft.IsDeletion 2025-06-30T09:24:17.208194Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:3:2050] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:36:2066], cookie# 0, event size# 103 2025-06-30T09:24:17.208199Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:3:2050] Update description: path# /root/tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-30T09:24:17.208209Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:3:2050] Upsert description: path# /root/tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /root/tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-30T09:24:17.208235Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant PathId: [OwnerId: 800, LocalPathId: 2] Version: 1 }: sender# [1:3:2050] 2025-06-30T09:24:17.208242Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:43:2069] 2025-06-30T09:24:17.208249Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant PathId: [OwnerId: 800, LocalPathId: 2] Version: 1 }: sender# [1:40:2069] 2025-06-30T09:24:17.208256Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:39:2069][/root/tenant] Update to strong state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 2] AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800
=========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 1 PathOwnerId: 900 2025-06-30T09:24:17.615461Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [3:36:2066] 2025-06-30T09:24:17.615489Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:3:2050] Successful handshake: owner# 800, generation# 1 2025-06-30T09:24:17.615510Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [3:36:2066] 2025-06-30T09:24:17.615516Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:3:2050] Commit generation: owner# 800, generation# 1 2025-06-30T09:24:17.615523Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [3:37:2067] 2025-06-30T09:24:17.615528Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:6:2053] Successful handshake: owner# 900, generation# 1 2025-06-30T09:24:17.615555Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [3:37:2067] 2025-06-30T09:24:17.615559Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:6:2053] Commit generation: owner# 900, generation# 1 2025-06-30T09:24:17.615589Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][3:39:2069][/root/tenant] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.615655Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:43:2069] 2025-06-30T09:24:17.615661Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:3:2050] Upsert description: path# /root/tenant 2025-06-30T09:24:17.615687Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:3:2050] Subscribe: subscriber# [3:43:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:24:17.615709Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:44:2069] 2025-06-30T09:24:17.615713Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:6:2053] Upsert description: path# /root/tenant 2025-06-30T09:24:17.615719Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:44:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:24:17.615734Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:45:2069] 2025-06-30T09:24:17.615738Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:9:2056] Upsert description: path# /root/tenant 2025-06-30T09:24:17.615744Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:9:2056] Subscribe: subscriber# [3:45:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:24:17.615757Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [3:3:2050] 2025-06-30T09:24:17.615768Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:3:2050] Handle 
NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:43:2069] 2025-06-30T09:24:17.615774Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:44:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [3:6:2053] 2025-06-30T09:24:17.615780Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:44:2069] 2025-06-30T0 ... 53] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [397:37:2067] 2025-06-30T09:25:37.104592Z node 397 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [397:6:2053] Successful handshake: owner# 910, generation# 1 2025-06-30T09:25:37.104600Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [397:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [397:36:2066] 2025-06-30T09:25:37.104605Z node 397 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [397:3:2050] Commit generation: owner# 910, generation# 1 2025-06-30T09:25:37.104625Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [397:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [397:37:2067] 2025-06-30T09:25:37.104629Z node 397 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [397:6:2053] Commit generation: owner# 910, generation# 1 2025-06-30T09:25:37.104661Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:25:37.104726Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [397:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [397:43:2069] 2025-06-30T09:25:37.104732Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [397:3:2050] Upsert description: path# /Root/Tenant/table_inside 2025-06-30T09:25:37.104758Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [397:3:2050] Subscribe: subscriber# [397:43:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:25:37.104781Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [397:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [397:44:2069] 2025-06-30T09:25:37.104785Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [397:6:2053] Upsert description: path# /Root/Tenant/table_inside 2025-06-30T09:25:37.104791Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [397:6:2053] Subscribe: subscriber# [397:44:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:25:37.104806Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [397:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [397:45:2069] 2025-06-30T09:25:37.104809Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [397:9:2056] Upsert description: path# /Root/Tenant/table_inside 2025-06-30T09:25:37.104815Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [397:9:2056] Subscribe: subscriber# [397:45:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:25:37.104827Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][397:43:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:3:2050] 
2025-06-30T09:25:37.104839Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [397:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [397:43:2069] 2025-06-30T09:25:37.104846Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][397:44:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:6:2053] 2025-06-30T09:25:37.104853Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [397:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [397:44:2069] 2025-06-30T09:25:37.104860Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][397:45:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:9:2056] 2025-06-30T09:25:37.104866Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [397:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [397:45:2069] 2025-06-30T09:25:37.104876Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:40:2069] 2025-06-30T09:25:37.104894Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:41:2069] 2025-06-30T09:25:37.104905Z node 397 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][397:39:2069][/Root/Tenant/table_inside] Set up state: owner# [397:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:25:37.104914Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:42:2069] 2025-06-30T09:25:37.104921Z node 397 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][397:39:2069][/Root/Tenant/table_inside] Ignore empty state: owner# [397:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() =========== Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 2025-06-30T09:25:37.528611Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [399:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [399:36:2066] 2025-06-30T09:25:37.528639Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [399:3:2050] Successful handshake: owner# 910, generation# 1 2025-06-30T09:25:37.528656Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [399:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [399:37:2067] 2025-06-30T09:25:37.528661Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [399:6:2053] Successful handshake: owner# 910, generation# 1 2025-06-30T09:25:37.528670Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [399:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [399:36:2066] 2025-06-30T09:25:37.528675Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [399:3:2050] Commit generation: owner# 910, generation# 1 2025-06-30T09:25:37.528699Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [399:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [399:37:2067] 2025-06-30T09:25:37.528703Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [399:6:2053] Commit generation: owner# 910, generation# 1 2025-06-30T09:25:37.528729Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:25:37.528796Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [399:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:43:2069] 2025-06-30T09:25:37.528803Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [399:3:2050] Upsert description: path# /Root/Tenant/table_inside 2025-06-30T09:25:37.528829Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [399:3:2050] Subscribe: subscriber# [399:43:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:25:37.528851Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [399:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:44:2069] 2025-06-30T09:25:37.528855Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [399:6:2053] Upsert description: path# /Root/Tenant/table_inside 2025-06-30T09:25:37.528862Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [399:6:2053] Subscribe: subscriber# [399:44:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:25:37.528881Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [399:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:45:2069] 2025-06-30T09:25:37.528885Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [399:9:2056] Upsert description: path# /Root/Tenant/table_inside 2025-06-30T09:25:37.528890Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [399:9:2056] Subscribe: subscriber# [399:45:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:25:37.528904Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][399:43:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:3:2050] 2025-06-30T09:25:37.528913Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [399:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [399:43:2069] 2025-06-30T09:25:37.528920Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][399:44:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:6:2053] 2025-06-30T09:25:37.528928Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [399:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [399:44:2069] 2025-06-30T09:25:37.528935Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][399:45:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:9:2056] 2025-06-30T09:25:37.528940Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [399:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [399:45:2069] 2025-06-30T09:25:37.528953Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:40:2069] 2025-06-30T09:25:37.528971Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:41:2069] 2025-06-30T09:25:37.528982Z node 399 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][399:39:2069][/Root/Tenant/table_inside] Set up state: owner# [399:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:25:37.528993Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:42:2069] 2025-06-30T09:25:37.529001Z node 399 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][399:39:2069][/Root/Tenant/table_inside] Ignore empty state: owner# [399:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::TableSinkWithSubquery [GOOD] Test command err: Trying to start YDB, gRPC: 20468, MsgBus: 9872 2025-06-30T09:25:32.195183Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671139054732153:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:32.195204Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004227/r3tmp/tmpFulEYp/pdisk_1.dat 2025-06-30T09:25:32.342825Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671139054732127:2080] 1751275532194834 != 1751275532194837 2025-06-30T09:25:32.349298Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:32.372719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:32.372752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:32.376470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20468, node 1 2025-06-30T09:25:32.398975Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:32.398993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:25:32.398996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:32.399045Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9872 TClient is connected to server localhost:9872 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:32.560016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:32.566958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:32.579970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:32.657034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:32.723701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:25:32.784144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.873282Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671139054733744:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.873329Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.925460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.938344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.947889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.959780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.976658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.988398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.047730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.065803Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671143349701693:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.065825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671143349701698:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.065833Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.066859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:33.070700Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671143349701700:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:33.169551Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671143349701751:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:33.196967Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 19660, MsgBus: 13353 2025-06-30T09:25:33.802766Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671143525079611:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:33.802801Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004227/r3tmp/tmpVaVJ11/pdisk_1.dat 2025-06-30T09:25:33.823778Z node 2 :CONFIGS_DISPATCHER ERROR: confi ... _operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:35.369594Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:35.710819Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521671149182197152:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.710847Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.720298Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.765609Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.848142Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521671149182198476:2399], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.848173Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.848327Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7521671149182198481:2402], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.849488Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:35.852828Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-30T09:25:35.852929Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521671149182198483:2403], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:25:35.920034Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671149182198534:3179] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:36.217738Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 2923, MsgBus: 25534 2025-06-30T09:25:36.863787Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671154914035961:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:36.864648Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004227/r3tmp/tmpVrm9tW/pdisk_1.dat 2025-06-30T09:25:36.882807Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:36.882981Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671154914035931:2080] 1751275536863575 != 1751275536863578 TServer::EnableGrpc on GrpcPort 2923, node 4 2025-06-30T09:25:36.892356Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:36.892369Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:36.892373Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:36.892428Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25534 TClient is connected to server localhost:25534 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:36.970003Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:36.970037Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:36.970434Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:36.971014Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:37.275339Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671159209003849:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.275382Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.278528Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.287409Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.308572Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671159209004021:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.308616Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671159209004026:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.308628Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.309556Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:37.312495Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671159209004028:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:25:37.403921Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671159209004079:2430] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:37.470724Z node 4 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037888 Cancelled read: {[4:7521671159209004138:2326], 0} 2025-06-30T09:25:37.524985Z node 4 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037889 Cancelled read: {[4:7521671159209004164:2335], 0} >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService-useSink [GOOD] >> KqpLimits::ComputeNodeMemoryLimit >> TYdbControlPlaneStoragePipeline::ShouldCheckChangeAutomaticTtl [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckResultsTTL >> KqpTypes::SelectNull [GOOD] >> KqpTypes::MultipleCurrentUtcTimestamp ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateSecondaryConditionalSecondaryKey-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 5978, MsgBus: 21996 2025-06-30T09:25:30.689899Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671130005044133:2169];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:30.690509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004229/r3tmp/tmp6BCpXi/pdisk_1.dat 2025-06-30T09:25:30.781207Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671130005043976:2080] 1751275530687174 != 1751275530687177 2025-06-30T09:25:30.785086Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5978, node 1 2025-06-30T09:25:30.791479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:30.791512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:30.792169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:30.819069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:30.819085Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:30.819088Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:30.819148Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21996 TClient is connected to server localhost:21996 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:30.963371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:25:30.974532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.051598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:31.083609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:31.100230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:31.408533Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671134300012871:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.408563Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.454995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.467816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.477322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.493030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.504897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.519926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.535438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.608164Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671134300013528:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.608214Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.608411Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671134300013533:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:31.609615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:31.612952Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671134300013535:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:31.667211Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671134300013586:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:31.689965Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:31.860162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.876599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:31.939 ... in>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.962911Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.972542Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:36.981496Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:36.994814Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.005893Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.045492Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.063554Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.080416Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.103508Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671158876057894:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.103545Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.103721Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671158876057899:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.105017Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:37.108869Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:37.109268Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671158876057901:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:37.181751Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671158876057952:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:37.392374Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.415630Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.427202Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.507291Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; {"Plan":{"Plans":[{"PlanNodeId":18,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":17,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"},{"Inputs":[],"Iterator":"precompute_1_2","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_1_2"}],"Node Type":"Effect"},{"PlanNodeId":16,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":15,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_1_1","Name":"Iterator"}],"Node Type":"Delete-ConstantExpr","CTE Name":"precompute_1_1"}],"Node Type":"Effect"},{"PlanNodeId":14,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":13,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_1_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_1_0"}],"Node Type":"Effect"},{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":7,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 
4)"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","E-Rows":"0","ReadRangesPointPrefixLen":"1","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Subplan Name":"CTE Stage_5","Node Type":"Stage","Parent Relationship":"InitPlan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Node Type":"UnionAll","CTE Name":"Stage_5","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_1","Node Type":"Precompute_1_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":1,"Node Type":"UnionAll","CTE Name":"Stage_5","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_2","Node Type":"Precompute_1_2","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","writes":[{"columns":["Key","Fk"],"type":"MultiUpsert"}]},{"name":"\/Root\/SecondaryKeys\/Index\/indexImplTable","reads":[{"columns":["Fk","Key"],"scan_by":["Fk [1, 4)"],"type":"Scan"}],"writes":[{"columns":["Key","Fk"],"type":"MultiUpsert"},{"type":"MultiErase"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":7,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 4)"],"Name":"TableRangeScan","Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Upsert"}],"Node Type":"Effect"},{"PlanNodeId":8,"Plans":[{"PlanNodeId":9,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":14,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 4)"],"Name":"TableRangeScan","Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Delete"}],"Node Type":"Effect"},{"PlanNodeId":15,"Plans":[{"PlanNodeId":16,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":22,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 4)"],"Name":"TableRangeScan","Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> 
KqpParams::ParameterTypes [GOOD] >> KqpParams::CheckCacheByAst [GOOD] >> KqpParams::CheckCacheWithRecompilationQuery >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckPrivateVisibility [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckSuperUser >> KqpExplain::SqlIn [GOOD] >> KqpExplain::SsaProgramInJsonPlan ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpParams::ParameterTypes [GOOD] Test command err: Trying to start YDB, gRPC: 1676, MsgBus: 21244 2025-06-30T09:25:33.842183Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671142364066611:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:33.844451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00421c/r3tmp/tmpO0VLqQ/pdisk_1.dat 2025-06-30T09:25:33.906710Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1676, node 1 2025-06-30T09:25:33.926855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:33.926874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:33.926877Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:33.926942Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:33.943238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:33.943280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:33.944071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21244 TClient is connected to server localhost:21244 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:34.039689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:34.051254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:34.109657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.179408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.255180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.273330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.342272Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671146659035457:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.342300Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.397894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.407385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.420329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.431495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.443897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.458761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.472381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.489113Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671146659036109:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.489151Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.489154Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671146659036114:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.490110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:34.499467Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671146659036116:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:34.557072Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671146659036167:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 17133, MsgBus: 20732 2025-06-30T09:25:35.023153Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671152121692593:2158];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00421c/r3tmp/tmpDm7npq/pdisk_1.dat 2025-06-30T09:25:35.025421Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:35.036464Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17133, node 2 2025-06-30T09:25:35.048488Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:35.048499Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty ... pleted, doublechecking } 2025-06-30T09:25:36.862276Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671154661505097:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:36.989541Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 21577, MsgBus: 9076 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00421c/r3tmp/tmpmhBFTt/pdisk_1.dat 2025-06-30T09:25:37.402967Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671161284619973:2246];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:37.403070Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:37.429295Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671161284619738:2080] 1751275537399969 != 1751275537399972 2025-06-30T09:25:37.432270Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21577, node 4 2025-06-30T09:25:37.447096Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:37.447113Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:37.447116Z node 4 :NET_CLASSIFIER 
WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:37.447185Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9076 2025-06-30T09:25:37.507014Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:37.507056Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:37.507942Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9076 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:37.552250Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:37.554085Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:37.562463Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:37.582902Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:37.619532Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:37.643305Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:37.945036Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671161284621348:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.945063Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.954426Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.965144Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.975606Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.987472Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.001494Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.017151Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.034144Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.051491Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671165579589296:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.051525Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.051579Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671165579589301:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.052540Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:38.055772Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671165579589303:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:38.148608Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671165579589354:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:38.404060Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |84.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |84.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |84.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs >> KqpParams::Decimal-QueryService+UseSink [GOOD] >> KqpParams::Decimal+QueryService+UseSink >> KqpStats::JoinStatsBasicScan [GOOD] >> KqpStats::DeferredEffects-UseSink >> KqpQuery::NoEvaluate [GOOD] >> AnalyzeColumnshard::AnalyzeStatus [GOOD] >> TPersQueueTest::EachMessageGetsExactlyOneAcknowledgementInCorrectOrder [GOOD] >> TPersQueueTest::Delete ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckObjectStorageProjectionByTypes [GOOD] Test command err: 2025-06-30T09:23:19.170164Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:420:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:19.170209Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:19.170221Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004589/r3tmp/tmpgrRumQ/pdisk_1.dat 2025-06-30T09:23:19.283979Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11409, node 1 2025-06-30T09:23:19.399339Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:19.399361Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:19.399365Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:19.399462Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:23:19.400078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:19.495465Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:19.495522Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:19.516070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15282 2025-06-30T09:23:19.954705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:23:21.069156Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-30T09:23:21.078003Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:21.078041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:21.128221Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:23:21.135330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:21.299289Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:21.321596Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.321792Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.321939Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.322029Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.322114Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.322137Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.322154Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.322173Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.322191Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.477152Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:21.477207Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:21.488752Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:21.525472Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:21.538443Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-30T09:23:21.538481Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-30T09:23:21.554233Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-30T09:23:21.554499Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-30T09:23:21.554533Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-30T09:23:21.554540Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-30T09:23:21.554544Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-30T09:23:21.554549Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-30T09:23:21.554554Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-30T09:23:21.554560Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-30T09:23:21.554722Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-30T09:23:21.573768Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7960: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:23:21.573803Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7990: ConnectToSA(), pipe client id: [2:1795:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:23:21.576502Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1807:2571] 2025-06-30T09:23:21.578553Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1840:2586] 2025-06-30T09:23:21.578755Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1840:2586], schemeshard id = 72075186224037897 2025-06-30T09:23:21.580149Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-30T09:23:21.589601Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-30T09:23:21.589624Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-30T09:23:21.589640Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-30T09:23:21.594213Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:21.596717Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-30T09:23:21.596763Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-30T09:23:21.716832Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-30T09:23:21.790577Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-30T09:23:21.889528Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-30T09:23:22.494378Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:22.538239Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2149:3027], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:22.538293Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:22.542428Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:23:22.593485Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2246:2805];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:23:22.593562Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2246:2805];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:23:22.593636Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2246:2805];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:23:22.593659Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2246:2805];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:23:22.593679Z node 2 :TX_COLUMNSHARD WARN: ... ervice_impl.cpp:1248: SyncNode(), pipe client id = [2:8398:6183] 2025-06-30T09:25:37.974654Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7990: ConnectToSA(), pipe client id: [2:8399:6184], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:25:38.035974Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-30T09:25:38.036022Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-30T09:25:38.036110Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-30T09:25:38.036402Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-30T09:25:38.036468Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-30T09:25:38.036479Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-30T09:25:38.036486Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-30T09:25:38.036492Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-30T09:25:38.036497Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1751275537942920 2025-06-30T09:25:38.036504Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-30T09:25:38.036509Z node 2 :STATISTICS DEBUG: tx_init.cpp:84: [72075186224037894] Loaded global traversal round: 2 2025-06-30T09:25:38.036523Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-30T09:25:38.036531Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-30T09:25:38.036546Z node 2 
:STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-30T09:25:38.036555Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-30T09:25:38.036561Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-30T09:25:38.036571Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-30T09:25:38.036622Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-30T09:25:38.037065Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-30T09:25:38.037216Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-30T09:25:38.037496Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-30T09:25:38.037512Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-30T09:25:38.037700Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-30T09:25:38.037714Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-30T09:25:38.038435Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-30T09:25:38.104263Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-30T09:25:38.104344Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-30T09:25:38.104580Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8446:6215], server id = [2:8450:6219], tablet id = 72075186224037899, status = OK 2025-06-30T09:25:38.104618Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8446:6215], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:38.104674Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8447:6216], server id = [2:8451:6220], tablet id = 72075186224037900, status = OK 2025-06-30T09:25:38.104682Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8447:6216], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:38.104939Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8448:6217], server id = [2:8452:6221], tablet id = 72075186224037901, status = OK 2025-06-30T09:25:38.104949Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8448:6217], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:38.105038Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8449:6218], server id = [2:8453:6222], tablet id = 72075186224037902, status = OK 2025-06-30T09:25:38.105045Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8449:6218], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:38.105270Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 
2025-06-30T09:25:38.105358Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8446:6215], server id = [2:8450:6219], tablet id = 72075186224037899 2025-06-30T09:25:38.105364Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:38.105408Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-30T09:25:38.105457Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8447:6216], server id = [2:8451:6220], tablet id = 72075186224037900 2025-06-30T09:25:38.105461Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:38.105493Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-30T09:25:38.105521Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-30T09:25:38.105527Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-30T09:25:38.105569Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-30T09:25:38.105602Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-30T09:25:38.105690Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-30T09:25:38.106295Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8448:6217], server id = [2:8452:6221], tablet id = 72075186224037901 2025-06-30T09:25:38.106306Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:38.113183Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-30T09:25:38.113491Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8449:6218], server id = [2:8453:6222], tablet id = 72075186224037902 2025-06-30T09:25:38.113499Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:38.138098Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8474:6243]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:25:38.138169Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-30T09:25:38.138178Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8474:6243], StatRequests.size() = 1 2025-06-30T09:25:38.272219Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:03.000000Z, event interval end# 2025-06-30T09:25:36.000000Z 2025-06-30T09:25:38.272436Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZTRhMTc1YWYtNzE5MDNkNDQtYjQ3OWI1OTYtMTRiZmJjMWI=, TxId: 
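Editor's note on the RunDataQuery text in the TQueryBase entry above: as captured, the two "DECLARE ... AS List;" statements are not valid YQL, most likely because the angle-bracketed type parameters of List were stripped when the log was rendered. A plausible reconstruction of the statement is sketched below; the element types Uint32 (column tags) and String (serialized statistics) are assumptions inferred from the column names and surrounding entries, not confirmed by this log:

    --!syntax_v1
    DECLARE $owner_id AS Uint64;
    DECLARE $local_path_id AS Uint64;
    DECLARE $stat_type AS Uint32;
    DECLARE $column_tags AS List<Uint32>;  -- assumed element type; generic lost in capture
    DECLARE $data AS List<String>;         -- assumed element type; generic lost in capture
    UPSERT INTO `.metadata/_statistics`
        (owner_id, local_path_id, stat_type, column_tag, data)
    VALUES
        ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
        ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);

The two VALUES rows are consistent with the two column statistics being saved for the traversed table, and the subsequent TEvDataQueryResult SUCCESS entry shows the upsert completing in the same session.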
2025-06-30T09:25:38.272449Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZTRhMTc1YWYtNzE5MDNkNDQtYjQ3OWI1OTYtMTRiZmJjMWI=, TxId: 2025-06-30T09:25:38.272692Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-30T09:25:38.284189Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8489:6249] 2025-06-30T09:25:38.284252Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8398:6183], server id = [2:8489:6249], tablet id = 72075186224037894, status = OK 2025-06-30T09:25:38.284329Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8489:6249], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-30T09:25:38.284398Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8490:6250] 2025-06-30T09:25:38.284421Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8490:6250], schemeshard id = 72075186224037897 2025-06-30T09:25:38.299159Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-30T09:25:38.299193Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-30T09:25:38.361831Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8493:6253]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-30T09:25:38.361957Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-30T09:25:38.361966Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-30T09:25:38.362714Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-30T09:25:38.362759Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-30T09:25:38.362771Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-30T09:25:38.370686Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 |84.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink+UseDataQuery [GOOD] |84.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut |84.2%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut >> TYdbControlPlaneStorageModifyConnection::ShouldCheckPreviousRevisionFailed [GOOD] >> KqpStats::DeferredEffects+UseSink [GOOD] >> KqpStats::DataQueryWithEffects+UseSink >> KqpLimits::QueryReplySize >> KqpExplain::MultiUsedStage 
[GOOD] >> KqpExplain::MergeConnection ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageCreateConnection::ShouldCheckCommitTransactionReadWrite [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? I 08:27 0:54 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? S 08:27 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/0] root 18 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/0:1-rcu_par_gp] root 19 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/1] root 22 0.0 0.0 0 0 ? S 08:27 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/2] root 28 0.0 0.0 0 0 ? S 08:27 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/3] root 34 0.0 0.0 0 0 ? S 08:27 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/4] root 40 0.0 0.0 0 0 ? S 08:27 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/5] root 46 0.0 0.0 0 0 ? S 08:27 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/6] root 52 0.0 0.0 0 0 ? S 08:27 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/6] root 55 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/7] root 58 0.0 0.0 0 0 ? S 08:27 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/7:0H-events_highpri] root 62 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/8] root 63 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/8] root 64 0.0 0.0 0 0 ? S 08:27 0:02 [migration/8] root 65 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/8] root 67 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/8:0H-events_highpri] root 68 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/9] root 69 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/9] root 70 0.0 0.0 0 0 ? S 08:27 0:02 [migration/9] root 71 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/9] root 73 0.0 0.0 0 0 ? 
I< 08:27 0:00 [kworker/9:0H-events_highpri] root 74 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/10] root 75 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/10] root 76 0.0 0.0 0 0 ? S 08:27 0:02 [migration/10] root 77 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/10] root 78 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/10:0-rcu_gp] root 79 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/10:0H-events_highpri] root 80 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/11] root 81 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/11] root 82 0.0 0.0 0 0 ? S 08:27 0:02 [migration/11] root 83 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/11] root 85 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/11:0H-events_highpri] root 86 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/12] root 87 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/12] root 88 0.0 0.0 0 0 ? S 08:27 0:02 [migration/12] root 89 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/12] root 91 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/12:0H-events_highpri] root 92 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/13] root 93 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/13] root 94 0.0 0.0 0 0 ? S 08:27 0:02 [migration/13] root 95 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/13] root 97 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/13:0H-events_highpri] root 98 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/14] root 99 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/14] root 100 0.0 0.0 0 0 ? S 08:27 0:02 [migration/14] root 101 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/14] root 102 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/14:0-rcu_gp] root 103 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/14:0H-kblockd] root 104 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/15] root 105 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/15] root 106 0.0 0.0 0 0 ? S 08:27 0:02 [migration/15] root 107 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/15] root 109 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/15:0H-events_highpri] root 110 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/16] root 111 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/16] root 112 0.0 0.0 0 0 ? S 08:27 0:02 [migration/16] root 113 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/16] root 115 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/16:0H-events_highpri] root 116 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/17] root 117 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/17] root 118 0.0 0.0 0 0 ? S 08:27 0:02 [migration/17] root 119 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/17] root 121 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/17:0H-events_highpri] root 122 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/18] root 123 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/18] root 124 0.0 0.0 0 0 ? S 08:27 0:02 [migration/18] root 125 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/19] root 130 0.0 0.0 0 0 ? S 08:27 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/20] root 136 0.0 0.0 0 0 ? S 08:27 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/20] root 138 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/20:0-rcu_gp] root 139 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/21] root 142 0.0 0.0 0 0 ? S 08:27 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/22] root 148 0.0 0.0 0 0 ? 
S 08:27 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/22] root 151 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/23] root 154 0.0 0.0 0 0 ? S 08:27 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/24] root 160 0.0 0.0 0 0 ? S 08:27 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/25] ... 179796Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775hu7vpk943i4g] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:34.307088Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:34.307504Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775hu387nrts3e0] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:34.444856Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:34.445202Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775htvbft3pd9sh] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:34.607456Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:34.607815Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775htr3jgbqd850] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:34.761336Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:34.761719Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775htm69g2l734f] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:34.802170Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage.cpp:437: DB Error, Status: BAD_SESSION, Issues: [ {
: Error: Exceeded maximum allowed number of active transactions, code: 2014 } {
: Error: ydb/core/kqp/session_actor/kqp_session_actor.cpp:904: Too many transactions, current active: 10 MaxTxPerSession: 10 } ], Query: --!syntax_v1 -- Query name: Unknown query name PRAGMA TablePathPrefix("local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateConnection::TTestCaseShouldCheckCommitTransactionReadWrite::Execute_(NUnitTest::TTestContext&)"); DECLARE $idempotency_key as String; DECLARE $scope as String; SELECT `response` FROM `idempotency_keys` WHERE `scope` = $scope AND `idempotency_key` = $idempotency_key; 2025-06-30T09:25:34.940600Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:34.940969Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775htheik86vi67] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:35.151904Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:35.152231Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775htc0sd8vsvvq] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:35.283093Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:35.283424Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775ht5ice4pods3] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:35.431205Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:35.431591Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775ht1ibrtlidao] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:35.562177Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:35.562540Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775hst1o9etjn87] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:35.675074Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:35.675436Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775hsp1tihqcnpc] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:35.879011Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:35.879383Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775hsljjp7rgu5l] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:36.044194Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:36.047033Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775hsfb9upfaphg] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:36.269725Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:36.270119Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775hsa5cg3qmtqk] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-30T09:25:36.406457Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-30T09:25:36.407149Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue775hs3eofup5teu] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::NoEvaluate [GOOD] Test command err: Trying to start YDB, gRPC: 31495, MsgBus: 1571 2025-06-30T09:25:33.218319Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671143042363682:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:33.218369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004226/r3tmp/tmpTzyiQl/pdisk_1.dat 2025-06-30T09:25:33.284172Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:33.284253Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671143042363655:2080] 1751275533218047 != 1751275533218050 TServer::EnableGrpc on GrpcPort 31495, node 1 2025-06-30T09:25:33.299115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:33.299126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:33.299128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:33.299162Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1571 TClient is connected to server localhost:1571 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:25:33.361892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:33.361918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:33.362983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:33.365896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:33.370282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:33.437349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:33.500759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:33.513862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:33.647067Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671143042365262:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.647099Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.697744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.715509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.726086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.739459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.751098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.766430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.783676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:33.797043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671143042365916:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.797071Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.797111Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671143042365921:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.798084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:33.805978Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671143042365923:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:33.896408Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671143042365974:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:34.220296Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 27260, MsgBus: 6293 2025-06-30T09:25:34.634405Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671146497583046:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:34.636662Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004226/r3tmp/tmp3HUrc7/pdisk_1.dat 2025-06-30T09:25:34.654012Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:34.654204Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:752167114 ... l try to initialize from file: (empty maybe) 2025-06-30T09:25:38.072898Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:38.072959Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16851 2025-06-30T09:25:38.125555Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:38.125594Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:38.131298Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16851 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:38.163606Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:38.166111Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:38.174880Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.199684Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.253154Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.281985Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.579294Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671163127321609:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.579325Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.589220Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.606565Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.626441Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.648956Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.672636Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.693549Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.721404Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.796033Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671163127322268:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.796074Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.796197Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671163127322273:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.797283Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:38.800096Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671163127322275:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:38.879893Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671163127322326:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:39.015691Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:39.147860Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7521671167422289903:2475], status: UNSUPPORTED, issues:
: Error: Default error
:7:24: Error: EVALUATE IF is not supported in YDB queries., code: 2030 2025-06-30T09:25:39.148002Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=4&id=ZmIzNGIyNzgtYWI0YTQxZi00NTM1OTkyMC0xZTA2YWQwMQ==, ActorId: [4:7521671167422289895:2470], ActorState: ExecuteState, TraceId: 01jz02g2p31cmzgq5e64y2wdcr, ReplyQueryCompileError, status UNSUPPORTED remove tx with tx_id: 2025-06-30T09:25:39.153575Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7521671167422289907:2477], status: UNSUPPORTED, issues:
: Error: Default error
:4:28: Error: EVALUATE is not supported in YDB queries., code: 2030 2025-06-30T09:25:39.153765Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=4&id=ZmIzNGIyNzgtYWI0YTQxZi00NTM1OTkyMC0xZTA2YWQwMQ==, ActorId: [4:7521671167422289895:2470], ActorState: ExecuteState, TraceId: 01jz02g2pd97cn38v9ndr0hzb8, ReplyQueryCompileError, status UNSUPPORTED remove tx with tx_id: 2025-06-30T09:25:39.178498Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7521671167422289915:2481], status: UNSUPPORTED, issues:
: Error: Default error
:8:78: Error: ATOM evaluation is not supported in YDB queries., code: 2030 2025-06-30T09:25:39.178688Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=4&id=ZmIzNGIyNzgtYWI0YTQxZi00NTM1OTkyMC0xZTA2YWQwMQ==, ActorId: [4:7521671167422289895:2470], ActorState: ExecuteState, TraceId: 01jz02g2q57et2rhzvj8qnnvbe, ReplyQueryCompileError, status UNSUPPORTED remove tx with tx_id: >> KqpTypes::MultipleCurrentUtcTimestamp [GOOD] >> KqpStats::MultiTxStatsFullYql ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeStatus [GOOD] Test command err: 2025-06-30T09:23:19.192831Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:420:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:19.192878Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:19.192891Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00458f/r3tmp/tmpbdUv4v/pdisk_1.dat 2025-06-30T09:23:19.327560Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12000, node 1 2025-06-30T09:23:19.446379Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:19.446404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:19.446410Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:19.446535Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:23:19.447267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:19.546227Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:19.546269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:19.567842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25886 2025-06-30T09:23:20.095853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:23:21.155955Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-30T09:23:21.175277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:21.175321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:21.224216Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:23:21.225239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:21.382345Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:21.406564Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.406775Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.406974Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.407030Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.407108Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.407131Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.407153Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.407173Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.407198Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:21.551983Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:21.552038Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:21.563935Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:21.606405Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:21.618999Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-30T09:23:21.619030Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-30T09:23:21.629800Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-30T09:23:21.630167Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-30T09:23:21.630203Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-30T09:23:21.630208Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-30T09:23:21.630215Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-30T09:23:21.630224Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-30T09:23:21.630231Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-30T09:23:21.630240Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-30T09:23:21.630600Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-30T09:23:21.651116Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7960: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:23:21.651153Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7990: ConnectToSA(), pipe client id: [2:1799:2564], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:23:21.652709Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1805:2570] 2025-06-30T09:23:21.655871Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1846:2590] 2025-06-30T09:23:21.656284Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1846:2590], schemeshard id = 72075186224037897 2025-06-30T09:23:21.656406Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-30T09:23:21.663435Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-30T09:23:21.663459Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-30T09:23:21.663473Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-30T09:23:21.667656Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:21.670086Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-30T09:23:21.670131Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-30T09:23:21.779328Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-30T09:23:21.870784Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-30T09:23:21.938202Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-30T09:23:22.564128Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:22.603275Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3024], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:22.603328Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:22.607512Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:23:22.653636Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2215:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:23:22.653752Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2215:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:23:22.653835Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2215:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:23:22.653868Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2215:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:23:22.653902Z node 2 :TX_COLUMNSHARD WARN: ... : Error: Transaction 281474976720658 completed, doublechecking } 2025-06-30T09:25:36.512892Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7302:5350] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:36.524636Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7331:5365]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:25:36.524719Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-30T09:25:36.524731Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7333:5367] 2025-06-30T09:25:36.524746Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7333:5367] 2025-06-30T09:25:36.524863Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7334:5368] 2025-06-30T09:25:36.524896Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7333:5367], server id = [2:7334:5368], tablet id = 72075186224037894, status = OK 2025-06-30T09:25:36.524909Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7334:5368], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-30T09:25:36.524925Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-30T09:25:36.524950Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-30T09:25:36.524962Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7331:5365], StatRequests.size() = 1 2025-06-30T09:25:36.569194Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NGEwZjAzZDAtM2NmZWI3My00MGIwZjg3ZC1hZDI1MmRjNA==, TxId: 2025-06-30T09:25:36.569230Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NGEwZjAzZDAtM2NmZWI3My00MGIwZjg3ZC1hZDI1MmRjNA==, TxId: 2025-06-30T09:25:36.569407Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-30T09:25:36.581358Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-30T09:25:36.581391Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-30T09:25:36.628839Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-30T09:25:36.628872Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-30T09:25:36.705886Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7333:5367], schemeshard count = 1 2025-06-30T09:25:37.615984Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-30T09:25:37.616039Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-30T09:25:37.617111Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-30T09:25:37.633251Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-30T09:25:37.633508Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-30T09:25:37.633529Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-30T09:25:37.645466Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-30T09:25:37.666323Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. ... blocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR cookie 0 ... waiting for TEvAnalyzeTableResponse (done) 2025-06-30T09:25:37.666993Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7405:5410] 2025-06-30T09:25:37.667153Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:478: [72075186224037894] Send TEvStatistics::TEvAnalyzeStatusResponse. Status STATUS_ENQUEUED 2025-06-30T09:25:37.667473Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7407:5411]
---- StatisticsAggregator ----
Database: /Root/Database
BaseStatistics: 1
SchemeShards: 1
    72075186224037897
Nodes: 1
    2
RequestedSchemeShards: 1
    72075186224037897
FastCounter: 3
FastCheckInFlight: 0
FastSchemeShards: 0
FastNodes: 0
PropagationInFlight: 0
PropagationSchemeShards: 0
PropagationNodes: 0
LastSSIndex: 0
PendingRequests: 0
ProcessUrgentInFlight: 0
Columns: 2
DatashardRanges: 0
CountMinSketches: 0
ScheduleTraversalsByTime: 2
  oldest table: [OwnerId: 72075186224037897, LocalPathId: 4], update time: 1970-01-01T00:00:00Z
ScheduleTraversalsBySchemeShard: 1
    72075186224037897
    [OwnerId: 72075186224037897, LocalPathId: 4], [OwnerId: 72075186224037897, LocalPathId: 3]
ForceTraversals: 1
    1970-01-01T00:00:06Z
NavigateType: Analyze
NavigateAnalyzeOperationId: 
NavigatePathId: 
ForceTraversalOperationId: 
TraversalStartTime: 1970-01-01T00:00:00Z
TraversalPathId: 
TraversalIsColumnTable: 0
TraversalStartKey: 
GlobalTraversalRound: 1
TraversalRound: 0
HiveRequestRound: 0
... unblocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR 2025-06-30T09:25:37.667812Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-30T09:25:37.667846Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-30T09:25:37.679550Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-30T09:25:38.832325Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-30T09:25:38.832388Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-30T09:25:38.832392Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-30T09:25:38.832564Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-30T09:25:38.851217Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-30T09:25:38.851379Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-30T09:25:38.851406Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-30T09:25:38.851704Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-30T09:25:38.867380Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-30T09:25:38.867470Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-30T09:25:38.867670Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7456:5439], server id = [2:7457:5440], tablet id = 72075186224037899, status = OK 2025-06-30T09:25:38.867726Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7456:5439], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:38.868784Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-30T09:25:38.868801Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-30T09:25:38.868885Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-30T09:25:38.868922Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-30T09:25:38.868971Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7456:5439], server id = [2:7457:5440], tablet id = 72075186224037899 2025-06-30T09:25:38.868975Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:38.869018Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-30T09:25:38.869663Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-30T09:25:38.876994Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7477:5459]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:25:38.877068Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-30T09:25:38.877073Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7477:5459], StatRequests.size() = 1 2025-06-30T09:25:38.908634Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NmJhYjk2Y2QtZDAxNTczNC1iODFlY2MxMy01ZDZlYWQxNg==, TxId: 2025-06-30T09:25:38.908672Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NmJhYjk2Y2QtZDAxNTczNC1iODFlY2MxMy01ZDZlYWQxNg==, TxId: 2025-06-30T09:25:38.909284Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-30T09:25:38.921441Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-30T09:25:38.921474Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:914:2714] 2025-06-30T09:25:38.921979Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7494:5467] 2025-06-30T09:25:38.922168Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:478: [72075186224037894] Send TEvStatistics::TEvAnalyzeStatusResponse. Status STATUS_NO_OPERATION >> KqpParams::CheckCacheWithRecompilationQuery [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink+UseDataQuery [GOOD] Test command err: Trying to start YDB, gRPC: 30072, MsgBus: 9689 2025-06-30T09:24:37.406828Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670900808838816:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:37.407161Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001eec/r3tmp/tmpMJkC0i/pdisk_1.dat 2025-06-30T09:24:37.490818Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30072, node 1 2025-06-30T09:24:37.522973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:37.522989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:37.522992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:37.523040Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9689 2025-06-30T09:24:37.549865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:37.549896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:37.550925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9689 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:24:37.659207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:37.662698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:24:37.668324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:37.717891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:37.764136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:37.782419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:24:37.959340Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670900808840377:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:37.959375Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.024223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.040687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.060832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.075631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.089608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.108477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.171903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:24:38.200262Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905103808326:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.200294Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.201304Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670905103808331:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:24:38.202351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:24:38.204674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:24:38.205062Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521670905103808333:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:24:38.307938Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521670905103808384:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:24:38.408150Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; [[[1u];["One"]];[[2u];["Two"]]] [[[1u];["One"]];[[2u];["Two"]]] Trying to start YDB, gRPC: 20298, MsgBus: 29454 2025-06-30T09:24:39.083166Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670910299467328:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:24:39.083177Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001eec/r3tmp/tmpRlgMBW/pdisk_1.dat 2025-06-30T09:24: ... .cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001eec/r3tmp/tmphdmYwy/pdisk_1.dat 2025-06-30T09:25:38.193507Z node 42 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30137, node 42 2025-06-30T09:25:38.218398Z node 42 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:38.218420Z node 42 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:38.218424Z node 42 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:38.218485Z node 42 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3423 TClient is connected to server localhost:3423 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:38.276780Z node 42 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:38.276817Z node 42 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:38.277219Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:38.278129Z node 42 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:38.281208Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.294634Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.326273Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.348318Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.600441Z node 42 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [42:7521671165323335908:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.600483Z node 42 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.611875Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.635759Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.662311Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.695543Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.729893Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.753829Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.778494Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.821326Z node 42 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [42:7521671165323336563:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.821354Z node 42 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.821575Z node 42 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [42:7521671165323336568:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.822967Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:38.834384Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:38.834582Z node 42 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [42:7521671165323336570:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:38.907824Z node 42 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [42:7521671165323336621:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:39.154627Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.169164Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.169567Z node 42 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:39.183215Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) [[["f8904e9d-afdc-440f-9acd-e364845f120e"]]] [[["f8904e9d-afdc-440f-9acd-e364845f120e"]]] >> KqpQuery::CurrentUtcTimestamp ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpTypes::MultipleCurrentUtcTimestamp [GOOD] Test command err: Trying to start YDB, gRPC: 4911, MsgBus: 10631 2025-06-30T09:25:34.670613Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671145321084237:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:34.670751Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004214/r3tmp/tmphTb43F/pdisk_1.dat 2025-06-30T09:25:34.768038Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671145321084068:2080] 1751275534668595 != 1751275534668598 2025-06-30T09:25:34.768269Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4911, node 1 2025-06-30T09:25:34.774332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:34.774365Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:34.776176Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:34.803470Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:34.803481Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:34.803484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:34.803524Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10631 TClient is connected to server localhost:10631 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:34.908633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:34.912954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:34.924462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:35.007834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:25:35.051963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.070421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:35.183039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671149616052979:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.183085Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.247473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.257236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.271326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.285549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.299456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.315739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.330179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.358882Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671149616053631:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.358913Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.359099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671149616053636:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.360183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:35.367596Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671149616053638:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:35.464231Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671149616053689:3400] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:35.669996Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 18826, MsgBus: 21111 2025-06-30T09:25:36.035663Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671153448483221:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:36.038062Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004214/r3tmp/tmpEm2Fe2/pdisk_1.dat 2025-06-30T09:25:36.099067Z node 2 :IMPORT WARN: schemeshard_impor ... h: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:38.435661Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 10519, MsgBus: 26516 2025-06-30T09:25:38.828230Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671166235704590:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:38.830598Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004214/r3tmp/tmp6UJw6y/pdisk_1.dat 2025-06-30T09:25:38.854253Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671166235704382:2080] 1751275538815779 != 1751275538815782 2025-06-30T09:25:38.855967Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10519, node 4 2025-06-30T09:25:38.866484Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:38.866497Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:38.866500Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:38.866575Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26516 2025-06-30T09:25:38.936435Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-30T09:25:38.936468Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:38.937865Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26516 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:38.999514Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:39.008164Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:39.024679Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:39.052030Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:39.092433Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:25:39.107687Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.324866Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671170530673273:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.324898Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.336046Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.348359Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.360730Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.375264Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.389095Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.401956Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.418661Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.454829Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671170530673926:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.454877Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.458825Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671170530673931:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.460078Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:39.464280Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:39.464378Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671170530673933:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:39.559591Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671170530673984:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:39.817447Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpExplain::SsaProgramInJsonPlan [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCheckLimit [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCheckScopeVisibility ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageListQueries::ShouldCombineFilters [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? I 08:27 0:54 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? S 08:27 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/0] root 18 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/0:1-rcu_par_gp] root 19 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/1] root 22 0.0 0.0 0 0 ? S 08:27 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/2] root 28 0.0 0.0 0 0 ? S 08:27 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/3] root 34 0.0 0.0 0 0 ? S 08:27 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/4] root 40 0.0 0.0 0 0 ? S 08:27 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/5] root 46 0.0 0.0 0 0 ? S 08:27 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/6] root 52 0.0 0.0 0 0 ? S 08:27 0:02 [migration/6] root 53 0.0 0.0 0 0 ? 
S 08:27 0:00 [ksoftirqd/6] root 55 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/7] root 58 0.0 0.0 0 0 ? S 08:27 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/7:0H-events_highpri] root 62 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/8] root 63 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/8] root 64 0.0 0.0 0 0 ? S 08:27 0:02 [migration/8] root 65 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/8] root 67 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/8:0H-events_highpri] root 68 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/9] root 69 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/9] root 70 0.0 0.0 0 0 ? S 08:27 0:02 [migration/9] root 71 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/9] root 73 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/9:0H-events_highpri] root 74 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/10] root 75 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/10] root 76 0.0 0.0 0 0 ? S 08:27 0:02 [migration/10] root 77 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/10] root 78 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/10:0-rcu_gp] root 79 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/10:0H-events_highpri] root 80 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/11] root 81 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/11] root 82 0.0 0.0 0 0 ? S 08:27 0:02 [migration/11] root 83 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/11] root 85 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/11:0H-events_highpri] root 86 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/12] root 87 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/12] root 88 0.0 0.0 0 0 ? S 08:27 0:02 [migration/12] root 89 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/12] root 91 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/12:0H-events_highpri] root 92 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/13] root 93 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/13] root 94 0.0 0.0 0 0 ? S 08:27 0:02 [migration/13] root 95 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/13] root 97 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/13:0H-events_highpri] root 98 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/14] root 99 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/14] root 100 0.0 0.0 0 0 ? S 08:27 0:02 [migration/14] root 101 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/14] root 102 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/14:0-rcu_gp] root 103 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/14:0H-kblockd] root 104 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/15] root 105 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/15] root 106 0.0 0.0 0 0 ? S 08:27 0:02 [migration/15] root 107 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/15] root 109 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/15:0H-events_highpri] root 110 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/16] root 111 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/16] root 112 0.0 0.0 0 0 ? S 08:27 0:02 [migration/16] root 113 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/16] root 115 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/16:0H-events_highpri] root 116 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/17] root 117 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/17] root 118 0.0 0.0 0 0 ? S 08:27 0:02 [migration/17] root 119 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/17] root 121 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/17:0H-events_highpri] root 122 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/18] root 123 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/18] root 124 0.0 0.0 0 0 ? S 08:27 0:02 [migration/18] root 125 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/19] root 130 0.0 0.0 0 0 ? 
S 08:27 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/20] root 136 0.0 0.0 0 0 ? S 08:27 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/20] root 138 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/20:0-rcu_gp] root 139 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/21] root 142 0.0 0.0 0 0 ? S 08:27 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/22] root 148 0.0 0.0 0 0 ? S 08:27 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/22] root 151 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/23] root 154 0.0 0.0 0 0 ? S 08:27 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/24] root 160 0.0 0.0 0 0 ? S 08:27 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/25] ... 6-30T09:25:36.451735Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:36.451736Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:36.452037Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK 2025-06-30T09:25:36.452059Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:36.452063Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:36.452263Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings". 
Create session OK 2025-06-30T09:25:36.452270Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:36.452272Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:36.452364Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks". Create session OK 2025-06-30T09:25:36.452366Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:36.452367Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:36.452420Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes". Create session OK 2025-06-30T09:25:36.452422Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:36.452423Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:36.452441Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/jobs". 
Create session OK 2025-06-30T09:25:36.452446Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:36.452449Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:36.470986Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:36.471011Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:36.518997Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:36.519017Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:36.543067Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:36.543082Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:36.543327Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:36.543337Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:36.575225Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:36.575241Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:36.575811Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:36.575817Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:36.575936Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:36.575940Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:36.576471Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:36.576477Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:36.576574Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:36.576576Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:36.576641Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:36.576643Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:36.576698Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:36.576700Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:36.576757Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:36.576759Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:36.576813Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:36.576814Z 
node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:36.576871Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:36.576873Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:36.576929Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:36.576931Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/queries": >> TYdbControlPlaneStoragePipeline::ShouldCheckResultsTTL [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckDisableCurrentIamGetTask >> KqpStats::DeferredEffects-UseSink [GOOD] >> KqpParams::Decimal+QueryService+UseSink [GOOD] >> KqpNewEngine::ContainerRegistryCombiner ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpParams::CheckCacheWithRecompilationQuery [GOOD] Test command err: Trying to start YDB, gRPC: 14904, MsgBus: 61144 2025-06-30T09:25:31.911939Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671135892833465:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:31.918909Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004228/r3tmp/tmpsdyhbP/pdisk_1.dat 2025-06-30T09:25:32.088059Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14904, node 1 2025-06-30T09:25:32.119179Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:32.119193Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:32.119196Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:32.119243Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61144 TClient is connected to server localhost:61144 2025-06-30T09:25:32.227110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:32.227141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:25:32.233021Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:25:32.267821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:32.271673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:25:32.288601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.355857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:32.436131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:32.498386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:32.595477Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671140187802298:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.595510Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.661279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.683094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.704573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.728029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.792339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.814688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.837685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:32.905809Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671140187802960:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.905843Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.905962Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671140187802965:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:32.907156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:32.912948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:32.913189Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671140187802967:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:32.923115Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:32.991160Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671140187803027:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 62568, MsgBus: 61506 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004228/r3tmp/tmpI6IuKf/pdisk_1.dat 2025-06-30T09:25:33.704041Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:25:33.704400Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:33.707356Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521671142427716374:2080] 1751275533671696 != 1 ... f500] received request Name# ListDatabases ok# false data# peer# 2025-06-30T09:25:40.175266Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdb4a00] received request Name# RemoveDatabase ok# false data# peer# 2025-06-30T09:25:40.175276Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b9262000] received request Name# DescribeDatabaseOptions ok# false data# peer# 2025-06-30T09:25:40.175312Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bd8c8000] received request Name# GetScaleRecommendation ok# false data# peer# 2025-06-30T09:25:40.175316Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bd965c00] received request Name# ListEndpoints ok# false data# peer# 2025-06-30T09:25:40.175343Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdc8700] received request Name# WhoAmI ok# false data# peer# 2025-06-30T09:25:40.175358Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bb765c00] received request Name# NodeRegistration ok# false data# peer# 2025-06-30T09:25:40.175381Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bc2e0300] received request Name# Scan ok# false data# peer# 2025-06-30T09:25:40.175404Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bd9bf500] received request Name# GetShardLocations ok# false data# peer# 2025-06-30T09:25:40.175425Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bfdfdc00] received request Name# DescribeTable ok# false data# peer# 2025-06-30T09:25:40.175450Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113ba033100] received request Name# CreateSnapshot ok# false data# peer# 2025-06-30T09:25:40.175467Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdbca00] received request Name# RefreshSnapshot ok# false data# peer# 2025-06-30T09:25:40.175494Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b9982a00] received request Name# DiscardSnapshot ok# false data# peer# 2025-06-30T09:25:40.175505Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcd4ee00] received request Name# List ok# false data# peer# 2025-06-30T09:25:40.175539Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bb764700] received request Name# RateLimiter/CreateResource ok# false data# peer# 2025-06-30T09:25:40.175547Z 
node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bab26700] received request Name# RateLimiter/AlterResource ok# false data# peer# 2025-06-30T09:25:40.175581Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bfdfc000] received request Name# RateLimiter/DropResource ok# false data# peer# 2025-06-30T09:25:40.175590Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b913b800] received request Name# RateLimiter/ListResources ok# false data# peer# 2025-06-30T09:25:40.175637Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b9138000] received request Name# RateLimiter/DescribeResource ok# false data# peer# 2025-06-30T09:25:40.175675Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113beec8a00] received request Name# RateLimiter/AcquireResource ok# false data# peer# 2025-06-30T09:25:40.175714Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bc2dee00] received request Name# CreateStream ok# false data# peer# 2025-06-30T09:25:40.175762Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdca300] received request Name# ListStreams ok# false data# peer# 2025-06-30T09:25:40.175798Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdef500] received request Name# DeleteStream ok# false data# peer# 2025-06-30T09:25:40.175836Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113baafae00] received request Name# DescribeStream ok# false data# peer# 2025-06-30T09:25:40.175869Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113ba199100] received request Name# ListShards ok# false data# peer# 2025-06-30T09:25:40.175904Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113ba196000] received request Name# SetWriteQuota ok# false data# peer# 2025-06-30T09:25:40.175946Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bb63a000] received request Name# UpdateStream ok# false data# peer# 2025-06-30T09:25:40.175949Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bb764e00] received request Name# PutRecord ok# false data# peer# 2025-06-30T09:25:40.175989Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bd9be000] received request Name# PutRecords ok# false data# peer# 2025-06-30T09:25:40.175991Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113babc5500] received request Name# GetRecords ok# false data# peer# 2025-06-30T09:25:40.176034Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdfd500] received request Name# GetShardIterator ok# false data# peer# 2025-06-30T09:25:40.176038Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdc5100] received request Name# SubscribeToShard ok# false data# peer# 2025-06-30T09:25:40.176074Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113babc4e00] received request Name# DescribeLimits ok# false data# peer# 2025-06-30T09:25:40.176075Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113baafa700] received request Name# DescribeStreamSummary ok# false data# peer# 2025-06-30T09:25:40.176118Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bc230000] received request Name# DecreaseStreamRetentionPeriod ok# false data# peer# 2025-06-30T09:25:40.176121Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bb767100] received request Name# IncreaseStreamRetentionPeriod ok# false data# peer# 2025-06-30T09:25:40.176159Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b91aea00] received request Name# UpdateShardCount ok# false data# peer# 2025-06-30T09:25:40.176167Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcc8d100] received request Name# UpdateStreamMode ok# false data# peer# 2025-06-30T09:25:40.176201Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b8f7ce00] received request Name# 
RegisterStreamConsumer ok# false data# peer# 2025-06-30T09:25:40.176216Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b9825500] received request Name# DeregisterStreamConsumer ok# false data# peer# 2025-06-30T09:25:40.176240Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b92b1c00] received request Name# DescribeStreamConsumer ok# false data# peer# 2025-06-30T09:25:40.176267Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113babc4700] received request Name# ListStreamConsumers ok# false data# peer# 2025-06-30T09:25:40.176278Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bab26e00] received request Name# AddTagsToStream ok# false data# peer# 2025-06-30T09:25:40.176311Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113baa38a00] received request Name# DisableEnhancedMonitoring ok# false data# peer# 2025-06-30T09:25:40.176323Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113baafc300] received request Name# EnableEnhancedMonitoring ok# false data# peer# 2025-06-30T09:25:40.176352Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113baa36700] received request Name# ListTagsForStream ok# false data# peer# 2025-06-30T09:25:40.176360Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bc2b5c00] received request Name# MergeShards ok# false data# peer# 2025-06-30T09:25:40.176393Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113baa39800] received request Name# RemoveTagsFromStream ok# false data# peer# 2025-06-30T09:25:40.176402Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bb767800] received request Name# SplitShard ok# false data# peer# 2025-06-30T09:25:40.176442Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcd4e000] received request Name# StartStreamEncryption ok# false data# peer# 2025-06-30T09:25:40.176444Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bc21ce00] received request Name# StopStreamEncryption ok# false data# peer# 2025-06-30T09:25:40.176485Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bde44a00] received request Name# SelfCheck ok# false data# peer# 2025-06-30T09:25:40.176486Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdcb100] received request Name# NodeCheck ok# false data# peer# 2025-06-30T09:25:40.176527Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bb63d100] received request Name# CreateSession ok# false data# peer# 2025-06-30T09:25:40.176535Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bc22b800] received request Name# DeleteSession ok# false data# peer# 2025-06-30T09:25:40.176570Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113ba030000] received request Name# AttachSession ok# false data# peer# 2025-06-30T09:25:40.176585Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b91af100] received request Name# BeginTransaction ok# false data# peer# 2025-06-30T09:25:40.176607Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113baa36000] received request Name# CommitTransaction ok# false data# peer# 2025-06-30T09:25:40.176634Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bfc0f800] received request Name# RollbackTransaction ok# false data# peer# 2025-06-30T09:25:40.176653Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bfc0ea00] received request Name# ExecuteQuery ok# false data# peer# 2025-06-30T09:25:40.176679Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdd8e00] received request Name# ExecuteScript ok# false data# peer# 2025-06-30T09:25:40.176692Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b92b0000] received request Name# FetchScriptResults ok# false data# peer# 2025-06-30T09:25:40.176725Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: 
[0x5113b9980700] received request Name# ExecuteTabletMiniKQL ok# false data# peer# 2025-06-30T09:25:40.176747Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdee000] received request Name# ChangeTabletSchema ok# false data# peer# 2025-06-30T09:25:40.176766Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bd9c1800] received request Name# RestartTablet ok# false data# peer# 2025-06-30T09:25:40.176788Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113baa37c00] received request Name# CreateLogStore ok# false data# peer# 2025-06-30T09:25:40.176811Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdabc00] received request Name# DescribeLogStore ok# false data# peer# 2025-06-30T09:25:40.176828Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bc278300] received request Name# DropLogStore ok# false data# peer# 2025-06-30T09:25:40.176854Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113ba197500] received request Name# AlterLogStore ok# false data# peer# 2025-06-30T09:25:40.176864Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b99a4e00] received request Name# CreateLogTable ok# false data# peer# 2025-06-30T09:25:40.176900Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113b825dc00] received request Name# DescribeLogTable ok# false data# peer# 2025-06-30T09:25:40.176912Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113beec6000] received request Name# DropLogTable ok# false data# peer# 2025-06-30T09:25:40.176934Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdd8700] received request Name# AlterLogTable ok# false data# peer# 2025-06-30T09:25:40.176956Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bdff9500] received request Name# Login ok# false data# peer# 2025-06-30T09:25:40.176976Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdc4300] received request Name# DescribeReplication ok# false data# peer# 2025-06-30T09:25:40.177006Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x5113bcdaa700] received request Name# DescribeView ok# false data# peer# >> KqpExplain::MergeConnection [GOOD] >> KqpExplain::IdxFullscan >> KqpStats::DataQueryWithEffects+UseSink [GOOD] >> KqpStats::DataQueryWithEffects-UseSink >> KqpLimits::DatashardReplySize [GOOD] >> KqpLimits::DataShardReplySizeExceeded >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink-UseDataQuery |84.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |84.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |84.2%| [LD] {RESULT} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpExplain::SsaProgramInJsonPlan [GOOD] Test command err: Trying to start YDB, gRPC: 7413, MsgBus: 62378 2025-06-30T09:25:35.266426Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671152802810640:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:35.266586Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004212/r3tmp/tmpcvp33R/pdisk_1.dat 2025-06-30T09:25:35.327790Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:35.327901Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for 
subscription [1:7521671152802810616:2080] 1751275535266195 != 1751275535266198 TServer::EnableGrpc on GrpcPort 7413, node 1 2025-06-30T09:25:35.347573Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:35.347585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:35.347586Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:35.347620Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:35.369026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:35.369072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:62378 2025-06-30T09:25:35.369905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62378 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:35.423964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:25:35.431885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.498688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:35.528227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:35.541570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:35.746256Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671152802812215:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.746288Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.806378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.837800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.853992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.923370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.937853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.955600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.975510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:36.000015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671152802812870:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.000067Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.000078Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671152802812875:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.001068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:36.004379Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671152802812877:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:36.075406Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671157097780224:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:36.268166Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"SortBy":"row.Text","Name":"Sort"},{"Scan":"Parallel","ReadRange":["Key [150, 266]"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"1","Table":"EightShard","ReadColumns":["Data","Key","Text"],"E-Cost":"0"}],"Node Type":"Sort-TableRangeScan"}],"Node Type":"Merge","SortColumns":["Text (Asc)"],"PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","reads":[{"columns":["Data","Key","Text"],"scan_by":["Key [150, 266]"],"type" ... : tablet_id=72075186224037977;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.369540Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.369733Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037973;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.369947Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037977;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.370107Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037983;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.370832Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037973;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.370989Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037978;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.371111Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037983;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.371287Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037971;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.372028Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037978;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.372159Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.372295Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037971;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.372455Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037965;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.373122Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.373274Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037980;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.373454Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037965;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.373671Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037979;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.374301Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037980;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.374421Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037953;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.374983Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037979;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.375130Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037955;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.375591Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037953;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.375722Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.376210Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037955;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.376348Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.377449Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.377581Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.377669Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.378096Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.378687Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.378923Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037961;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.379189Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.379351Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037985;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.379903Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037961;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.380092Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.380315Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037985;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.380440Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037957;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:25:40.381370Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:25:40.381734Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037957;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["OlapTable"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"E-Rows":"0","Predicate":"Value \u003E 0","Pushdown":"True","Name":"Filter","E-Size":"0","E-Cost":"0"},{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/OlapTable","E-Rows":"0","Table":"OlapTable","ReadColumns":["Key","Value"],"SsaProgram":{"Command":[{"Assign":{"Constant":{"Int32":0},"Column":{"Id":3}}},{"Assign":{"Function":{"YqlOperationId":15,"KernelIdx":0,"FunctionType":2,"Arguments":[{"Id":2},{"Id":3}]},"Column":{"Id":4}}},{"Assign":{"Constant":{"Uint8":0},"Column":{"Id":5}}},{"Assign":{"Function":{"YqlOperationId":17,"KernelIdx":1,"FunctionType":2,"Arguments":[{"Id":4},{"Id":5}]},"Column":{"Id":6}}},{"Filter":{"Predicate":{"Id":6}}},{"Projection":{"Columns":[{"Id":1},{"Id":2}]}}]},"E-Cost":"0"}],"Node Type":"Filter-TableFullScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/OlapTable","reads":[{"columns":["Key","Value"],"scan_by":["Key (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/OlapTable","E-Rows":"0","Table":"OlapTable","ReadColumns":["Key","Value"],"SsaProgram":{"Command":[{"Assign":{"Constant":{"Int32":0},"Column":{"Id":3}}},{"Assign":{"Function":{"YqlOperationId":15,"KernelIdx":0,"FunctionType":2,"Arguments":[{"Id":2},{"Id":3}]},"Column":{"Id":4}}},{"Assign":{"Constant":{"Uint8":0},"Column":{"Id":5}}},{"Assign":{"Function":{"YqlOperationId":17,"KernelIdx":1,"FunctionType":2,"Arguments":[{"Id":4},{"Id":5}]},"Column":{"Id":6}}},{"Filter":{"Predicate":{"Id":6}}},{"Projection":{"Columns":[{"Id":1},{"Id":2}]}}]},"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Operators":[{"E-Rows":"0","Predicate":"Value \u003E 0","Pushdown":"True","Name":"Filter","E-Size":"0","E-Cost":"0"}],"Node Type":"Filter"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpStats::DeferredEffects-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 27929, MsgBus: 8780 2025-06-30T09:25:33.404208Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671142436985632:2061];send_to=[0:7307199536658146131:7762515]; 
2025-06-30T09:25:33.404232Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004220/r3tmp/tmpVJRx8t/pdisk_1.dat 2025-06-30T09:25:33.453933Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671142436985612:2080] 1751275533404051 != 1751275533404054 2025-06-30T09:25:33.454166Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27929, node 1 2025-06-30T09:25:33.469467Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:33.469482Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:33.469485Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:33.469541Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8780 TClient is connected to server localhost:8780 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:25:33.538425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:33.538468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:33.539571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:33.553820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:25:33.564626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:33.576248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:33.664971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:33.694716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:33.731724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:33.980931Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671142436987212:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:33.980975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.048916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.070893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.084338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.094574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.111091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.122409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.139349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.160716Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671146731955161:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.160751Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.160878Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671146731955166:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.161986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:34.165900Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671146731955168:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:34.243719Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671146731955219:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:34.406689Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 32273, MsgBus: 7183 2025-06-30T09:25:35.010079Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671152333565685:2090];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:35.011892Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004220/r3tmp/tmpEdwhsq/pdisk_1.dat 2025-06-30T09:25:35.041874Z node 2 :IMPORT WARN: schemeshard_import. ... ed retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:38.023881Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671163238350428:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:38.312562Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:39.045082Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275538411, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 31257, MsgBus: 9836 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004220/r3tmp/tmpJ79dJe/pdisk_1.dat 2025-06-30T09:25:39.503067Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:25:39.514367Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31257, node 4 2025-06-30T09:25:39.549285Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:39.549310Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:39.549313Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:39.549375Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9836 2025-06-30T09:25:39.599807Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:39.599855Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:39.601084Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9836 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:39.650789Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:39.656904Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:39.676307Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:39.705945Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:39.725585Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:39.976288Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671168794707201:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.976326Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.992745Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.014461Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.027551Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.042122Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.061818Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.074961Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.090629Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.118961Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671173089675151:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.118995Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.119386Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671173089675156:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.120546Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:40.126093Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671173089675158:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:40.179369Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671173089675209:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:40.499971Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
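Aside on the WorkloadService warnings above: the service fetches the "default" resource pool, gets NOT_FOUND, schedules its creation, and the later "path exist, request accepts it" message indicates a concurrent creator already won the race, so the sequence is benign bootstrap noise. For reference, a minimal YQL sketch of an explicit resource pool definition; the pool name and both settings are illustrative assumptions recalled from the YDB workload manager docs, not taken from this run:

    -- hypothetical pool; setting names assumed, not confirmed by this log
    CREATE RESOURCE POOL test_pool WITH (
        CONCURRENT_QUERY_LIMIT = 10,  -- cap on queries running concurrently in the pool
        QUEUE_SIZE = 100              -- cap on queries waiting for admission
    );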
: Warning: Type annotation, code: 1030
:3:46: Warning: At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpParams::Decimal+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 23637, MsgBus: 5431 2025-06-30T09:25:34.424257Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671146756734695:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:34.424289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004215/r3tmp/tmp5SK1tJ/pdisk_1.dat 2025-06-30T09:25:34.487309Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23637, node 1 2025-06-30T09:25:34.500502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:34.500529Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:34.500532Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:34.500580Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5431 2025-06-30T09:25:34.531091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:34.531131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:34.532071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5431 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:34.580905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
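Aside on the code 1108 warning above: with nullable arguments, legacy YQL IN does not follow ANSI SQL's three-valued logic for NULLs, which can surprise; the log itself names the pragma that switches to ANSI behavior. A minimal sketch, with table and column names invented for illustration:

    -- enable ANSI semantics for IN over empty or nullable collections,
    -- as suggested by the warning text
    PRAGMA AnsiInForEmptyOrNullableItemsCollections;
    SELECT *
    FROM my_table                          -- hypothetical table
    WHERE maybe_null_key IN (1, 2, NULL);  -- nullable item: ANSI semantics apply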
2025-06-30T09:25:34.584214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:34.585676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.608890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.638763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.652114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.896977Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671146756736257:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.897005Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.958545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.971369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.984971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.998649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.011189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.026796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.084192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.102338Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671151051704212:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.102361Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.102536Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671151051704217:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.103361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:35.108948Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671151051704219:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:35.191866Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671151051704270:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:35.427199Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 20523, MsgBus: 17516 2025-06-30T09:25:35.751203Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671149563148452:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:35.752073Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004215/r3tmp/tmpcc6c8E/pdisk_1.dat 2025-06-30T09:25:35.767175Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:35.767490Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521671149563148339:20 ... first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:39.563609Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.576793Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:39.599926Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:39.614353Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:39.910886Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671169078729177:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.910968Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.922593Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.936919Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.950605Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.975353Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.990129Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.007219Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.027405Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.056101Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671173373697128:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.056132Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.056314Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671173373697133:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.057462Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:40.061293Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:40.061374Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671173373697135:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:40.118080Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671173373697186:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:40.273510Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.393197Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:40.478130Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7521671173373697664:2518], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:4:17: Error: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At tuple, At function: SqlProjectItem, At lambda
:3:25: Error: At function: Parameter, At function: DataType
:3:25: Error: Invalid decimal precision: 99 2025-06-30T09:25:40.478276Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=4&id=NmY4ZWE3YTktOTdiOTNhYjktYmQ5ZDVjZjctMTZlNzBmMmY=, ActorId: [4:7521671173373697662:2517], ActorState: ExecuteState, TraceId: 01jz02g3zv0cn6xpxchjtjcctn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:25:40.498229Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ODY1NjU4Mi1mZjU1YzdiYS1hNDJiMTMxMC0xMDZlZDY3NQ==, ActorId: [4:7521671173373697668:2520], ActorState: ExecuteState, TraceId: 01jz02g3zz42ej23jrp9dgqw36, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1266: ydb/core/kqp/query_data/kqp_query_data.cpp:271: Parameter $value22 type mismatch, expected: { Kind: Data Data { Scheme: 4865 DecimalParams { Precision: 22 Scale: 9 } } }, actual: Type (Data), schemeType: Decimal(35,10), schemeTypeId: 4865 2025-06-30T09:25:40.519402Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7521671173373697683:2526], status: GENERIC_ERROR, issues:
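Aside on the two Decimal failures above: a parameter declared with precision 99 fails type annotation (supported precision tops out at 35, consistent with the Decimal(35,10) columns elsewhere in this block), and a value prepared as Decimal(35,10) cannot be bound where Decimal(22,9) is expected. A minimal sketch of the first failure; the parameter name is illustrative:

    -- precision 99 exceeds the supported maximum, so compilation fails
    -- with "Invalid decimal precision"
    DECLARE $value AS Decimal(99, 9);
    SELECT $value;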
: Error: Type annotation, code: 1030
:7:29: Error: At function: KiWriteTable!
:7:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:4:25: Error: Implicit decimal cast would lose precision
:7:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:7:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-30T09:25:40.520347Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=4&id=ZTZmZjE0NDItNDllMTU0MjAtNWY3MzI1NjMtZjY3MGVlNGY=, ActorId: [4:7521671173373697681:2525], ActorState: ExecuteState, TraceId: 01jz02g40p07bsmf2h5fnjsxtb, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:25:40.526566Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7521671173373697694:2531], status: GENERIC_ERROR, issues:
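Aside on the KiWriteTable errors above: writing struct members typed Decimal(35,10) into Decimal(22,9) columns is rejected because the implicit narrowing cast could lose precision; an explicit CAST states the intent. A hedged sketch reusing the column names from the error text, with an assumed table name:

    -- hypothetical table `t` with Key Int32, Value22 Decimal(22,9), Value35 Decimal(35,10)
    UPSERT INTO t (Key, Value22, Value35) VALUES (
        1,
        CAST(Decimal("1.230000000", 35, 10) AS Decimal(22, 9)),  -- explicit narrowing
        Decimal("1.230000000", 35, 10)                           -- matches column type as-is
    );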
: Error: Type annotation, code: 1030
:3:29: Error: At function: KiWriteTable!
:3:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:0:14: Error: Implicit decimal cast would lose precision
:3:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:3:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-30T09:25:40.526893Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=4&id=Njk0YmNlYjAtY2E5MTkzMzQtYTI4ZjA3YS0yNWM2MGY5Mg==, ActorId: [4:7521671173373697692:2530], ActorState: ExecuteState, TraceId: 01jz02g419fzfm94gswmma9csh, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,0,1000,0,CATEGORY_BLOOM_FILTER] >> KqpStats::MultiTxStatsFullYql [GOOD] >> KqpStats::MultiTxStatsFullScan >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1024,0,0,0] >> KqpQuery::CurrentUtcTimestamp [GOOD] >> KqpQuery::DdlInDataQuery >> LocalPartitionReader::FeedSlowly >> LocalPartitionReader::FeedSlowly [GOOD] >> SelfHeal::TestOneFaultyMaintenanceRequestOneMaintenanceRequestMirror3dc [GOOD] >> SelfHeal::TestOneFaultyMaintenanceRequestOneMaintenanceRequest4Plus2Block >> KqpLimits::QueryReplySize [GOOD] >> KqpLimits::ReadsetCountLimit >> TYdbControlPlaneStoragePipeline::ShouldCheckDisableCurrentIamGetTask [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldReturnPartialBatchForGetTask >> KqpNewEngine::ContainerRegistryCombiner [GOOD] >> KqpNewEngine::DeferredEffects >> KqpLimits::QSReplySize+useSink [GOOD] >> KqpLimits::QSReplySize-useSink >> KqpStats::DataQueryWithEffects-UseSink [GOOD] >> KqpStats::DataQueryMulti |84.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::FeedSlowly [GOOD] |84.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> KqpExplain::IdxFullscan [GOOD] >> KqpExplain::MultiJoinCteLinks ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageModifyBinding::ShouldCheckObjectStorageProjectionByTypes [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? D 08:27 0:54 [kworker/u128:0+ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? S 08:27 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/0] root 18 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/0:1-rcu_par_gp] root 19 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/1] root 22 0.0 0.0 0 0 ? S 08:27 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/2] root 28 0.0 0.0 0 0 ? S 08:27 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? 
S 08:27 0:00 [idle_inject/3] root 34 0.0 0.0 0 0 ? S 08:27 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/4] root 40 0.0 0.0 0 0 ? S 08:27 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/5] root 46 0.0 0.0 0 0 ? S 08:27 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/6] root 52 0.0 0.0 0 0 ? S 08:27 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/6] root 55 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/7] root 58 0.0 0.0 0 0 ? S 08:27 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/7:0H-events_highpri] root 62 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/8] root 63 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/8] root 64 0.0 0.0 0 0 ? S 08:27 0:02 [migration/8] root 65 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/8] root 67 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/8:0H-events_highpri] root 68 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/9] root 69 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/9] root 70 0.0 0.0 0 0 ? S 08:27 0:02 [migration/9] root 71 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/9] root 73 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/9:0H-events_highpri] root 74 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/10] root 75 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/10] root 76 0.0 0.0 0 0 ? S 08:27 0:02 [migration/10] root 77 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/10] root 78 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/10:0-rcu_gp] root 79 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/10:0H-events_highpri] root 80 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/11] root 81 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/11] root 82 0.0 0.0 0 0 ? S 08:27 0:02 [migration/11] root 83 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/11] root 85 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/11:0H-events_highpri] root 86 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/12] root 87 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/12] root 88 0.0 0.0 0 0 ? S 08:27 0:02 [migration/12] root 89 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/12] root 91 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/12:0H-events_highpri] root 92 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/13] root 93 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/13] root 94 0.0 0.0 0 0 ? S 08:27 0:02 [migration/13] root 95 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/13] root 97 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/13:0H-events_highpri] root 98 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/14] root 99 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/14] root 100 0.0 0.0 0 0 ? S 08:27 0:02 [migration/14] root 101 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/14] root 102 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/14:0-rcu_gp] root 103 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/14:0H-kblockd] root 104 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/15] root 105 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/15] root 106 0.0 0.0 0 0 ? S 08:27 0:02 [migration/15] root 107 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/15] root 109 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/15:0H-events_highpri] root 110 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/16] root 111 0.0 0.0 0 0 ? 
S 08:27 0:00 [idle_inject/16] root 112 0.0 0.0 0 0 ? S 08:27 0:02 [migration/16] root 113 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/16] root 115 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/16:0H-events_highpri] root 116 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/17] root 117 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/17] root 118 0.0 0.0 0 0 ? S 08:27 0:02 [migration/17] root 119 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/17] root 121 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/17:0H-events_highpri] root 122 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/18] root 123 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/18] root 124 0.0 0.0 0 0 ? S 08:27 0:02 [migration/18] root 125 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/19] root 130 0.0 0.0 0 0 ? S 08:27 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/20] root 136 0.0 0.0 0 0 ? S 08:27 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/20] root 138 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/20:0-rcu_gp] root 139 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/21] root 142 0.0 0.0 0 0 ? S 08:27 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/22] root 148 0.0 0.0 0 0 ? S 08:27 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/22] root 151 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/23] root 154 0.0 0.0 0 0 ? S 08:27 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/24] root 160 0.0 0.0 0 0 ? S 08:27 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/25] ... ckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:38.666112Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:38.667930Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/mappings". 
Create session OK 2025-06-30T09:25:38.667948Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:38.667952Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:38.668237Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-30T09:25:38.668239Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:38.668241Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:38.668554Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/quotas". Create session OK 2025-06-30T09:25:38.668560Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:38.668562Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:38.701520Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:38.701541Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:38.750075Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:38.750093Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:38.759399Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:38.759421Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:38.760853Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:38.760868Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:38.783523Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:38.783542Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:38.783911Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:38.783923Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:38.784018Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:38.784020Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:38.784063Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:38.784064Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:38.784103Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/quotas" 
2025-06-30T09:25:38.784104Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:38.784155Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:38.784159Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:38.784203Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:38.784204Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:38.784246Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:38.784248Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:38.784279Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:38.784281Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:38.784331Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:38.784332Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:38.784364Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:38.784365Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:39.559525Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage_bindings.cpp:81: [yandexcloud://test_folder_id_1, test_user@staff, utbue775hov38uqsh12j] CreateBindingRequest, validation failed: **** (D7BA8005) content { name: "test_binding_name_1" connection_id: "utcue775hpjqui57ivjm" setting { object_storage { subset { path_pattern: "/root/" schema { column { name: "a" type { type_id: BOOL } } } partitioned_by: "a" } } } acl { visibility: PRIVATE } } error:
: Error: Column "a" from projection does not support Bool type, code: 400010 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageModifyConnection::ShouldCheckPreviousRevisionFailed [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? I 08:27 0:54 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? S 08:27 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/0] root 18 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/0:1-rcu_par_gp] root 19 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/1] root 22 0.0 0.0 0 0 ? S 08:27 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/2] root 28 0.0 0.0 0 0 ? S 08:27 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/3] root 34 0.0 0.0 0 0 ? S 08:27 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/4] root 40 0.0 0.0 0 0 ? S 08:27 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/5] root 46 0.0 0.0 0 0 ? S 08:27 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/6] root 52 0.0 0.0 0 0 ? S 08:27 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/6] root 55 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/7] root 58 0.0 0.0 0 0 ? S 08:27 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/7:0H-events_highpri] root 62 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/8] root 63 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/8] root 64 0.0 0.0 0 0 ? S 08:27 0:02 [migration/8] root 65 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/8] root 67 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/8:0H-events_highpri] root 68 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/9] root 69 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/9] root 70 0.0 0.0 0 0 ? S 08:27 0:02 [migration/9] root 71 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/9] root 73 0.0 0.0 0 0 ? 
I< 08:27 0:00 [kworker/9:0H-events_highpri] root 74 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/10] root 75 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/10] root 76 0.0 0.0 0 0 ? S 08:27 0:02 [migration/10] root 77 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/10] root 78 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/10:0-rcu_gp] root 79 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/10:0H-events_highpri] root 80 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/11] root 81 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/11] root 82 0.0 0.0 0 0 ? S 08:27 0:02 [migration/11] root 83 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/11] root 85 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/11:0H-events_highpri] root 86 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/12] root 87 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/12] root 88 0.0 0.0 0 0 ? S 08:27 0:02 [migration/12] root 89 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/12] root 91 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/12:0H-events_highpri] root 92 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/13] root 93 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/13] root 94 0.0 0.0 0 0 ? S 08:27 0:02 [migration/13] root 95 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/13] root 97 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/13:0H-events_highpri] root 98 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/14] root 99 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/14] root 100 0.0 0.0 0 0 ? S 08:27 0:02 [migration/14] root 101 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/14] root 102 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/14:0-rcu_gp] root 103 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/14:0H-kblockd] root 104 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/15] root 105 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/15] root 106 0.0 0.0 0 0 ? S 08:27 0:02 [migration/15] root 107 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/15] root 109 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/15:0H-events_highpri] root 110 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/16] root 111 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/16] root 112 0.0 0.0 0 0 ? S 08:27 0:02 [migration/16] root 113 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/16] root 115 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/16:0H-events_highpri] root 116 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/17] root 117 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/17] root 118 0.0 0.0 0 0 ? S 08:27 0:02 [migration/17] root 119 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/17] root 121 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/17:0H-events_highpri] root 122 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/18] root 123 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/18] root 124 0.0 0.0 0 0 ? S 08:27 0:02 [migration/18] root 125 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/19] root 130 0.0 0.0 0 0 ? S 08:27 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/20] root 136 0.0 0.0 0 0 ? S 08:27 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/20] root 138 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/20:0-rcu_gp] root 139 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/21] root 142 0.0 0.0 0 0 ? S 08:27 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/22] root 148 0.0 0.0 0 0 ? 
S 08:27 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/22] root 151 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/23] root 154 0.0 0.0 0 0 ? S 08:27 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/24] root 160 0.0 0.0 0 0 ? S 08:27 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/25] ... StorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:38.648855Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:38.649039Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-30T09:25:38.649048Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:38.649048Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK 2025-06-30T09:25:38.649050Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:38.649054Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:38.649056Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:38.649167Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys". 
Create session OK 2025-06-30T09:25:38.649176Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:38.649178Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:38.692185Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:38.692209Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:38.743934Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:38.743951Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:38.764982Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:38.765001Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:38.765557Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:38.765563Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:38.765699Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:38.765702Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:38.765820Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:38.765823Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:38.766479Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:38.766487Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:38.766625Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:38.766627Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:38.767023Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:38.767027Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:38.767096Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:38.767100Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:38.767102Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:38.767103Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:38.767144Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:38.767145Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: 
Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:38.767179Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:38.767180Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:38.767222Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:38.767224Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:38.769297Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:38.769308Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:39.650272Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:644: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:185: Revision of the connection has been changed already. Please restart the request with a new revision 2025-06-30T09:25:39.650701Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user@staff, utcue775hpkkod53oaje] ModifyConnectionRequest: {connection_id: "utcue775hpkkod53oaje" content { name: "test_connection_name_2" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } previous_revision: 10 } ERROR: {
: Error: Revision of the connection has been changed already. Please restart the request with a new revision, code: 1003 } >> KqpStats::MultiTxStatsFullScan [GOOD] >> KqpStats::OneShardLocalExec+UseSink >> KqpRanges::NullInKey >> KqpQuery::DdlInDataQuery [GOOD] >> KqpQuery::CreateAsSelect_BadCases >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1024,0,0,0] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1024,0,0,0.5] >> KqpNotNullColumns::UpdateNotNullPk >> KqpLimits::DataShardReplySizeExceeded [GOOD] >> KqpQuery::QueryClientTimeout [GOOD] >> KqpQuery::QueryClientTimeoutPrecompiled >> KqpLimits::OutOfSpaceBulkUpsertFail [GOOD] >> KqpLimits::OutOfSpaceYQLUpsertFail+useSink >> KqpStats::DataQueryMulti [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByName >> KqpLimits::ReadsetCountLimit [GOOD] >> KqpLimits::QueryExecTimeoutCancel >> KqpNewEngine::DeferredEffects [GOOD] >> KqpNewEngine::Delete+UseSink >> KqpSort::ReverseFirstKeyOptimized ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpLimits::DataShardReplySizeExceeded [GOOD] Test command err: Trying to start YDB, gRPC: 31664, MsgBus: 29980 2025-06-30T09:25:33.621475Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671143085513182:2164];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:33.622270Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00421f/r3tmp/tmpbHIaaG/pdisk_1.dat 2025-06-30T09:25:33.728066Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31664, node 1 2025-06-30T09:25:33.732468Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671143085513056:2080] 1751275533619917 != 1751275533619920 2025-06-30T09:25:33.748278Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:33.748293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:33.748297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:33.748352Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29980 2025-06-30T09:25:33.792799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:33.792829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:33.794970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29980 WaitRootIsUp 'Root'... 
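Note on the ModifyConnection failure above (code 1003): the request carried previous_revision: 10, and validators.cpp:185 rejected it because the connection's revision had already advanced. This appears to be a plain optimistic-concurrency check: the caller is expected to re-read the connection, pick up its current revision, and resend the modification with that value — which is exactly the rejection path that TTestCaseShouldCheckPreviousRevisionFailed asserts.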
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:33.840727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:33.847496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.543392Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671147380481320:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.543426Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.543820Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671147380481332:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.546630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:34.550073Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671147380481334:2319], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:34.603728Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671147380481385:2555] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:34.622279Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 3869, MsgBus: 4851 2025-06-30T09:25:35.861146Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671152134715621:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:35.861186Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00421f/r3tmp/tmp3CuFEz/pdisk_1.dat 2025-06-30T09:25:35.883659Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3869, node 2 2025-06-30T09:25:35.897699Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:35.897714Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:35.897717Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:35.897774Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4851 TClient is connected to server localhost:4851 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:35.963438Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:35.963473Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:35.963886Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:35.964875Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:25:35.971681Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:36.451266Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671156429683850:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.451317Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.451448Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671156429683862:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.452535Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:36.459226Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521671156429683864:2319], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:36.527750Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521671156429683915:2549] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/ ... te QueryResponse for error on request, msg:
: Error: Query result size limit exceeded. (200003994 > 50331648), code: 2013 Trying to start YDB, gRPC: 16872, MsgBus: 12307 2025-06-30T09:25:41.076460Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671177190359093:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:41.076495Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00421f/r3tmp/tmpQpErjb/pdisk_1.dat 2025-06-30T09:25:41.095319Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16872, node 4 2025-06-30T09:25:41.115282Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:41.115296Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:41.115314Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:41.115365Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12307 TClient is connected to server localhost:12307 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:25:41.182228Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:41.182263Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:41.183417Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:41.185398Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
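Note on the size error above (code 2013): 50331648 bytes is exactly 48 MiB (48 × 1024 × 1024 = 50331648), so the 200003994-byte result overshoots the reply-size cap roughly fourfold. The test presumably produces an oversized result on purpose, to verify that the session answers with code 2013 instead of returning the payload.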
2025-06-30T09:25:41.188646Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:41.197348Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:41.209583Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:41.247951Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.269619Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:41.503175Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671177190360662:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.503211Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.519646Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.533805Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.600230Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.616132Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.627968Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.643727Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.659804Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.688005Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671177190361319:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.688048Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.688200Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671177190361324:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.689271Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:41.691927Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671177190361326:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:41.743227Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671177190361377:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:41.927551Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.078531Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:42.688115Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=YWM2NWJiY2ItNTNmYjRmNzgtMmZlOTFkZWMtZjdjYzkzY2Y=, ActorId: [4:7521671177190361639:2469], ActorState: ExecuteState, TraceId: 01jz02g61qetxqf9vt1a7qjdze, Create QueryResponse for error on request, msg: ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpStats::DataQueryMulti [GOOD] Test command err: Trying to start YDB, gRPC: 10457, MsgBus: 6654 2025-06-30T09:25:38.570319Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671165438229306:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:38.570507Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00420c/r3tmp/tmppDGh0s/pdisk_1.dat 2025-06-30T09:25:38.641442Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:38.641545Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671165438229279:2080] 1751275538570081 != 1751275538570084 TServer::EnableGrpc on GrpcPort 10457, node 1 2025-06-30T09:25:38.659236Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:38.659251Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:38.659264Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:38.659311Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6654 TClient is connected to server localhost:6654 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:25:38.719958Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:38.719988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:38.721444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:38.732026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:25:38.739655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.807176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:38.841429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.913802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:39.011548Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671169733198178:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.011583Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.058628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.074010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.085339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.098805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.112939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.129499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.141187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.159540Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671169733198830:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.159572Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.159602Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671169733198835:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.160586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:39.168319Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671169733198837:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:39.257734Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671169733198888:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:39.572766Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
: Warning: Type annotation, code: 1030
:3:46: Warning: At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 Trying to start YDB, gRPC: 15503, MsgBus: 29693 2025-06-30T09:25:39.928372Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671169045415969:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:39.928390Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPat ... Id: [3:7521671177753429898:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:41.822950Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671177753429949:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 29802, MsgBus: 4818 2025-06-30T09:25:42.298489Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671182231273627:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:42.298538Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00420c/r3tmp/tmpxul8Zo/pdisk_1.dat 2025-06-30T09:25:42.320838Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:42.321108Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671182231273604:2080] 1751275542298330 != 1751275542298333 TServer::EnableGrpc on GrpcPort 29802, node 4 2025-06-30T09:25:42.335100Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:42.335114Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:42.335118Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:42.335176Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4818 2025-06-30T09:25:42.407376Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:42.407402Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:42.407673Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4818 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
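For the SqlIn warning a few lines above (code 1108): IN over a collection that is empty or contains NULLs can behave surprisingly, and the compiler suggests the pragma named in the warning, which switches IN to ANSI SQL semantics for such collections. A minimal YQL sketch of the suggested fix — the table and column names here are illustrative, not taken from the test:

    -- Hypothetical repro for warning 1108; names are made up for illustration.
    PRAGMA AnsiInForEmptyOrNullableItemsCollections;
    SELECT Key, Value
    FROM `/Root/KeyValue`
    WHERE Value IN ("One", NULL);  -- NULL item: ANSI semantics apply under the pragma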
2025-06-30T09:25:42.435136Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:42.442238Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:25:42.452516Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.472770Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:42.503601Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:42.520750Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.809809Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671182231275217:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.809848Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.819943Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.833517Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.847233Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.863475Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.873718Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.893735Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.910849Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.931289Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671182231275873:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.931324Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.931507Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671182231275878:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.932627Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:42.937995Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:42.938107Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671182231275880:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:43.019541Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671186526243227:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[with_queues-tables_format_v0] >> KqpExplain::MultiJoinCteLinks [GOOD] >> KqpNamedExpressions::NamedExpressionChanged+UseSink >> KqpNotNullColumns::UpdateNotNullPk [GOOD] >> KqpNotNullColumns::UpdateNotNullPkPg >> AnalyzeColumnshard::AnalyzeRebootSaInAggregate [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1024,0,0,0.5] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1024,0,100,0] >> KqpStats::OneShardLocalExec+UseSink [GOOD] >> KqpStats::OneShardLocalExec-UseSink >> test_format_without_version.py::TestQueueWithoutVersionWithTenant::test_common[std] >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,0,1000,0,CATEGORY_BLOOM_FILTER] [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,0,1000,0.5,CATEGORY_BLOOM_FILTER] >> KqpExtractPredicateLookup::OverflowLookup >> KqpKv::ReadRows_SpecificKey |84.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> KqpRanges::NullInKey [GOOD] >> KqpRanges::NullInKeySuffix ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpExplain::MultiJoinCteLinks [GOOD] Test command err: Trying to start YDB, gRPC: 10548, MsgBus: 19071 2025-06-30T09:25:37.074959Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671158026554482:2245];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00420e/r3tmp/tmpKJXvqH/pdisk_1.dat 2025-06-30T09:25:37.118502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:37.145160Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671158026554249:2080] 1751275537057295 != 1751275537057298 2025-06-30T09:25:37.145607Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10548, node 1 2025-06-30T09:25:37.180533Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:37.180549Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:37.180553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:37.180615Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19071 2025-06-30T09:25:37.222334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:37.222371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:37.223262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19071 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:37.266815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:37.281442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:37.318607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:37.388247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:37.403576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:37.605507Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671158026555865:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.605540Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.660122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.668840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.681830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.692802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.707031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.725860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.741403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:37.766408Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671158026556515:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.766442Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.766583Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671158026556520:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:37.767745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:37.772257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:37.772338Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671158026556522:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:37.864922Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671158026556573:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:38.074624Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; {"Plan":{"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"TopSort","Limit":"SUM(10,15)","TopSortBy":"row.Text"},{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"0","Table":"EightShard","ReadColumns":["Data","Key","Text"],"E-Cost":"0"}],"Node Type":"TopSort-TableFullScan"}],"Node Type":"Merge","SortColumns":["Text (Asc)"],"PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId ... on.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:42.645351Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:25:42.649491Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:42.653365Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:42.688088Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:42.712412Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:42.730663Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:43.098844Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671184449203579:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.098914Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.105213Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.118612Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.132596Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.148780Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.163482Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.182127Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.249421Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.269363Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671184449204232:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.269459Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.269652Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671184449204237:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.270798Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:43.275545Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:43.275704Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7521671184449204242:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:43.335607Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521671184449204293:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:43.551158Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; {"Plan":{"Plans":[{"PlanNodeId":12,"Plans":[{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"E-Size":"0","PlanNodeId":8,"LookupKeyColumns":["Key"],"Node Type":"TableLookup","Path":"\/Root\/EightShard","Columns":["Data","Key","Text"],"E-Rows":"0","Table":"EightShard","Plans":[{"PlanNodeId":7,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Iterator":"PartitionByKey","Name":"Iterator"},{"Inputs":[],"Name":"PartitionByKey","Input":"precompute_0_0"}],"Node Type":"ConstantExpr-Aggregate","CTE Name":"precompute_0_0"}],"PlanNodeType":"Connection","E-Cost":"0"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Limit","Limit":"1001"},{"Inputs":[{"InternalOperatorId":3},{"InternalOperatorId":2}],"E-Rows":"0","Condition":"es.Key = kv.Key","Name":"InnerJoin (MapJoin)","E-Size":"0","E-Cost":"0"},{"Inputs":[],"ToFlow":"precompute_0_0","Name":"ToFlow"},{"Inputs":[{"ExternalPlanNodeId":8}],"E-Rows":"0","Predicate":"Exist(item.Key)","Name":"Filter","E-Size":"0","E-Cost":"0"}],"Node Type":"Limit-InnerJoin (MapJoin)-ConstantExpr-Filter","CTE Name":"precompute_0_0"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":10}],"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"},{"PlanNodeId":5,"Subplan Name":"CTE precompute_0_0","Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/KeyValue","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"KeyValue","ReadColumns":["Key","Value"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Collect"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"Precompute_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","reads":[{"lookup_by":["Key"],"columns":["Data","Key","Text"],"type":"Lookup"}]},{"name":"\/Root\/KeyValue","reads":[{"columns":["Key","Value"],"scan_by":["Key (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":7,"Operators":[{"E-Rows":"0","Columns":["Data","Key","Text"],"E-Size":"0","E-Cost":"0","Name":"TableLookup","Table":"EightShard","LookupKeyColumns":["Key"]}],"Node Type":"TableLookup","PlanNodeType":"Connection"}],"Operators":[{"E-Rows":"0","Predicate":"Exist(item.Key)","Name":"Filter","E-Size":"0","E-Cost":"0"}],"Node 
Type":"Filter"},{"PlanNodeId":13,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/KeyValue","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"KeyValue","ReadColumns":["Key","Value"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Operators":[{"E-Rows":"0","Condition":"es.Key = kv.Key","Name":"InnerJoin (MapJoin)","E-Size":"0","E-Cost":"0"}],"Node Type":"InnerJoin (MapJoin)"}],"Operators":[{"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Operators":[{"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} >> TYdbControlPlaneStoragePipeline::ShouldReturnPartialBatchForGetTask [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaInAggregate [GOOD] Test command err: 2025-06-30T09:23:17.779708Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:420:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:17.779757Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:17.779771Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045bf/r3tmp/tmpFzvXXg/pdisk_1.dat 2025-06-30T09:23:17.884277Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29623, node 1 2025-06-30T09:23:17.993256Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:17.993280Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:17.993286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:17.993381Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:23:17.994106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:18.095410Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:18.095455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:18.114825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25568 2025-06-30T09:23:18.498567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:23:19.613968Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-30T09:23:19.631110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:19.631156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:19.683655Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:23:19.691249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:19.910788Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:19.935577Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.935773Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.935959Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.935994Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.936064Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.936085Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.936101Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.936118Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.936138Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:20.095428Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:20.095492Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:20.107483Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:20.160168Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:20.175888Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-30T09:23:20.175937Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-30T09:23:20.187521Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-30T09:23:20.187995Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-30T09:23:20.188035Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-30T09:23:20.188040Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-30T09:23:20.188045Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-30T09:23:20.188051Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-30T09:23:20.188056Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-30T09:23:20.188063Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-30T09:23:20.189294Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-30T09:23:20.215599Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7960: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:23:20.215656Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7990: ConnectToSA(), pipe client id: [2:1799:2566], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:23:20.216516Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1804:2570] 2025-06-30T09:23:20.221408Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-30T09:23:20.222234Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1848:2595] 2025-06-30T09:23:20.224283Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1848:2595], schemeshard id = 72075186224037897 2025-06-30T09:23:20.234215Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-30T09:23:20.234244Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-30T09:23:20.234261Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-30T09:23:20.238157Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:20.241079Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-30T09:23:20.241123Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-30T09:23:20.335493Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-30T09:23:20.511607Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-30T09:23:20.591137Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-30T09:23:21.266233Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:21.321568Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2152:3024], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:21.321638Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:21.327984Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:23:21.419954Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2289:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:23:21.420036Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2289:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:23:21.420109Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2289:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:23:21.420137Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2289:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:23:21.420166Z node 2 :TX_COLUMNSHARD WARN: ... 5186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-30T09:25:41.988413Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-30T09:25:43.140473Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-30T09:25:43.140537Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-30T09:25:43.140545Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-30T09:25:43.140791Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-30T09:25:43.162095Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-30T09:25:43.162481Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-30T09:25:43.162508Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-30T09:25:43.162770Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-30T09:25:43.177609Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-30T09:25:43.177718Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-30T09:25:43.177945Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10471:7788], server id = [2:10476:7793], tablet id = 72075186224037899, status = OK 2025-06-30T09:25:43.177974Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10471:7788], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:43.178033Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10472:7789], server id = [2:10477:7794], tablet id = 72075186224037900, status = OK 2025-06-30T09:25:43.178041Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10472:7789], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:43.178266Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10473:7790], server id = [2:10478:7795], tablet id = 72075186224037901, status = OK 2025-06-30T09:25:43.178280Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10473:7790], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:43.178436Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10474:7791], server id = [2:10479:7796], tablet id = 72075186224037902, status = OK 2025-06-30T09:25:43.178446Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10474:7791], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:43.178668Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10475:7792], server id = [2:10481:7798], tablet id = 72075186224037903, status = OK 2025-06-30T09:25:43.178683Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10475:7792], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:43.178790Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-30T09:25:43.178936Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-30T09:25:43.178970Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10471:7788], server id = [2:10476:7793], tablet id = 72075186224037899 2025-06-30T09:25:43.178975Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:43.179049Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10472:7789], server id = [2:10477:7794], tablet id = 72075186224037900 2025-06-30T09:25:43.179057Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:43.179128Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-30T09:25:43.179194Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-30T09:25:43.179242Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10485:7802], server id = 
[2:10488:7805], tablet id = 72075186224037904, status = OK 2025-06-30T09:25:43.179254Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10485:7802], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:43.179390Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10473:7790], server id = [2:10478:7795], tablet id = 72075186224037901 2025-06-30T09:25:43.179396Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:43.179425Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037903 2025-06-30T09:25:43.179504Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10474:7791], server id = [2:10479:7796], tablet id = 72075186224037902 2025-06-30T09:25:43.179509Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:43.179536Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10487:7804], server id = [2:10490:7807], tablet id = 72075186224037905, status = OK 2025-06-30T09:25:43.179547Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10487:7804], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:43.179594Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10489:7806], server id = [2:10492:7809], tablet id = 72075186224037906, status = OK 2025-06-30T09:25:43.179599Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10489:7806], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:43.179669Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10475:7792], server id = [2:10481:7798], tablet id = 72075186224037903 2025-06-30T09:25:43.179672Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:43.179699Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10491:7808], server id = [2:10495:7812], tablet id = 72075186224037907, status = OK 2025-06-30T09:25:43.179703Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10491:7808], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:43.179752Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037904 2025-06-30T09:25:43.179844Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10494:7811], server id = [2:10496:7813], tablet id = 72075186224037908, status = OK 2025-06-30T09:25:43.179850Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10494:7811], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-30T09:25:43.179893Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10485:7802], server id = [2:10488:7805], tablet id = 72075186224037904 2025-06-30T09:25:43.179897Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:43.179993Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-30T09:25:43.180051Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037906 2025-06-30T09:25:43.180105Z node 2 
:STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10487:7804], server id = [2:10490:7807], tablet id = 72075186224037905 2025-06-30T09:25:43.180109Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:43.180144Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037907 2025-06-30T09:25:43.180184Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10489:7806], server id = [2:10492:7809], tablet id = 72075186224037906 2025-06-30T09:25:43.180188Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:43.180227Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037908 2025-06-30T09:25:43.180235Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-30T09:25:43.180281Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-30T09:25:43.180319Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-30T09:25:43.180749Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-30T09:25:43.181367Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10491:7808], server id = [2:10495:7812], tablet id = 72075186224037907 2025-06-30T09:25:43.181378Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:43.181511Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10494:7811], server id = [2:10496:7813], tablet id = 72075186224037908 2025-06-30T09:25:43.181516Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:25:43.181590Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-30T09:25:43.197826Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODUxNDMwMTUtZDJiZDllNC0zMmUwMmM2MS04OTgyOTM5Nw==, TxId: 2025-06-30T09:25:43.197860Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODUxNDMwMTUtZDJiZDllNC0zMmUwMmM2MS04OTgyOTM5Nw==, TxId: 2025-06-30T09:25:43.198058Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-30T09:25:43.212769Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-30T09:25:43.212802Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:5725:3815] >> KqpKv::ReadRows_UnknownTable >> KqpSort::ReverseFirstKeyOptimized [GOOD] >> KqpSort::ReverseLimitOptimized >> KqpNewEngine::Delete+UseSink [GOOD] >> KqpNewEngine::Delete-UseSink >> KqpKv::ReadRows_SpecificKey [GOOD] >> KqpKv::ReadRows_NonExistentKeys >> KqpNotNullColumns::UpdateNotNullPkPg [GOOD] >> KqpNotNullColumns::UpdateNotNull |84.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |84.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |84.2%| [LD] {RESULT} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1024,0,100,0] [GOOD] >> KqpNewEngine::PkRangeSelect1 >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1024,0,100,0.5] >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByName [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByMe |84.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |84.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |84.2%| [LD] {RESULT} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut >> KqpStats::OneShardLocalExec-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionChanged+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionChanged-UseSink >> KqpNewEngine::StreamLookupWithView >> KqpQuery::CreateAsSelect_BadCases [GOOD] >> KqpQuery::CreateAsSelectView >> KqpKv::ReadRows_NonExistentKeys [GOOD] >> KqpKv::ReadRows_NotFullPK >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,0,1000,1000000,0] >> KqpKv::ReadRows_UnknownTable [GOOD] >> KqpMergeCn::TopSortByDesc_Double_Limit3 >> KqpRanges::NullInKeySuffix [GOOD] >> KqpRanges::NullInPredicate ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpStats::OneShardLocalExec-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 1445, MsgBus: 4782 2025-06-30T09:25:40.209069Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671174804711995:2178];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:40.209250Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0041fb/r3tmp/tmpsQuuGc/pdisk_1.dat 2025-06-30T09:25:40.272807Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:40.272891Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671174804711843:2080] 1751275540206788 != 1751275540206791 TServer::EnableGrpc on GrpcPort 1445, node 1 2025-06-30T09:25:40.294592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:40.294609Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:40.294612Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:40.294673Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-30T09:25:40.310138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:40.310174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:40.311257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4782 TClient is connected to server localhost:4782 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:40.368108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:40.381349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:40.456360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:40.485149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.545708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:40.707047Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671174804713472:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.707090Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.756244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.764857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.778633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.792785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.807296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.822611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.835021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.852161Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671174804714124:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.852191Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671174804714129:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.852197Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.853086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:40.862168Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671174804714131:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:40.936318Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671174804714182:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:41.152486Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521671179099681788:2474] TxId: 281474976715673. Ctx: { TraceId: 01jz02g4kq2cxcdsggfbvc1w49, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTRlYzM2YWItZWEzZmJhOTctZGNiN2ZlNjMtNzgyZDdiNWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-30T09:25:41.163606Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275541197, txId: 281474976715672] shutting down 2025-06-30T09:25:41.210093Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 19821, MsgBus: 7956 2025-06-30T09:25:41.602993Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:752167117592481602 ... pleted, doublechecking } 2025-06-30T09:25:43.596431Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671185270219935:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:43.775760Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 27447, MsgBus: 7416 2025-06-30T09:25:44.175835Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671188587695786:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:44.175871Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0041fb/r3tmp/tmpAfHNKy/pdisk_1.dat 2025-06-30T09:25:44.212103Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671188587695766:2080] 1751275544175703 != 1751275544175706 2025-06-30T09:25:44.214048Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27447, node 4 2025-06-30T09:25:44.234073Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:44.234090Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:44.234097Z node 4 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:44.234175Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7416 2025-06-30T09:25:44.286865Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:44.286911Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:44.287999Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7416 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:44.305159Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:44.308627Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:44.323151Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.351590Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:44.384557Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.400744Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.647278Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671188587697360:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.647315Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.655915Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.676830Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.698147Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.758730Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.773861Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.831752Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.841642Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.863515Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671188587698018:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.863534Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671188587698023:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.863542Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.864444Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:44.869327Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671188587698025:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:44.949270Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671188587698076:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:45.178081Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpNotNullColumns::UpdateNotNull [GOOD] >> KqpNotNullColumns::UpdateTable_DontChangeNotNull |84.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest |84.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |84.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |84.2%| [LD] {RESULT} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut >> KqpSort::ReverseLimitOptimized [GOOD] >> KqpSort::ReverseEightShardOptimized >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,0,1000,0.5,CATEGORY_BLOOM_FILTER] [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,0,1000,0.5,BLOOM_FILTER] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1024,0,100,0.5] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1,1000,100,0] >> KqpNewEngine::Delete-UseSink [GOOD] >> KqpNewEngine::DecimalColumn >> KqpKv::ReadRows_NotFullPK [GOOD] >> KqpNewEngine::PkRangeSelect1 [GOOD] >> KqpNewEngine::PkRangeSelect2 >> KqpKv::ReadRows_SpecificReturnValue >> KqpNamedExpressions::NamedExpressionChanged-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged+UseSink >> KqpQuery::QueryClientTimeoutPrecompiled [GOOD] >> KqpQuery::QueryExplain >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByMe [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCombineFilters >> KqpMergeCn::TopSortByDesc_Double_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Date_Limit4 >> KqpQuery::CreateAsSelectView [GOOD] >> KqpLimits::QSReplySize-useSink [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStoragePipeline::ShouldReturnPartialBatchForGetTask [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? I 08:27 0:54 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? S 08:27 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/0] root 18 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/0:1-rcu_par_gp] root 19 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? 
...
S 08:27 0:00 [idle_inject/14] root 100 0.0 0.0 0 0 ? S 08:27 0:02 [migration/14] root 101 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/14] root 102 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/14:0-rcu_gp] root 103 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/14:0H-kblockd] root 104 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/15] root 105 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/15] root 106 0.0 0.0 0 0 ? S 08:27 0:02 [migration/15] root 107 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/15] root 109 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/15:0H-events_highpri] root 110 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/16] root 111 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/16] root 112 0.0 0.0 0 0 ? S 08:27 0:02 [migration/16] root 113 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/16] root 115 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/16:0H-events_highpri] root 116 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/17] root 117 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/17] root 118 0.0 0.0 0 0 ? S 08:27 0:02 [migration/17] root 119 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/17] root 121 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/17:0H-events_highpri] root 122 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/18] root 123 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/18] root 124 0.0 0.0 0 0 ? S 08:27 0:02 [migration/18] root 125 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/19] root 130 0.0 0.0 0 0 ? S 08:27 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/20] root 136 0.0 0.0 0 0 ? S 08:27 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/20] root 138 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/20:0-rcu_gp] root 139 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/21] root 142 0.0 0.0 0 0 ? S 08:27 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/22] root 148 0.0 0.0 0 0 ? S 08:27 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/22] root 151 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/23] root 154 0.0 0.0 0 0 ? S 08:27 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/24] root 160 0.0 0.0 0 0 ? S 08:27 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 08:27 0:00 [cpuhp/25] ... GE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:42.483599Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/queries". 
Create session OK 2025-06-30T09:25:42.483607Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:42.483608Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:42.483616Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-30T09:25:42.483618Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:42.483620Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:42.483676Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/quotas". Create session OK 2025-06-30T09:25:42.483685Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:42.483686Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:42.483709Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/mappings". 
Create session OK 2025-06-30T09:25:42.483711Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:42.483712Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:42.501683Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:42.501704Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:25:42.529577Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:42.529601Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:42.542169Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:42.542193Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:42.542566Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:42.542577Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:42.542666Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:42.542669Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:42.542722Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:42.542725Z node 
17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:42.542805Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:42.542808Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:42.543745Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:42.543750Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:42.543881Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:42.543886Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:42.543936Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:42.543939Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:42.544018Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:42.544020Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:42.544059Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:42.544061Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:42.544070Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:42.544073Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:42.544136Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:42.544138Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:42.544165Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:42.544167Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:43.706282Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: task_ping.cpp:427: PingTaskRequest (resign): UNAVAILABLE 1 2025-06-30T09:25:43.706229Z 0.000000s 2025-06-30T09:25:44.202929Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: task_ping.cpp:427: PingTaskRequest (resign): UNAVAILABLE 1 2025-06-30T09:25:44.202899Z 0.000000s 2025-06-30T09:25:44.527274Z node 17 :YQ_CONTROL_PLANE_STORAGE ERROR: ydb_control_plane_storage.cpp:608: Validation: (NYql::TCodeLineException) :0: Error parsing proto message for query. 
Please contact internal support >> KqpKv::ReadRows_SpecificReturnValue [GOOD] >> KqpKv::ReadRows_TimeoutCancelsReads >> KqpNotNullColumns::UpdateTable_DontChangeNotNull [GOOD] >> KqpNotNullColumns::UpdateNotNullPg >> KqpReturning::ReturningWorksIndexedUpsert+QueryService >> KqpSort::ReverseEightShardOptimized [GOOD] >> KqpSort::PassLimit >> KqpNewEngine::StreamLookupWithView [GOOD] >> KqpNewEngine::Truncated |84.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[without_queues-tables_format_v1] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpLimits::QSReplySize-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 9109, MsgBus: 6514 2025-06-30T09:25:34.936343Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671145280407274:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:34.936366Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004213/r3tmp/tmpDjX29x/pdisk_1.dat 2025-06-30T09:25:35.007842Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671145280407251:2080] 1751275534936181 != 1751275534936184 2025-06-30T09:25:35.010802Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9109, node 1 2025-06-30T09:25:35.022387Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:35.022404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:35.022407Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:35.022456Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:35.041040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:35.041066Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:35.041777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6514 TClient is connected to server localhost:6514 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:35.116229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:35.119622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:35.127988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:35.201687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:35.237242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:35.258175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:35.392176Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671149575376171:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.392218Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.449939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.507685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.566053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.577709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.591824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.606335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.619752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:35.638484Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671149575376827:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.638529Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.638643Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671149575376832:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:35.639789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:35.647103Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671149575376834:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:35.705232Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671149575376885:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:35.859820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:35.942350Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:36.250796Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7521671153870344865:2503], SessionActorId: [1:7521671153870344847:2503], statusCode=PRECONDITION_FAILED. Issue=
: Error: Memory limit exception, c ... tes of 67108864 bytes., code: 2029 Trying to start YDB, gRPC: 62998, MsgBus: 29276 2025-06-30T09:25:42.352338Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671181643275070:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:42.353981Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004213/r3tmp/tmpnPXISS/pdisk_1.dat 2025-06-30T09:25:42.371439Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62998, node 4 2025-06-30T09:25:42.396468Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:42.396489Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:42.396493Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:42.396565Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29276 2025-06-30T09:25:42.459376Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:42.459412Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:42.460579Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29276 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:25:42.487809Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:42.501012Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:42.520293Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:42.564255Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:42.583786Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:42.770466Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671181643276633:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.770502Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.778716Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.840273Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.851657Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.868174Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.885513Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.906954Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.965105Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.983965Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671181643277291:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.983995Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.984129Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671181643277296:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.985132Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:42.990091Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671181643277298:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:43.071036Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671185938244645:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:43.251657Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:43.355880Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:46.697070Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=MzY3NDY2NzktNDRkOTFhNTAtNzc4ZmQ1OWEtNjBlZjcxMTA=, ActorId: [4:7521671194528180223:2569], ActorState: ExecuteState, TraceId: 01jz02g8q81c9vemdmd620e4r3, Create QueryResponse for error on request, msg:
: Error: Intermediate data materialization exceeded size limit (88240924 > 50331648). This usually happens when trying to write large amounts of data or to perform lookup by big collection of keys in single query. Consider using smaller batches of data., code: 2013 >> KqpNewEngine::DecimalColumn [GOOD] >> KqpNewEngine::DecimalColumn35 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::CreateAsSelectView [GOOD] Test command err: Trying to start YDB, gRPC: 61712, MsgBus: 26656 2025-06-30T09:25:40.480270Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671171358024835:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:40.480295Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0041f1/r3tmp/tmpUA9Ujg/pdisk_1.dat 2025-06-30T09:25:40.540756Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:40.540868Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671171358024807:2080] 1751275540479984 != 1751275540479987 TServer::EnableGrpc on GrpcPort 61712, node 1 2025-06-30T09:25:40.563034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:40.563047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:40.563049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:40.563088Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26656 2025-06-30T09:25:40.619626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:40.619658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:40.620733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26656 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:40.655664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:40.663558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:40.741682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.777328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:40.795675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:40.946657Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671171358026401:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.946696Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.003434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.012054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.068196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.081685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.094536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.109080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.123087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.148234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671175652994351:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.148268Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.148468Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671175652994356:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.149515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:41.154026Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671175652994358:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:41.230836Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671175652994409:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 2912, MsgBus: 10138 2025-06-30T09:25:41.689428Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671175255263899:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:41.690242Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0041f1/r3tmp/tmpqnN0UB/pdisk_1.dat 2025-06-30T09:25:41.711243Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2912, node 2 2025-06-30T09:25:41.730930Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:41.730945Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize fro ... WARN: log.cpp:784: tablet_id=72075186224037961;self_id=[3:7521671191867503418:2615];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-30T09:25:44.912420Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671191867506469:5125] txid# 281474976715685, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-30T09:25:44.914767Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715686, at schemeshard: 72057594046644480 2025-06-30T09:25:44.915957Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715687:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:45.006479Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671196162473910:5214] txid# 281474976715689, issues: { message: "Check failed: path: \'/Root/RowSrc\', error: path exist, request doesn\'t accept it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp:722" severity: 1 } 2025-06-30T09:25:45.006824Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=3&id=NjYyMWU0OWUtZGViMjY1Y2EtYzMzOTRiMTYtMTkzYTM3Mjk=, ActorId: [3:7521671191867506443:3186], ActorState: ExecuteState, TraceId: 01jz02g8a0b2vp3mbtt25bdzzy, Create QueryResponse for error on request, msg: 2025-06-30T09:25:45.084543Z 
node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671196162473978:5242] txid# 281474976715691, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-30T09:25:45.093523Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:45.356423Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671196162474328:5393] txid# 281474976715697, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-30T09:25:45.365491Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715699:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 13351, MsgBus: 21602 2025-06-30T09:25:45.898591Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671196169527990:2090];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:45.900535Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0041f1/r3tmp/tmpJItnNd/pdisk_1.dat 2025-06-30T09:25:45.933062Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13351, node 4 2025-06-30T09:25:45.951159Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:45.951173Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:45.951175Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:45.951228Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:45.993882Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:45.993916Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:45.994576Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21602 TClient is connected to server localhost:21602 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:46.135505Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:46.139892Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:46.516111Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671200464495829:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.516146Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.516295Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671200464495841:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.517291Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:46.520595Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671200464495843:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:25:46.575978Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671200464495894:2326] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:46.587075Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.613087Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.735013Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715665, at schemeshard: 72057594046644480 2025-06-30T09:25:46.745462Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.898822Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:46.900522Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671200464496536:2691] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-30T09:25:46.902775Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715672, at schemeshard: 72057594046644480 2025-06-30T09:25:46.903908Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink-UseDataQuery >> KqpRanges::NullInPredicate [GOOD] >> KqpRanges::NullInPredicateRow >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1,1000,100,0] [GOOD] >> 
KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1,1000,1000000,0] >> KqpQuery::QueryExplain [GOOD] >> KqpQuery::QueryFromSqs >> KqpNewEngine::PkRangeSelect2 [GOOD] >> KqpNewEngine::OnlineRO_Consistent >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,0,1000,1000000,0] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,0,1000,1000000,0.5] >> KqpNamedExpressions::NamedExpressionRandomChanged+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged-UseSink >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[with_queues-tables_format_v0] [GOOD] >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[with_queues-tables_format_v1] |84.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |84.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/kqprun/kqprun |84.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/kqprun |84.3%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/kqprun >> TYdbControlPlaneStorageListBindings::ShouldCheckScopeVisibility [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCheckPrivateVisibility >> KqpNewEngine::MultiSelect |84.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel |84.3%| [LD] {RESULT} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel |84.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel >> KqpNewEngine::Truncated [GOOD] >> KqpNewEngine::Update+UseSink >> KqpMergeCn::TopSortBy_Date_Limit4 [GOOD] >> KqpMergeCn::TopSortByDesc_Datetime_Limit3 >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,0,1000,0.5,BLOOM_FILTER] [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,10,0,0,CATEGORY_BLOOM_FILTER] >> KqpNotNullColumns::UpdateNotNullPg [GOOD] >> KqpNotNullColumns::UpdateOnNotNull >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[with_queues-tables_format_v1] [GOOD] >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[without_queues-tables_format_v0] >> TYdbControlPlaneStorageListConnections::ShouldCombineFilters [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByConnectionType >> KqpNewEngine::DecimalColumn35 [GOOD] >> KqpNewEngine::DeleteByKey >> KqpSort::PassLimit [GOOD] >> KqpSort::Offset >> KqpExtractPredicateLookup::OverflowLookup [GOOD] >> KqpExtractPredicateLookup::SimpleRange >> KqpRanges::IsNull >> KqpNewEngine::OnlineRO_Consistent [GOOD] >> KqpNewEngine::OnlineRO_Inconsistent >> KqpQuery::ExecuteWriteQuery [GOOD] >> KqpNewEngine::KeyColumnOrder >> KqpNewEngine::SimpleUpsertSelect >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[without_queues-tables_format_v0] [GOOD] >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[without_queues-tables_format_v1] >> KqpNamedExpressions::NamedExpressionRandomChanged-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged2+UseSink >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1,1000,1000000,0] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1,1000,1000000,0.5] >> KqpQuery::QueryFromSqs [GOOD] >> TPersQueueTest::Delete [GOOD] >> TPersQueueTest::DisableWrongSettings >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[without_queues-tables_format_v1] [GOOD] |84.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> KqpRanges::NullInPredicateRow 
[GOOD] >> KqpRanges::NoFullScanAtScanQuery >> KqpNewEngine::Update+UseSink [GOOD] >> KqpNewEngine::StaleRO_IndexFollowers-EnableFollowers >> KqpReturning::ReturningWorksIndexedUpsert+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedUpsert-QueryService ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::ExecuteWriteQuery [GOOD] Test command err: Trying to start YDB, gRPC: 8269, MsgBus: 29806 2025-06-30T09:25:34.151120Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671146636178294:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:34.152636Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004217/r3tmp/tmpTHHFV7/pdisk_1.dat 2025-06-30T09:25:34.227008Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8269, node 1 2025-06-30T09:25:34.246590Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:34.246604Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:34.246607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:34.246652Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:34.253234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:34.253262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:34.254306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29806 TClient is connected to server localhost:29806 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:25:34.331812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:34.341652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.446825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:34.493314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.509006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.588141Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671146636179837:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.588172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.647632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.659988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.671245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.683973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.701272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.716546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.732177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.752426Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671146636180489:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.752467Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.752618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671146636180494:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.753829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:34.759620Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671146636180496:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:34.831265Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671146636180547:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:35.006674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 21840, MsgBus: 17747 2025-06-30T09:25:35.265981Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671152671106973:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:35.266004Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004217/r3tmp/tmpqsrKWT/pdisk_1.dat 2025-06-30T09:25:35.286109Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:35.286392Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notificat ... ersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:25:38.243120Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:38.283756Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.344373Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:38.379278Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:38.395428Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.575957Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671164022115898:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.575995Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.585840Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.618700Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.653558Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.673735Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.687982Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.699879Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.717866Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.787944Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671164022116553:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.787969Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.788043Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671164022116558:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.789037Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:38.791880Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:38.791954Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671164022116560:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:38.850176Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671164022116611:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:39.084426Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.122825Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:41.813217Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7521671168317084367:2469], Table: `/Root/.tmp/sessions/MjM3MGYzNWItYzYzZjRiYzEtMzc4ZWU1MDEtYWQ3MDE0NWM=/Root/test_table_cas_8088697762925560178` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-30T09:25:42.510467Z node 4 :GLOBAL WARN: log.cpp:784: fline=events.h:105;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability;tx_id=0; 2025-06-30T09:25:42.510903Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [4:7521671168317084367:2469], Table: `/Root/.tmp/sessions/MjM3MGYzNWItYzYzZjRiYzEtMzc4ZWU1MDEtYWQ3MDE0NWM=/Root/test_table_cas_8088697762925560178` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Got OVERLOADED for table `/Root/.tmp/sessions/MjM3MGYzNWItYzYzZjRiYzEtMzc4ZWU1MDEtYWQ3MDE0NWM=/Root/test_table_cas_8088697762925560178`. ShardID=72075186224037922, Sink=[4:7521671168317084367:2469]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability, code: 2006 } 2025-06-30T09:25:42.765546Z node 4 :GLOBAL WARN: log.cpp:784: fline=events.h:105;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability;tx_id=0; 2025-06-30T09:25:42.766855Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [4:7521671168317084367:2469], Table: `/Root/.tmp/sessions/MjM3MGYzNWItYzYzZjRiYzEtMzc4ZWU1MDEtYWQ3MDE0NWM=/Root/test_table_cas_8088697762925560178` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Got OVERLOADED for table `/Root/.tmp/sessions/MjM3MGYzNWItYzYzZjRiYzEtMzc4ZWU1MDEtYWQ3MDE0NWM=/Root/test_table_cas_8088697762925560178`. ShardID=72075186224037922, Sink=[4:7521671168317084367:2469]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability, code: 2006 } 2025-06-30T09:25:43.123122Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7521671164022114437:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:43.123160Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:25:43.880729Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7521671168317084367:2469], Table: `/Root/.tmp/sessions/MjM3MGYzNWItYzYzZjRiYzEtMzc4ZWU1MDEtYWQ3MDE0NWM=/Root/test_table_cas_8088697762925560178` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-30T09:25:44.307596Z --------------- Start update --------------- >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit+UseSink >> KqpNotNullColumns::UpdateOnNotNull [GOOD] >> KqpNotNullColumns::UpdateOnNotNullPg >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,0,1000,1000000,0.5] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,0,0] >> KqpSort::Offset [GOOD] >> KqpSort::OffsetPk >> KqpMergeCn::TopSortByDesc_Datetime_Limit3 [GOOD] >> KqpMergeCn::TopSortByDesc_Bool_And_PKUint64_Limit4 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::QueryFromSqs [GOOD] Test command err: Trying to start YDB, gRPC: 12628, MsgBus: 19031 2025-06-30T09:25:38.686978Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671165453666645:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:38.687010Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00420a/r3tmp/tmpF4kTEO/pdisk_1.dat 2025-06-30T09:25:38.758582Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671165453666623:2080] 1751275538686722 != 1751275538686725 2025-06-30T09:25:38.760907Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12628, node 1 2025-06-30T09:25:38.772270Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:38.772282Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:38.772285Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:38.772339Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19031 TClient is connected to server localhost:19031 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:25:38.835551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:38.835585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:38.836473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:38.862561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:38.868182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:38.876948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:38.951523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:38.975274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.993393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:39.126021Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671169748635536:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.126048Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.195582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.213050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.228123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.245025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.268667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.284526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.296864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.310944Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671169748636189:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.310958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671169748636194:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.310970Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.311918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:39.314609Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671169748636196:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:39.387650Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671169748636247:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:39.652041Z node 1 :GRPC_SERVER DEBUG: logger.cpp:36: [0x7379bcf6fc00] received request Name# ExecuteDataQuery ok# true data# session_id: "ydb://session/3?node_id=1&id=ZDUyN2M3OTktYjkwYjRmOTUtOTAwYmZkOC0xNGFkNDUyNQ==" tx_control { begin_tx { serializable_read_write { } } commit_tx: true } query { yql_text: "\n SELECT * FROM `/Root/TwoShard`;\n " } query_cache_policy { } operation_params { } peer# ipv6:%5B::1%5D:51804 2025-06-30T09:25:39.652071Z node 1 :GRPC_SERVER DEBUG: logger.cpp:36: [0x7379bcf71100] created request Name# ExecuteDataQuery 2025-06-30T09:25:39.652111Z node 1 :GRPC_SERVER DEBUG: logger.cpp:36: [0x7379bcf6fc00] received request without user token Name# ExecuteDataQuery data# session_id: "ydb://session/3?node_id=1&id=ZDUyN2M3OTktYjkwYjRmOTUtOTAwYmZkOC0xNGFkNDUyNQ==" tx_cont ... ":"1001"}],"Node Type":"Limit"}],"Operators":[{"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 28936, MsgBus: 3072 2025-06-30T09:25:48.725087Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671207011638076:2222];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00420a/r3tmp/tmpNac9uq/pdisk_1.dat 2025-06-30T09:25:48.735350Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:48.782660Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671207011637866:2080] 1751275548706281 != 1751275548706284 2025-06-30T09:25:48.785383Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28936, node 4 2025-06-30T09:25:48.799223Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:48.799244Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:48.799248Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:48.799322Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:48.831188Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:48.831231Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:48.835210Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient 
is connected to server localhost:3072 TClient is connected to server localhost:3072 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:48.991268Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:48.994319Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:49.000983Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:49.027341Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:49.077800Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:49.124292Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:49.489381Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671211306606754:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:49.489419Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:49.502919Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:49.522697Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:49.540115Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:49.570117Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:49.588234Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:49.608112Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:49.635319Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:49.703293Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671211306607411:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:49.703339Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:49.703422Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671211306607416:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:49.704689Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:49.706119Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:49.714241Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671211306607418:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:49.793305Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671211306607478:3400] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:50.023510Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpNewEngine::DeleteByKey [GOOD] >> KqpNewEngine::SimpleUpsertSelect [GOOD] >> KqpNewEngine::StaleRO+EnableFollowers >> KqpNewEngine::MultiSelect [GOOD] >> KqpNewEngine::MultiOutput >> KqpNewEngine::OnlineRO_Inconsistent [GOOD] >> KqpNewEngine::Nondeterministic >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1,1000,1000000,0.5] [GOOD] >> KqpNewEngine::KeyColumnOrder [GOOD] >> KqpNewEngine::KeyColumnOrder2 >> KqpNamedExpressions::NamedExpressionRandomChanged2+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged2-UseSink >> KqpRanges::IsNull [GOOD] >> KqpRanges::IsNotNullSecondComponent ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::DeleteByKey [GOOD] Test command err: Trying to start YDB, gRPC: 14909, MsgBus: 30468 2025-06-30T09:25:41.118677Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671178461556173:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:41.119927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001e0c/r3tmp/tmpQuy9dW/pdisk_1.dat 2025-06-30T09:25:41.202139Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14909, node 1 2025-06-30T09:25:41.218385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:41.218421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:41.219506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:41.228241Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:41.228258Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:41.228261Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:41.228311Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30468 TClient is connected to server localhost:30468 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:41.314250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:41.317908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:41.599386Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671178461556756:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.599425Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.646884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.687482Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671178461556856:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.687520Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.687631Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671178461556861:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.688712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:41.692011Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671178461556863:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:41.771737Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671178461556914:2385] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:41.996611Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_scan_query.cpp:410: Client lost Trying to start YDB, gRPC: 29787, MsgBus: 6221 2025-06-30T09:25:42.250126Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671182988243361:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:42.250153Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001e0c/r3tmp/tmpAl41xA/pdisk_1.dat 2025-06-30T09:25:42.267701Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29787, node 2 2025-06-30T09:25:42.295225Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:42.295243Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:42.295246Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:42.295308Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6221 2025-06-30T09:25:42.350846Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:42.350885Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:42.352118Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6221 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:42.367448Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:42.371581Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:42.385686Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:42.450094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:42.483375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:42.503581Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first call ... 
load_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:49.433262Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:49.498989Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 23075, MsgBus: 29466 2025-06-30T09:25:49.816185Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671209559966918:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:49.817070Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001e0c/r3tmp/tmpCC1Qca/pdisk_1.dat 2025-06-30T09:25:49.833364Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23075, node 7 2025-06-30T09:25:49.845053Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:49.845067Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:49.845069Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:49.845112Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29466 TClient is connected to server localhost:29466 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:49.928227Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:49.928261Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:49.928648Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:49.930121Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:25:49.931171Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:49.939731Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:49.963857Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:50.003183Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:50.022063Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:50.212953Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671213854935768:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.212992Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.222593Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.232705Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.243736Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.259289Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.274952Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.292556Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.314056Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.340285Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671213854936420:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.340325Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.340445Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671213854936425:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.341240Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:50.344257Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671213854936427:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:50.407827Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671213854936478:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:50.821188Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByConnectionType [GOOD] >> KqpNotNullColumns::CreateTableWithDisabledNotNullDataColumns ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,1,1000,1000000,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 7810, MsgBus: 65521 2025-06-30T09:25:41.711908Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671176743529082:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:41.713136Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003da8/r3tmp/tmpDNhEUh/pdisk_1.dat 2025-06-30T09:25:41.773277Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7810, node 1 2025-06-30T09:25:41.815000Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:41.815015Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:41.815018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:41.815069Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65521 2025-06-30T09:25:41.852979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:41.853009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:41.853678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:65521 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:41.914162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:41.923385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, Col3 UTF8, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 10); 2025-06-30T09:25:42.246034Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671181038496876:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.246064Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.314264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:25:42.342472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:42.345117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:42.345211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:25:42.345298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:25:42.345326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:25:42.345350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:25:42.345378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:25:42.345406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:25:42.345439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:25:42.345474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:25:42.345504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 
2025-06-30T09:25:42.345541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:25:42.345571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671181038496985:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:25:42.347293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:25:42.348948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:25:42.348999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:25:42.349038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:25:42.349064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:25:42.349103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:25:42.349141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:25:42.349165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:25:42.349189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:25:42.349228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:25:42.349265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[1:7521671181038496990:2300];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:25:42.352772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7521671181038496986:2296];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:42.352810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7521671181038496986:2296];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline ... 2808Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:50.929937Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:50.930116Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:50.930244Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:50.930369Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:50.930497Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:50.930624Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:50.930782Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:50.930914Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:50.931069Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:50.931090Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, 
`DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`false`, `COLUMNS_LIMIT`=`1`, `SPARSED_DETECTOR_KFF`=`1000`, `MEM_LIMIT_CHUNK`=`1000000`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:25:50.945605Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:50.946506Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671216943995837:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.947192Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.955396Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:50.955612Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:50.955771Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:50.955923Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:50.956069Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:50.956223Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:50.956371Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:50.956518Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:50.956660Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:25:50.956801Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2, Col3) VALUES(1u, JsonDocument('{"a" : "value_a", "b" : "b1", "c" : "c1"}'), "value1"), (2u, JsonDocument('{"a" : "value_a"}'), "value1"), (3u, JsonDocument('{"a" : "value_a", "b" : "value_b"}'), "value2"), (4u, JsonDocument('{"b" : "value_b", "a" : "a4"}'), "value4") 2025-06-30T09:25:50.983890Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671216943995904:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.983924Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.984041Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671216943995909:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.985206Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:50.989311Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:25:50.989397Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671216943995911:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:25:51.088046Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671221238963258:2673] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:51.155191Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:25:51.155191Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:25:51.155315Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:25:51.155417Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[7:7521671216943995445:2296];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037889;local_tx_no=18;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037893; 2025-06-30T09:25:51.155426Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[7:7521671216943995445:2296];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037889;local_tx_no=19;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037894; 2025-06-30T09:25:51.155435Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[7:7521671216943995445:2296];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037889;local_tx_no=20;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037893; 2025-06-30T09:25:51.155444Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[7:7521671216943995445:2296];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037889;local_tx_no=21;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037894; 2025-06-30T09:25:51.155500Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "value_a" AND Col3 = "value2" ORDER BY Col1; 2025-06-30T09:25:51.260827Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; COMPARE: [[3u;["{\"a\":\"value_a\",\"b\":\"value_b\"}"];["value2"]]] OUTPUT: [[3u;["{\"a\":\"value_a\",\"b\":\"value_b\"}"];["value2"]]] INDEX:8/0/0 HEADER:0/0/0 >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,10,0,0,CATEGORY_BLOOM_FILTER] [GOOD] >> 
KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,10,0,0,BLOOM_FILTER] >> KqpNotNullColumns::UpdateOnNotNullPg [GOOD] >> KqpNewEngine::MultiOutput [GOOD] >> KqpNewEngine::MultiStatement >> KqpSort::OffsetPk [GOOD] >> KqpSort::OffsetTopSort >> KqpNewEngine::ReadAfterWrite >> KqpNewEngine::InShardsWrite >> KqpMergeCn::TopSortByDesc_Bool_And_PKUint64_Limit4 [GOOD] >> KqpMergeCn::TopSortBy_Date_And_Datetime_Limit4 >> KqpNewEngine::Nondeterministic [GOOD] >> KqpNewEngine::OrderedScalarContext >> KqpLimits::QueryExecTimeoutCancel [GOOD] >> KqpLimits::QueryExecTimeout >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit+UseSink [GOOD] >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit-UseSink >> KqpReturning::ReturningWorksIndexedUpsert-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedReplace+QueryService ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::UpdateOnNotNullPg [GOOD] Test command err: Trying to start YDB, gRPC: 22644, MsgBus: 4920 2025-06-30T09:25:42.964330Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671183290309625:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:42.964346Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ddb/r3tmp/tmp5UQuSS/pdisk_1.dat 2025-06-30T09:25:43.022495Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22644, node 1 2025-06-30T09:25:43.049439Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:43.049452Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:43.049454Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:43.049512Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:43.065073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:43.065105Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:43.065963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4920 TClient is connected to server localhost:4920 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:25:43.133600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:43.140297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:43.485979Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671187585277499:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.486025Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.558900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.585611Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671187585277599:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.585647Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.585656Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671187585277604:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.586620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:43.588789Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671187585277606:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:43.643134Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671187585277657:2386] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:43.709917Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521671187585277721:2320], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:61: Error: At function: KiUpdateTable!
:1:61: Error: Cannot update primary key column: Key 2025-06-30T09:25:43.710030Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NGY2YzI3NzgtMmVhMWQwMmUtNDExOWFmYzEtZjAwMzc0Yjg=, ActorId: [1:7521671187585277495:2288], ActorState: ExecuteState, TraceId: 01jz02g74qd2dphvsqyqd9bs58, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-30T09:25:43.715327Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521671187585277730:2324], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:63: Error: At function: KiUpdateTable!
:1:63: Error: Cannot update primary key column: Key 2025-06-30T09:25:43.715425Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NGY2YzI3NzgtMmVhMWQwMmUtNDExOWFmYzEtZjAwMzc0Yjg=, ActorId: [1:7521671187585277495:2288], ActorState: ExecuteState, TraceId: 01jz02g74z6rjxaterwzsxh9w3, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 25668, MsgBus: 10278 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ddb/r3tmp/tmpkCWF9C/pdisk_1.dat 2025-06-30T09:25:44.176736Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671191049732606:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:44.179619Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:44.200248Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:44.202951Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521671191049732409:2080] 1751275544156612 != 1751275544156615 TServer::EnableGrpc on GrpcPort 25668, node 2 2025-06-30T09:25:44.239069Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:44.239087Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:44.239090Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:44.239148Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10278 2025-06-30T09:25:44.279578Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:44.279624Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:44.280426Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10278 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:44.377369Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:44.383191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:44.736774Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorI ... ARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:50.351946Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671216871076721:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.351979Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.359626Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.391744Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671216871076822:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.391791Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.391979Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671216871076827:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.393237Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:50.396888Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671216871076829:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:50.478321Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671216871076880:2387] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:50.535132Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7521671216871076940:2318], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:63: Error: At function: KiWriteTable!
:1:45: Error: Failed to convert type: Struct<'Key':Int32,'Value':Null> to Struct<'Key':Uint64?,'Value':String>
:1:45: Error: Failed to convert 'Value': Null to String
:1:45: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-30T09:25:50.535257Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=6&id=YTA4ZWJhYjEtYTczZWE4MmMtNTMwM2E0OWItNjYwYzQ0NA==, ActorId: [6:7521671216871076703:2288], ActorState: ExecuteState, TraceId: 01jz02gdt3evr0d0d6kks1z5b5, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 11574, MsgBus: 4053 2025-06-30T09:25:51.107578Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671219607098085:2087];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:51.109269Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ddb/r3tmp/tmprm1xxL/pdisk_1.dat 2025-06-30T09:25:51.156262Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11574, node 7 2025-06-30T09:25:51.187382Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:51.187418Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:51.187956Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:51.195076Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:51.195093Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:51.195097Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:51.195152Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4053 TClient is connected to server localhost:4053 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:51.369893Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:51.378956Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:52.074870Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671223902065941:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.074912Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.076750Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:52.092113Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:52.102270Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671223902066049:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.102294Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.102451Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671223902066054:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.103562Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:52.111404Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671223902066056:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:52.195809Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671223902066108:2389] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:52.304879Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [7:7521671223902066179:2288], TxId: 281474976715663, task: 1. Ctx: { TraceId : 01jz02gfgm0gd9dh06k9kzd8zv. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=OGFkZjJiYzMtNzc4YjgyOS0yOWQ0YWZkMC1mZTdhNzM1MQ==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: BAD_REQUEST KIKIMR_BAD_COLUMN_TYPE: {
: Error: Tried to insert NULL value into NOT NULL column: Value, code: 2031 }. 2025-06-30T09:25:52.305056Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=7&id=OGFkZjJiYzMtNzc4YjgyOS0yOWQ0YWZkMC1mZTdhNzM1MQ==, ActorId: [7:7521671223902065923:2288], ActorState: ExecuteState, TraceId: 01jz02gfgm0gd9dh06k9kzd8zv, Create QueryResponse for error on request, msg: >> KqpNotNullColumns::CreateTableWithDisabledNotNullDataColumns [GOOD] >> KqpNotNullColumns::InsertNotNull >> KqpNewEngine::KeyColumnOrder2 [GOOD] >> KqpNewEngine::LocksMultiShard >> KqpNamedExpressions::NamedExpressionRandomChanged2-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandom+UseSink >> KqpRanges::IsNotNullSecondComponent [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,0,0] [GOOD] >> KqpRanges::IsNotNullInValue >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,0,0.5] >> KqpNewEngine::Select1 >> KqpSort::TopSortParameter |84.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |84.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |84.3%| [LD] {RESULT} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut >> KqpNewEngine::MultiStatement [GOOD] >> KqpNewEngine::MultiStatementMixPure >> test_format_without_version.py::TestQueueWithoutVersionWithTenant::test_common[std] [GOOD] >> KqpNewEngine::OrderedScalarContext [GOOD] >> KqpNewEngine::PagingNoPredicateExtract |84.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |84.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |84.3%| [LD] {RESULT} $(B)/ydb/core/graph/ut/ydb-core-graph-ut >> KqpMergeCn::TopSortBy_Date_And_Datetime_Limit4 [GOOD] >> KqpMergeCn::SortBy_PK_Uint64_Desc >> KqpNotNullColumns::InsertNotNull [GOOD] >> KqpNotNullColumns::InsertFromSelect >> KqpNewEngine::ReadAfterWrite [GOOD] >> KqpNewEngine::Replace >> KqpSort::OffsetTopSort [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCheckPrivateVisibility [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCheckSuperUser >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit-UseSink [GOOD] >> KqpNewEngine::DependentSelect >> KqpNewEngine::InShardsWrite [GOOD] >> KqpNewEngine::Join >> KqpNewEngine::LocksMultiShard [GOOD] >> KqpNewEngine::LocksEffects >> KqpReturning::ReturningWorksIndexedReplace+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedReplace-QueryService >> KqpNewEngine::Select1 [GOOD] >> KqpNewEngine::ShuffleWrite ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpSort::OffsetTopSort [GOOD] Test command err: Trying to start YDB, gRPC: 28439, MsgBus: 1758 2025-06-30T09:25:43.826602Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671183661610300:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:43.827768Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d98/r3tmp/tmpMWV0FB/pdisk_1.dat 2025-06-30T09:25:43.915923Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28439, node 1 2025-06-30T09:25:43.928973Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:43.929007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:43.932703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:43.943133Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:43.943148Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:43.943150Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:43.943194Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1758 TClient is connected to server localhost:1758 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:44.028778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:44.035097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:44.045390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.077124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:44.108094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.126895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.267317Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671187956579174:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.267349Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.335264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.353913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.373568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.387832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.400228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.418253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.434239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.450300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671187956579830:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.450331Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.450509Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671187956579835:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.451696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:44.455379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-30T09:25:44.455498Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671187956579837:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:44.553469Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671187956579888:3401] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:44.828864Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 27297, MsgBus: 27358 2025-06-30T09:25:45.176160Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671193247940837:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:45.177184Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d98/r3tmp/tmpvMVikp/pdisk_1.dat 2025-06-30T09:25:45.197809Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Tabl ... SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671222496326522:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:52.259755Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671222496326573:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:52.307790Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 9628, MsgBus: 26593 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d98/r3tmp/tmp86WKbm/pdisk_1.dat 2025-06-30T09:25:52.916171Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:25:52.928379Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:52.929524Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7521671225999808080:2080] 1751275552894624 != 1751275552894627 TServer::EnableGrpc on GrpcPort 9628, node 7 2025-06-30T09:25:52.975177Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:52.975195Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:52.975198Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:52.975270Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:53.011338Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:53.011379Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:53.011990Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26593 TClient is connected to server localhost:26593 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-30T09:25:53.167612Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:53.169817Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:53.172245Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.188236Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.226359Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.245815Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.578883Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671230294776971:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.578917Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.588748Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.605153Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.660067Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.673027Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.686895Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.700825Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.715000Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.735956Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671230294777624:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.735979Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671230294777630:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.735982Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.736766Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:53.741387Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671230294777632:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:53.841173Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671230294777683:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:53.908340Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpNotNullColumns::InsertFromSelect [GOOD] >> KqpNotNullColumns::FailedMultiEffects >> KqpNewEngine::MultiStatementMixPure [GOOD] >> KqpNewEngine::MultiUsagePrecompute >> KqpNewEngine::PagingNoPredicateExtract [GOOD] >> KqpRanges::IsNotNullInValue [GOOD] >> KqpRanges::IsNotNullInJsonValue >> KqpMergeCn::SortBy_PK_Uint64_Desc [GOOD] >> KqpMergeCn::SortBy_Int32 >> KqpSort::TopSortParameter [GOOD] >> KqpSort::TopSortExpr |84.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |84.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |84.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,10,0,0,BLOOM_FILTER] [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,10,0,0.5,CATEGORY_BLOOM_FILTER] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,0,0.5] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,100,0] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink+UseDataQuery >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] [GOOD] >> KqpNewEngine::DependentSelect [GOOD] >> KqpNewEngine::DqSourceCount >> KqpNewEngine::Join [GOOD] >> KqpNewEngine::JoinIdxLookup >> test_queues_managing.py::TestQueuesManagingWithTenant::test_request_to_deleted_queue[tables_format_v1-std] >> KqpNewEngine::Replace [GOOD] >> KqpNewEngine::ReadRangeWithParams ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByConnectionType [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 167284 7528 ? Ss 08:27 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 08:27 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 08:27 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 08:27 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 08:27 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 08:27 0:00 [kworker/0:0H-events_highpri] root 9 1.5 0.0 0 0 ? I 08:27 0:54 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 08:27 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 08:27 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 08:27 0:00 [ksoftirqd/0] root 15 0.3 0.0 0 0 ? I 08:27 0:13 [rcu_sched] root 16 0.0 0.0 0 0 ? S 08:27 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 08:27 0:00 [idle_inject/0] root 18 0.0 0.0 0 0 ? I 08:27 0:00 [kworker/0:1-rcu_par_gp] root 19 0.0 0.0 0 0 ? 
[... remainder of harness process-stat listing (ps rows for kernel cpuhp/idle_inject/migration/ksoftirqd/kworker threads, pids 19-164) elided; log truncated here in the original output ...]
17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:50.150850Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:50.150873Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-30T09:25:50.150877Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:50.150878Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:50.151276Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/quotas". Create session OK 2025-06-30T09:25:50.151281Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:50.151282Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:50.152524Z node 17 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)" error: OVERLOADED {
: Error: Check failed: path: '/local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 262], type: EPathTypeDir, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155 } 2025-06-30T09:25:50.170878Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:25:50.170901Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:25:50.176801Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:25:50.176822Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:25:50.203882Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:25:50.203900Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:25:50.219557Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:25:50.219577Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-30T09:25:50.219645Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:25:50.219653Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:25:50.258247Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:25:50.258271Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:25:50.258317Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:25:50.258324Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:25:50.258586Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:25:50.258597Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:25:50.258604Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:25:50.258608Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:25:50.258698Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:25:50.258713Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:25:50.269362Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:25:50.269382Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:25:50.269821Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:25:50.269829Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:25:50.270201Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:25:50.270209Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:25:50.270332Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:25:50.270342Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:25:50.782842Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:50.782862Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:383: Call create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:50.796992Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:25:50.797024Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)": {
: Error: Check failed: path: '/local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 262], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155 } >> KqpNewEngine::ShuffleWrite [GOOD] >> KqpNewEngine::SelfJoin >> KqpNotNullColumns::FailedMultiEffects [GOOD] >> KqpNotNullColumns::CreateIndexedTableWithDisabledNotNullDataColumns ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PagingNoPredicateExtract [GOOD] Test command err: Trying to start YDB, gRPC: 31224, MsgBus: 24349 2025-06-30T09:25:45.469631Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671196271386606:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:45.469654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001cf8/r3tmp/tmpqnTJiX/pdisk_1.dat 2025-06-30T09:25:45.541747Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31224, node 1 2025-06-30T09:25:45.562963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:45.562978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:45.562980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:45.563029Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24349 2025-06-30T09:25:45.617384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:45.617417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:45.618325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24349 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:45.640559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:45.647202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:45.655820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:45.691729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:45.755109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:45.822034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:46.119502Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671200566355489:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.119543Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.178957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.194605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.210805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.227348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.240297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.253845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.277442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.319818Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671200566356139:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.319881Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.320117Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671200566356147:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.321331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:46.326251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:46.326370Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671200566356149:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:46.420375Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671200566356200:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:46.475174Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 18621, MsgBus: 65030 2025-06-30T09:25:47.200316Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671203459796173:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:47.210066Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001cf8/r3tmp/tmpcXpIFi/pdisk_1.dat 2025-06-30T09:25:47.256632Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: T ... called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:53.693838Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671229365517840:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:53.776407Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671229365517891:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 20268, MsgBus: 10673 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001cf8/r3tmp/tmpbUUcDF/pdisk_1.dat 2025-06-30T09:25:54.399974Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:25:54.414874Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20268, node 7 2025-06-30T09:25:54.431482Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:54.431496Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:54.431501Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:54.431549Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10673 2025-06-30T09:25:54.486548Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:54.486596Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:54.491306Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10673 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:54.555627Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:25:54.563261Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:54.579845Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:54.606084Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:54.646826Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:54.672887Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:54.866882Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671234779376035:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.866930Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.880471Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.937198Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.949475Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.961449Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.975295Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.989810Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:55.016569Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:55.082856Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671239074343993:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:55.082915Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:55.083100Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671239074343998:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:55.084017Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:55.086990Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:55.087148Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671239074344000:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:55.171232Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671239074344051:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:55.394834Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpRanges::NoFullScanAtScanQuery [GOOD] >> KqpRanges::NoFullScanAtDNFPredicate >> KqpNewEngine::LocksEffects [GOOD] >> KqpNewEngine::LeftSemiJoin >> KqpReturning::ReturningWorksIndexedReplace-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedOperationsWithDefault-QueryService >> KqpNewEngine::MultiUsagePrecompute [GOOD] >> KqpNewEngine::MultiUsageInnerConnection >> KqpRanges::IsNotNullInJsonValue [GOOD] >> KqpRanges::IsNotNullInJsonValue2 >> KqpMergeCn::SortBy_Int32 [GOOD] >> KqpNewEngine::JoinIdxLookup [GOOD] >> KqpNewEngine::JoinIdxLookupWithPredicate >> KqpRanges::UpdateMulti >> KqpSort::TopSortExpr [GOOD] >> KqpSort::TopSortExprPk >> KqpNotNullColumns::CreateIndexedTableWithDisabledNotNullDataColumns [GOOD] >> KqpNotNullColumns::Describe >> KqpNewEngine::DqSourceCount [GOOD] >> KqpNewEngine::DqSource >> KqpNewEngine::ReadRangeWithParams [GOOD] >> KqpNewEngine::ReadDifferentColumns >> KqpNamedExpressions::NamedExpressionRandom+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandom-UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpMergeCn::SortBy_Int32 [GOOD] Test command err: Trying to start YDB, gRPC: 3903, MsgBus: 30902 2025-06-30T09:25:45.173032Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671193237045648:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:45.173052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d10/r3tmp/tmpybfPcE/pdisk_1.dat 2025-06-30T09:25:45.246466Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3903, node 1 2025-06-30T09:25:45.273995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:45.274007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:45.274009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:45.274051Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30902 2025-06-30T09:25:45.324302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:45.324332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:45.325460Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30902 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:45.361127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:45.363613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:45.643301Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671193237046246:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:45.643335Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:45.684145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:45.759520Z node 1 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: Unknown table '/Root/WrongTable' Trying to start YDB, gRPC: 23082, MsgBus: 24358 2025-06-30T09:25:46.126096Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671197719792644:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:46.126113Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d10/r3tmp/tmpM2igKh/pdisk_1.dat 2025-06-30T09:25:46.151439Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23082, node 2 2025-06-30T09:25:46.178433Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:46.178448Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:46.178450Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:46.178500Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24358 2025-06-30T09:25:46.227213Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:46.227256Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:46.235044Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24358 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:46.291396Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:46.295461Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:25:46.303436Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.335798Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:46.384102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:46.411888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:46.610231Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671197719794180:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.610261Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.619480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.631574Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.645424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.661678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part propose ... tor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:55.569694Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275555610, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 27120, MsgBus: 6067 2025-06-30T09:25:55.864443Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7521671237396122046:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:55.865395Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d10/r3tmp/tmpS0sXH9/pdisk_1.dat 2025-06-30T09:25:55.892148Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27120, node 8 2025-06-30T09:25:55.907297Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:55.907309Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:55.907311Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:55.907354Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6067 2025-06-30T09:25:55.964673Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:55.964712Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:55.965863Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6067 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:25:56.028138Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:56.035900Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:56.042171Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:56.069577Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:56.140588Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:56.168220Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:56.293301Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521671241691090876:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:56.293414Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:56.298910Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:56.311446Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:56.321836Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:56.335128Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:56.349023Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:56.372273Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:56.389979Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:56.422761Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521671241691091528:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:56.422807Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:56.422912Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521671241691091533:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:56.424123Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:56.431962Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7521671241691091535:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:56.516026Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7521671241691091586:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:56.769930Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:56.868155Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:56.938905Z node 8 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275556982, txId: 281474976715674] shutting down |84.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |84.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |84.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign >> KqpNewEngine::SelfJoin [GOOD] >> KqpNewEngine::ScalarFunctions >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,100,0] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,100,0.5] >> KqpNewEngine::DeleteOn+UseSink |84.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot >> KqpReturning::ReturningWorksIndexedDelete+QueryService |84.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |84.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot >> KqpReturning::ReturningWorksIndexedOperationsWithDefault-QueryService [GOOD] >> KqpSort::ComplexPkExclusiveSecondOptionalPredicate >> TPersQueueTest::DisableWrongSettings [GOOD] >> TPersQueueTest::DisableDeduplication >> KqpRanges::NoFullScanAtDNFPredicate [GOOD] >> KqpRanges::MergeRanges >> KqpNotNullColumns::Describe [GOOD] >> KqpNotNullColumns::CreateTableWithNotNullColumns >> KqpRanges::UpdateMulti [GOOD] >> KqpRanges::UpdateWhereInBigLiteralList >> KqpNewEngine::MultiUsageInnerConnection [GOOD] >> KqpNewEngine::MultipleBroadcastJoin >> KqpSort::TopSortExprPk [GOOD] >> KqpSort::TopSortTableExpr >> KqpNewEngine::LeftSemiJoin [GOOD] >> KqpNewEngine::LocksInRoTx >> KqpRanges::IsNotNullInJsonValue2 [GOOD] >> KqpRanges::DuplicateKeyPredicateParam >> KqpNewEngine::JoinIdxLookupWithPredicate [GOOD] >> KqpNewEngine::ItemsLimit >> KqpKv::ReadRows_TimeoutCancelsReads [GOOD] >> KqpKv::ReadRows_PgValue >> KqpNotNullColumns::CreateTableWithNotNullColumns [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,10,0,0.5,CATEGORY_BLOOM_FILTER] [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,10,0,0.5,BLOOM_FILTER] |84.3%| 
[LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |84.3%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |84.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut >> KqpSqlIn::SimpleKey >> KqpNewEngine::ReadDifferentColumns [GOOD] >> KqpNewEngine::ReadDifferentColumnsPk >> KqpNewEngine::DqSource [GOOD] >> KqpNewEngine::DqSourceLiteralRange >> KqpNewEngine::DeleteOn+UseSink [GOOD] >> KqpNewEngine::DeleteOn-UseSink >> KqpReturning::ReturningWorksIndexedDelete+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedDelete-QueryService >> KqpSort::ComplexPkExclusiveSecondOptionalPredicate [GOOD] >> KqpSort::ComplexPkInclusiveSecondOptionalPredicate ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::CreateTableWithNotNullColumns [GOOD] Test command err: Trying to start YDB, gRPC: 25205, MsgBus: 2110 2025-06-30T09:25:52.611226Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671223296195330:2240];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001917/r3tmp/tmpruzV8Y/pdisk_1.dat 2025-06-30T09:25:52.719488Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:52.770541Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25205, node 1 2025-06-30T09:25:52.813297Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:52.813329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:52.814199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:52.820121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:52.820146Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:52.820148Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:52.820244Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2110 TClient is connected to server localhost:2110 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:52.913170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:52.917304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:53.127317Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671227591163030:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.127354Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.178823Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671227591163051:2298] txid# 281474976715658, issues: { message: "It is not allowed to create not null data column: Value" severity: 1 } 2025-06-30T09:25:53.186012Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671227591163059:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.186070Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.187624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 7435, MsgBus: 17019 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001917/r3tmp/tmp5C6Pcg/pdisk_1.dat 2025-06-30T09:25:53.528867Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671229220204512:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:53.528923Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:53.546974Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7435, node 2 2025-06-30T09:25:53.564600Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:53.564615Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:53.564617Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:53.564668Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17019 TClient is connected to server localhost:17019 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:53.638343Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:53.638372Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:53.638639Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:53.639127Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:25:53.640412Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:53.998842Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671229220205101:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.998936Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.000790Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.036576Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671233515172497:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.036611Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.036704Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671233515172502:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.037855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:54.042648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-30T09:25:54.042756Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521671233515172504:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:54.123212Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521671233515172555:238 ... 594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:56.739338Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:56.747124Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:57.087208Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671247167071840:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:57.087253Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:57.152877Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7521671247167071861:2297] txid# 281474976715658, issues: { message: "It is not allowed to create not null data column: Value" severity: 1 } 2025-06-30T09:25:57.159474Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7521671247167071869:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:57.159500Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:57.168015Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 16571, MsgBus: 27130 2025-06-30T09:25:57.638657Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7521671246551752462:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:57.638683Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001917/r3tmp/tmpeWKEEb/pdisk_1.dat 2025-06-30T09:25:57.660281Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16571, node 6 2025-06-30T09:25:57.679894Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:57.679909Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:57.679913Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:57.679970Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27130 TClient is connected to server localhost:27130 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:25:57.739156Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:57.739189Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:57.740310Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:57.744266Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:57.746016Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:58.059169Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671250846720334:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.059198Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.061437Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 20062, MsgBus: 24288 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001917/r3tmp/tmpeBIZVT/pdisk_1.dat 2025-06-30T09:25:58.582682Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 20062, node 7 2025-06-30T09:25:58.584166Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:58.614978Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:58.614996Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:58.614999Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:58.615052Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24288 2025-06-30T09:25:58.651159Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:58.651211Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:58.652377Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24288 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:58.731735Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:25:58.733171Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:59.010590Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpNewEngine::LocksInRoTx [GOOD] >> KqpNewEngine::LiteralKeys >> KqpRanges::DuplicateKeyPredicateParam [GOOD] >> KqpRanges::DuplicateKeyPredicateMixed >> KqpRanges::MergeRanges [GOOD] >> KqpSort::TopSortTableExpr [GOOD] >> KqpSort::TopSortTableExprOffset >> KqpRanges::UpdateWhereInBigLiteralList [GOOD] >> KqpRanges::UpdateWhereInBigLiteralListPrefix >> TYdbControlPlaneStorageListBindings::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCheckFilterByConnectionId >> KqpNewEngine::MultipleBroadcastJoin [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,100,0.5] [GOOD] >> IndexBuildTestReboots::BaseCaseWithDataColumns [GOOD] >> KqpNewEngine::DeleteOn-UseSink [GOOD] >> KqpNewEngine::DeleteWithBuiltin+UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpRanges::MergeRanges [GOOD] Test command err: Trying to start YDB, gRPC: 14963, MsgBus: 6060 2025-06-30T09:25:42.863118Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671179733286902:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:42.863274Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001de3/r3tmp/tmpaHfMht/pdisk_1.dat TServer::EnableGrpc on GrpcPort 14963, node 1 2025-06-30T09:25:42.949281Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:42.951595Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671179733286805:2080] 1751275542862151 != 1751275542862154 2025-06-30T09:25:42.966962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:42.966976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:42.966978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:42.967017Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6060 2025-06-30T09:25:43.011214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:43.011244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:43.012190Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6060 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:25:43.083924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:43.103873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:43.185416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.231823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:43.265728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:43.559855Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671184028255706:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.559892Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.627766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.650165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.661752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.674539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.733658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.752707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.769192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:43.792753Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671184028256359:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.792788Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.793023Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671184028256364:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:43.794373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:43.797954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:43.798085Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671184028256366:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:43.854227Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671184028256417:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:43.863365Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:44.065885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.109808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 720575940466 ... 6-30T09:25:58.059640Z node 9 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275558095, txId: 281474976715676] shutting down Trying to start YDB, gRPC: 16469, MsgBus: 27968 2025-06-30T09:25:58.525154Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7521671250814178852:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:58.527562Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001de3/r3tmp/tmp3k6eU8/pdisk_1.dat 2025-06-30T09:25:58.561632Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16469, node 10 2025-06-30T09:25:58.599512Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:58.599528Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:58.599530Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:58.599583Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:58.639500Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:58.639536Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:58.640230Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27968 TClient is connected to server localhost:27968 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:58.817821Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:58.820058Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:58.826678Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.857206Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.923239Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.953549Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:59.227067Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521671255109147715:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:59.227100Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:59.233334Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.245310Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.257804Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.270275Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.283536Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.344707Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.362913Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.379519Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521671255109148373:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:59.379547Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:59.379635Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7521671255109148378:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:59.380595Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:25:59.384279Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7521671255109148380:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:59.448676Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7521671255109148431:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:59.530812Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:59.648863Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.831433Z node 10 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275559873, txId: 281474976715674] shutting down >> KqpNewEngine::ReadDifferentColumnsPk [GOOD] >> KqpNewEngine::PushFlatmapInnerConnectionsToStageInput >> KqpNewEngine::DqSourceLiteralRange [GOOD] >> KqpNewEngine::DqSourceLimit >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookup [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::MultipleBroadcastJoin [GOOD] Test command err: Trying to start YDB, gRPC: 24307, MsgBus: 26023 2025-06-30T09:25:49.621963Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671212995190687:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:49.622104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001bf0/r3tmp/tmptoZFuP/pdisk_1.dat 2025-06-30T09:25:49.714872Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24307, node 1 2025-06-30T09:25:49.766962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:49.766977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:49.766980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:49.767029Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:49.774705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:49.774751Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:49.775317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26023 TClient is connected to server localhost:26023 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:49.904240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:49.911634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:25:49.923705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:49.979018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:50.051849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:50.072152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:50.281378Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671217290159468:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:50.281407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.339752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.350232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.411270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.423893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.483120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.502559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.560849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.580559Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671217290160129:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:50.580597Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:50.580759Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671217290160134:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:50.581816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:25:50.585891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480
2025-06-30T09:25:50.585986Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671217290160136:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:50.609165Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:50.671271Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671217290160196:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 4415, MsgBus: 19110 2025-06-30T09:25:51.463432Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671218470722326:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:51.475510Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001bf0/r3tmp/tmpMaEFbD/pdisk_1.dat 2025-06-30T09:25:51.519555Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Ta ... annot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001bf0/r3tmp/tmpj1gUvf/pdisk_1.dat 2025-06-30T09:25:58.725412Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27155, node 7 2025-06-30T09:25:58.759185Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:58.759200Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:58.759202Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:58.759248Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64560 2025-06-30T09:25:58.807746Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:58.807777Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:58.810722Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64560 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:58.835433Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:58.839485Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:25:58.849692Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.862351Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.897350Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.912129Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:59.129256Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671253071677013:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:59.129293Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:59.139667Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.149423Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.174425Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.234171Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.244502Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.258408Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.273287Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.289554Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671253071677666:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:59.289573Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671253071677671:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:59.289581Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:59.290240Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:25:59.292633Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671253071677673:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:59.356871Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671253071677724:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:59.536526Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.547845Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.560618Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.701414Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; [] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_delete_queue[tables_format_v0-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 31152, MsgBus: 28231 2025-06-30T09:25:46.090848Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671200466575830:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:46.092941Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003da7/r3tmp/tmpy69e09/pdisk_1.dat 2025-06-30T09:25:46.175221Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31152, node 1 2025-06-30T09:25:46.202821Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:46.202837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:46.202839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:46.202898Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
TClient is connected to server localhost:28231 2025-06-30T09:25:46.240164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:46.240204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:46.241482Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28231 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:25:46.319109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:46.322657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2); 2025-06-30T09:25:46.671464Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671200466576409:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:46.671497Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.718063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:25:46.733353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:46.733444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:25:46.733521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:25:46.733544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:25:46.733578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:25:46.733603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:25:46.733650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:25:46.733671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:25:46.733693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:25:46.733718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:25:46.733739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:25:46.733761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671200466576480:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:25:46.734317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:46.734357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:25:46.734405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:25:46.734424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:25:46.734445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:25:46.734469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:25:46.734490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:25:46.734510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:25:46.734533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:25:46.734552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:25:46.734571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:25:46.734592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671200466576492:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:25:46.735301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:25:46.735323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:25:46.735343Z nod ... ;ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=19649384644928;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275558773;max=18446744073709551615;plan=0;src=[6:7521671249692185156:2143];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:25:58.774259Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[6:7521671249692185509:2294];ev=NActors::IEventHandle;tablet_id=72075186224037889;tx_id=281474976715658;this=19649384611648;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275558774;max=18446744073709551615;plan=0;src=[6:7521671249692185156:2143];cookie=22:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:25:58.776882Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:25:58.778046Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:25:58.778179Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:25:58.779004Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `COMPACTION_PLANNER.CLASS_NAME`=`l-buckets`) 2025-06-30T09:25:58.802620Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671249692185612:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:58.802655Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.803042Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:58.811671Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:25:58.811836Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3"}')), (4u, JsonDocument('{"b" : "b4", "a" : "a4"}')) 2025-06-30T09:25:58.822968Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671249692185647:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:58.822993Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:58.823993Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671249692185652:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:58.824991Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:25:58.827889Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480
2025-06-30T09:25:58.827939Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671249692185654:2319], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:25:58.912310Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671249692185705:2427] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:58.932462Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=6;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:58.932515Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=6;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:58.939984Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-30T09:25:58.940090Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:25:58.947098Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:58.949571Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:25:58.949573Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`, `COLUMNS_LIMIT`=`1`, `SPARSED_DETECTOR_KFF`=`0`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:25:58.964783Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:25:58.971891Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715665; 
2025-06-30T09:25:58.972056Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715665;
EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(11u, JsonDocument('{"a" : "1a1"}')), (12u, JsonDocument('{"a" : "1a2"}')), (13u, JsonDocument('{"b" : "1b3"}')), (14u, JsonDocument('{"b" : "1b4", "a" : "a4"}'))
2025-06-30T09:25:59.003596Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=18;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled;
2025-06-30T09:25:59.003596Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=18;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled;
2025-06-30T09:25:59.010121Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667;
2025-06-30T09:25:59.010321Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667;
EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1) VALUES(10u)
2025-06-30T09:25:59.030769Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=26;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled;
2025-06-30T09:25:59.038234Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669;
WAIT_COMPACTION: 0
2025-06-30T09:25:59.227612Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
WAIT_COMPACTION: 0
WAIT_COMPACTION: 0
COMPACTION_HAPPENED: 0 -> 2
EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1;
COMPARE: [[1u;["{\"a\":\"a1\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4\"}"]];[10u;#];[11u;["{\"a\":\"1a1\"}"]];[12u;["{\"a\":\"1a2\"}"]];[13u;["{\"b\":\"1b3\"}"]];[14u;["{\"a\":\"a4\",\"b\":\"1b4\"}"]]]
OUTPUT: [[1u;["{\"a\":\"a1\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4\"}"]];[10u;#];[11u;["{\"a\":\"1a1\"}"]];[12u;["{\"a\":\"1a2\"}"]];[13u;["{\"b\":\"1b3\"}"]];[14u;["{\"a\":\"a4\",\"b\":\"1b4\"}"]]]
INDEX:0/0/0 HEADER:0/0/0
>> KqpSort::ComplexPkInclusiveSecondOptionalPredicate [GOOD]
>> KqpNewEngine::ItemsLimit [GOOD]
>> KqpNewEngine::JoinDictWithPure
>> KqpRanges::DuplicateKeyPredicateMixed [GOOD]
>> KqpSqlIn::SimpleKey [GOOD]
>> KqpSqlIn::SelectNotAllElements
>> KqpSqlIn::TableSource
>> KqpNewEngine::LiteralKeys [GOOD]
>> Initializer::Simple [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookup [GOOD]
Test command err: Trying to start YDB, gRPC: 13645, MsgBus: 13889
2025-06-30T09:25:07.426947Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671032132265501:2235];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:25:07.426980Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004255/r3tmp/tmpYzbdq2/pdisk_1.dat
2025-06-30T09:25:07.481143Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671032132265291:2080] 1751275507424957 != 1751275507424960
2025-06-30T09:25:07.482826Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 13645, node 1
2025-06-30T09:25:07.493305Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:25:07.493318Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:25:07.493321Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:25:07.493382Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:13889
2025-06-30T09:25:07.528781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:25:07.528827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:25:07.529597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:13889
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-30T09:25:07.570805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
waiting...
2025-06-30T09:25:07.584345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:07.650343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:07.726770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:07.751935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:07.995689Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671032132266910:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:07.995727Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:08.087496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:08.145046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:08.162177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:08.201966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:08.228421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:08.257843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:08.280370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:08.303863Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671036427234861:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:08.303904Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:08.304658Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671036427234866:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:08.306437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:25:08.313492Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671036427234868:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-06-30T09:25:08.371228Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671036427234919:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-30T09:25:08.427064Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2025-06-30T09:25:08.626215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
Trying to start YDB, gRPC: 7177, MsgBus: 18069
2025-06-30T09:25:09.926625Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671041022390151:2062];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:25:09.926675Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/init ... ession/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzaf9t0pxjtmzyypp0b8, Create QueryResponse for error on request, msg:
2025-06-30T09:25:35.724475Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzb6agnxkp4ks8gfnq8f, Create QueryResponse for error on request, msg:
2025-06-30T09:25:35.746880Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzbf9vb6xpv3qrdmk8mf, Create QueryResponse for error on request, msg:
2025-06-30T09:25:35.764858Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [4:7521671150769601354:2469] TxId: 281474976715680. Ctx: { TraceId: 01jz02fzc357s11h9w3kt3x4c3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 16ms } {
: Error: Cancelling after 16ms during execution } ]
2025-06-30T09:25:35.764904Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7521671150769601358:2517], TxId: 281474976715680, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jz02fzc357s11h9w3kt3x4c3. SessionId : ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7521671150769601354:2469], status: CANCELLED, reason: {
: Error: Terminate execution }
2025-06-30T09:25:35.765054Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7521671150769601359:2518], TxId: 281474976715680, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=. TraceId : 01jz02fzc357s11h9w3kt3x4c3. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7521671150769601354:2469], status: CANCELLED, reason: {
: Error: Terminate execution }
2025-06-30T09:25:35.765193Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzc357s11h9w3kt3x4c3, Create QueryResponse for error on request, msg:
2025-06-30T09:25:35.765426Z node 4 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037922 Cancelled read: {[4:7521671150769601361:2517], 0}
2025-06-30T09:25:35.784026Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzcp35z472hnykc9na5b, Create QueryResponse for error on request, msg:
2025-06-30T09:25:35.806059Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzdb143qmfvc7m452sz4, Create QueryResponse for error on request, msg:
2025-06-30T09:25:35.834873Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fze3b2nbqwh1x08wsnvz, Create QueryResponse for error on request, msg:
2025-06-30T09:25:35.858959Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzew7n9crcc6p9c0zw52, Create QueryResponse for error on request, msg:
2025-06-30T09:25:35.890523Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzfy69bt0zvbpfppm9fw, Create QueryResponse for error on request, msg:
2025-06-30T09:25:35.922868Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzgqf7rq8sfbg5c55p09, Create QueryResponse for error on request, msg:
2025-06-30T09:25:35.962913Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzj0dbnmqx9t83a3davp, Create QueryResponse for error on request, msg:
2025-06-30T09:25:36.005115Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzkafmapzetpm8fbszma, Create QueryResponse for error on request, msg:
2025-06-30T09:25:36.038362Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [4:7521671155064568730:2469] TxId: 281474976715685. Ctx: { TraceId: 01jz02fzm678c8e9ynsnwqyv3g, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 28ms } {
: Error: Cancelling after 31ms during execution } ]
2025-06-30T09:25:36.038462Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzm678c8e9ynsnwqyv3g, Create QueryResponse for error on request, msg:
2025-06-30T09:25:36.071041Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzn86tnsr5sef0c6fzwt, Create QueryResponse for error on request, msg:
2025-06-30T09:25:36.133650Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzq46f0v56sfhyfps02x, Create QueryResponse for error on request, msg:
2025-06-30T09:25:36.191196Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [4:7521671155064568782:2469] TxId: 281474976715689. Ctx: { TraceId: 01jz02fzrxe6hddyr61vcw4xwg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 33ms } {
: Error: Cancelling after 33ms during execution } ]
2025-06-30T09:25:36.191280Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzrxe6hddyr61vcw4xwg, Create QueryResponse for error on request, msg:
2025-06-30T09:25:36.228971Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzt2521sz0n9begv5hkd, Create QueryResponse for error on request, msg:
2025-06-30T09:25:36.355152Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [4:7521671155064568848:2469] TxId: 281474976715694. Ctx: { TraceId: 01jz02fzxv2fh2pffbsnvvzy5k, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 38ms } {
: Error: Cancelling after 39ms during execution } ]
2025-06-30T09:25:36.355739Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02fzxv2fh2pffbsnvvzy5k, Create QueryResponse for error on request, msg:
2025-06-30T09:25:36.439425Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02g00c1m0862m128bk5ye8, Create QueryResponse for error on request, msg:
2025-06-30T09:25:36.527062Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [4:7521671155064568905:2469] TxId: 281474976715698. Ctx: { TraceId: 01jz02g0336m9ewzzb3q0yzadd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 42ms } {
: Error: Cancelling after 43ms during execution } ]
2025-06-30T09:25:36.527157Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02g0336m9ewzzb3q0yzadd, Create QueryResponse for error on request, msg:
2025-06-30T09:25:36.657454Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02g0730mdfc718bxb0s0qx, Create QueryResponse for error on request, msg:
2025-06-30T09:25:36.810895Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02g0bwbqvvbmp1yzhbrq3k, Create QueryResponse for error on request, msg:
2025-06-30T09:25:37.864310Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02g1by2rrfpa6yy0sfmxvr, Create QueryResponse for error on request, msg:
2025-06-30T09:25:37.995851Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=Y2Q3MDNmMGMtMmY2MjczMmMtMTQ4NmQwMGQtNWNiZjVjZDk=, ActorId: [4:7521671150769601157:2469], ActorState: ExecuteState, TraceId: 01jz02g1fz82f1thxqysm4d6yb, Create QueryResponse for error on request, msg:
2025-06-30T09:25:39.623646Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7521671146474631315:2060];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:25:39.623679Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-06-30T09:25:49.638829Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs
2025-06-30T09:25:49.638860Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
>> KqpReturning::ReturningWorksIndexedDelete-QueryService [GOOD]
>> KqpReturning::ReturningWorksIndexedDeleteV2+QueryService
>> KqpNewEngine::ScalarFunctions [GOOD]
>> KqpNewEngine::ScalarMultiUsage
>> KqpSort::TopSortTableExprOffset [GOOD]
>> KqpSort::TopSortResults
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpSort::ComplexPkInclusiveSecondOptionalPredicate [GOOD]
Test command err: Trying to start YDB, gRPC: 27064, MsgBus: 14697
2025-06-30T09:25:48.247636Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671209083263085:2074];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:25:48.247655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001c48/r3tmp/tmpwnikg5/pdisk_1.dat
2025-06-30T09:25:48.333832Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 27064, node 1
2025-06-30T09:25:48.348144Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:25:48.348159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:25:48.348161Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:25:48.348211Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-30T09:25:48.348922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:25:48.348958Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:25:48.350005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:14697
TClient is connected to server localhost:14697
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-30T09:25:48.418623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
waiting...
2025-06-30T09:25:48.427184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-06-30T09:25:48.435968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:48.479522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:48.539591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:48.582824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:49.019018Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671213378231955:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:49.019055Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:49.133255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:49.170013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:49.240957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:49.251416Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2025-06-30T09:25:49.259167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:49.276690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:49.294343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:49.314252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:49.336428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671213378232621:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:49.336465Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:49.336714Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671213378232626:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:49.338053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:25:49.343025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480
2025-06-30T09:25:49.343218Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671213378232628:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-06-30T09:25:49.420459Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671213378232679:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-30T09:25:49.678932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:49.701256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, firs ... lpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671248395788008:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-06-30T09:25:59.018254Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671252690755355:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-30T09:25:59.345008Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
Trying to start YDB, gRPC: 18189, MsgBus: 24383
2025-06-30T09:25:59.716480Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671255307977537:2146];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:25:59.717313Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001c48/r3tmp/tmpyQS0QA/pdisk_1.dat
2025-06-30T09:25:59.737315Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 18189, node 7
2025-06-30T09:25:59.751363Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:25:59.751379Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:25:59.751382Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:25:59.751431Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:24383
TClient is connected to server localhost:24383
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-30T09:25:59.823250Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:25:59.823286Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:25:59.824456Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-30T09:25:59.824737Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
waiting...
2025-06-30T09:25:59.827099Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-06-30T09:25:59.832758Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:59.855702Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:59.880020Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
waiting...
2025-06-30T09:25:59.896173Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:00.130997Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671259602946320:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:00.131094Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:00.134411Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:00.144592Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:00.157574Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:00.178280Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:00.195586Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:00.214183Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:00.241104Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:00.273947Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671259602946973:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:00.273973Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:00.274134Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671259602946978:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:00.275078Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:26:00.277234Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671259602946980:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-06-30T09:26:00.328156Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671259602947031:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-30T09:26:00.717709Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
>> KqpRanges::UpdateWhereInBigLiteralListPrefix [GOOD]
>> KqpRanges::UpdateWhereInMultipleUpdate
>> KqpNewEngine::DeleteWithBuiltin+UseSink [GOOD]
>> KqpNewEngine::DeleteWithBuiltin-UseSink
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::LiteralKeys [GOOD]
Test command err: Trying to start YDB, gRPC: 31953, MsgBus: 31048
2025-06-30T09:25:50.235744Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671214680258765:2079];send_to=[0:7307199536658146131:7762515];
2025-06-30T09:25:50.237779Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a07/r3tmp/tmptw41zU/pdisk_1.dat
2025-06-30T09:25:50.335943Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-30T09:25:50.339341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-30T09:25:50.339385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-30T09:25:50.340722Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 31953, node 1
2025-06-30T09:25:50.366633Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-30T09:25:50.366651Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-30T09:25:50.366653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-30T09:25:50.366706Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:31048
TClient is connected to server localhost:31048
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-30T09:25:50.487944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
waiting...
2025-06-30T09:25:50.493936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
waiting...
2025-06-30T09:25:50.496486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:50.591684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:50.670278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:25:50.699318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
waiting...
2025-06-30T09:25:50.759112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671214680260314:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:25:50.759151Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.821269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.833344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.846170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.865145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.888282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.906636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.929598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:51.009541Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671218975228269:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:51.009568Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:51.009813Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671218975228274:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:51.011117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:51.016470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:51.016578Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671218975228276:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:51.078644Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671218975228327:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:51.240038Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:51.389472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 2903, MsgBus: 19964 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a07/r3tmp/tmpouvM8o/pdisk_1.dat 2025-06-30T09:25:51.907021Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExis ... ctions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:59.377662Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671253081057439:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:59.472368Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671253081057490:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 13626, MsgBus: 5550 2025-06-30T09:25:59.992592Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671256173315461:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:59.994088Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a07/r3tmp/tmp7nNbdf/pdisk_1.dat 2025-06-30T09:26:00.024206Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13626, node 7 2025-06-30T09:26:00.041821Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:00.041844Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:00.041846Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:00.041907Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5550 2025-06-30T09:26:00.093155Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:00.093199Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:00.094313Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5550 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
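A note on the WARN/ERROR churn above: this is the expected first-query bootstrap of the workload manager's default resource pool, not a test failure. The pool fetcher reports NOT_FOUND, an ESchemeOpCreateResourcePool suboperation is proposed, a racing creator then hits "path exist, request accepts it" (logged at severity 1 but benign, since the create request explicitly tolerates an existing path), and the creator schedules a "doublechecking" retry until the pool becomes visible. Below is a minimal C++ sketch of that tolerate-the-race shape, with hypothetical FetchPool/CreatePool stand-ins simulating the scheme state; it is not the real TPoolCreatorActor machinery.

    // Sketch only: hypothetical FetchPool/CreatePool helpers simulate the
    // behaviour seen in the log; the real logic lives in TPoolCreatorActor.
    #include <chrono>
    #include <iostream>
    #include <string>
    #include <thread>

    enum class EStatus { Ok, NotFound, AlreadyExists };

    static int g_created = 0;  // simulated scheme state

    EStatus FetchPool(const std::string&, const std::string&) {
        return g_created ? EStatus::Ok : EStatus::NotFound;  // NOT_FOUND at first
    }

    EStatus CreatePool(const std::string&, const std::string&) {
        // A concurrent creator may have won the race: "path exist,
        // request accepts it" maps to AlreadyExists here, and both
        // outcomes count as success.
        return g_created++ ? EStatus::AlreadyExists : EStatus::Ok;
    }

    bool EnsureDefaultPool(const std::string& db) {
        for (int attempt = 0; attempt < 5; ++attempt) {
            if (FetchPool(db, "default") == EStatus::Ok) {
                return true;  // pool visible, bootstrap done
            }
            EStatus st = CreatePool(db, "default");
            if (st != EStatus::Ok && st != EStatus::AlreadyExists) {
                return false;  // a real error, give up
            }
            // "doublechecking": back off, then re-fetch before trusting it.
            std::this_thread::sleep_for(std::chrono::milliseconds(50 << attempt));
        }
        return false;
    }

    int main() {
        std::cout << (EnsureDefaultPool("/Root") ? "pool ready" : "failed") << "\n";
    }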
2025-06-30T09:26:00.124605Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:00.130658Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:00.170075Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:00.247891Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:00.324192Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.349030Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:00.591247Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671260468284320:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.591287Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.607932Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.622499Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.637982Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.650078Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.661973Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.677073Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.689413Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.714978Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671260468284973:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.715018Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.716430Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671260468284978:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.717834Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:00.721695Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671260468284980:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:00.784085Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671260468285031:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:00.996209Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpNamedExpressions::NamedExpressionRandom-UseSink [GOOD] >> LocalPartitionReader::Booting ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpRanges::DuplicateKeyPredicateMixed [GOOD] Test command err: Trying to start YDB, gRPC: 30792, MsgBus: 11213 2025-06-30T09:25:50.079842Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671216160817014:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:50.079868Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a33/r3tmp/tmpS8jyk9/pdisk_1.dat 2025-06-30T09:25:50.157450Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30792, node 1 2025-06-30T09:25:50.169159Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:25:50.180704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:50.180731Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:50.181500Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:50.185153Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:50.185156Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:50.185158Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:50.185190Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11213 TClient is connected to server localhost:11213 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:50.274303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:50.277237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:50.278620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:50.348260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.397914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:50.420681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:50.527023Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671216160818570:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.527058Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.584849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.597778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.607617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.622031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.637053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.652173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.666312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.688585Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671216160819222:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.688625Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.688813Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671216160819227:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.690263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:50.694500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-30T09:25:50.694730Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671216160819229:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:50.744521Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671216160819280:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:51.082109Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:51.087855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:51.228413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, bu ... tions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:59.390699Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671254302274989:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:59.480080Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671254302275040:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 5719, MsgBus: 16448 2025-06-30T09:26:00.035297Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671260596851172:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:00.036259Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001a33/r3tmp/tmpkuyZgE/pdisk_1.dat 2025-06-30T09:26:00.059238Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5719, node 7 2025-06-30T09:26:00.065520Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:00.065537Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:00.065543Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:00.065594Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16448 2025-06-30T09:26:00.125869Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:00.125910Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:00.127483Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16448 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
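The truncated PathDescription dump above is the test harness's TClient::Ls, i.e. a scheme describe of /Root. The same check can be made against a running node with the public C++ SDK's scheme client; the sketch below assumes the in-tree SDK header layout and reuses the GrpcPort from this log (header paths and field names may differ across SDK versions).

    // Sketch: what TClient::Ls("/Root") amounts to, done through the public
    // C++ SDK scheme client instead of the test harness. Header paths assume
    // the in-tree SDK layout.
    #include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
    #include <ydb/public/sdk/cpp/client/ydb_scheme/scheme.h>
    #include <iostream>

    int main() {
        NYdb::TDriver driver(NYdb::TDriverConfig()
            .SetEndpoint("localhost:5719")   // GrpcPort from the log above
            .SetDatabase("/Root"));
        NYdb::NScheme::TSchemeClient scheme(driver);

        auto result = scheme.DescribePath("/Root").GetValueSync();
        if (!result.IsSuccess()) {
            std::cerr << "describe failed: " << result.GetIssues().ToString() << "\n";
            driver.Stop(true);
            return 1;
        }
        const auto& entry = result.GetEntry();
        // Mirrors the Self{} block of the Ls response: name and owner.
        std::cout << entry.Name << " owner=" << entry.Owner << "\n";
        driver.Stop(true);
        return 0;
    }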
2025-06-30T09:26:00.151561Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:00.155161Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:00.168034Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:00.236050Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:00.270195Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:00.290415Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:00.571234Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671260596852742:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.571261Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.578446Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.591151Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.604622Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.662917Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.673188Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.688831Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.747748Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.764985Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671260596853400:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.765016Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.765167Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671260596853405:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.766062Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:00.770403Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671260596853407:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:00.828003Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671260596853458:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:01.026054Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> LocalPartitionReader::Booting [GOOD] >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,10,0,0.5,BLOOM_FILTER] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> Initializer::Simple [GOOD] Test command err: 2025-06-30T09:24:56.475317Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:107:2153], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:24:56.475425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-30T09:24:56.475444Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004190/r3tmp/tmpHA0emc/pdisk_1.dat 2025-06-30T09:24:56.592648Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 9742, node 1 TClient is connected to server localhost:19131 2025-06-30T09:24:56.625706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:24:56.647217Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:24:56.648777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:24:56.648803Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:24:56.648809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:24:56.648999Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:24:56.649118Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1751275496009302 != 1751275496009306 2025-06-30T09:24:56.695267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:24:56.695318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:24:56.706155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:06.799286Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:647:2538], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:06.799328Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:657:2543], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:06.799344Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:06.800739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:06.892099Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:661:2546], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-30T09:25:06.912643Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:06.965561Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:730:2584] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:07.022158Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:740:2593], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/test]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:25:07.023666Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=ZmU5OGE2MTktOTU5OWFkMTYtZmM5Y2ZjNzEtM2E1ZTc2Nzc=, ActorId: [1:638:2530], ActorState: ExecuteState, TraceId: 01jz02f33e6ca89rs1d9gt3c3p, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: REQUEST=SELECT * FROM `/Root/.metadata/test`;RESULT=
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/test]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 ;EXPECTATION=0 REQUEST=SELECT * FROM `/Root/.metadata/test`;EXPECTATION=0 2025-06-30T09:25:07.095130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:07.492127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:25:07.609413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:08.013606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Initialization finished 2025-06-30T09:25:18.508489Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jz02fega9kfv234cmjkn5fsn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY1YzlhMDQtMzk2OWZmYTQtMjA0NzNmYTEtODg2ZTRmYTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT * FROM `/Root/.metadata/test`;RESULT=;EXPECTATION=1 REQUEST=SELECT * FROM `/Root/.metadata/test`;EXPECTATION=1 REQUEST=DROP TABLE `/Root/.metadata/test`;EXPECTATION=0;WAITING=1 2025-06-30T09:25:29.527880Z node 1 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [1:1294:2988] txid# 281474976715678, Access denied for root@builtin on path /Root/.metadata/test, with access RemoveSchema 2025-06-30T09:25:29.527964Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1294:2988] txid# 281474976715678, issues: { message: "Access denied for root@builtin on path /Root/.metadata/test" issue_code: 200000 severity: 1 } REQUEST=DROP TABLE `/Root/.metadata/test`;RESULT=
: Error: Execution, code: 1060
:1:12: Error: Executing DROP TABLE
: Error: Access denied., code: 2018
: Error: Access denied for root@builtin on path /Root/.metadata/test, code: 200000 ;EXPECTATION=0 FINISHED_REQUEST=DROP TABLE `/Root/.metadata/test`;EXPECTATION=0;WAITING=1 2025-06-30T09:25:39.936081Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715681. Ctx: { TraceId: 01jz02g3e26fm1k294rkvg1fsx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWE1N2ZmZTQtNTMxNmFlMTItOGEzYTVkYmQtMjFmNDAzNmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=DELETE FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 REQUEST=DELETE FROM `/Root/.metadata/initialization/migrations`;RESULT=
: Fatal: ydb/core/kqp/host/kqp_host.cpp:979 ExecuteDataQuery(): requirement false failed, message: Unexpected query type for execute script action: Ddl, code: 1 ;EXPECTATION=0 FINISHED_REQUEST=DELETE FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 REQUEST=DROP TABLE `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 2025-06-30T09:26:00.822659Z node 1 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [1:1462:3106] txid# 281474976715686, Access denied for root@builtin on path /Root/.metadata/initialization/migrations, with access RemoveSchema 2025-06-30T09:26:00.822790Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1462:3106] txid# 281474976715686, issues: { message: "Access denied for root@builtin on path /Root/.metadata/initialization/migrations" issue_code: 200000 severity: 1 } REQUEST=DROP TABLE `/Root/.metadata/initialization/migrations`;RESULT=
: Error: Execution, code: 1060
:1:12: Error: Executing DROP TABLE
: Error: Access denied., code: 2018
: Error: Access denied for root@builtin on path /Root/.metadata/initialization/migrations, code: 200000 ;EXPECTATION=0 FINISHED_REQUEST=DROP TABLE `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 |84.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |84.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |84.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats >> KqpKv::ReadRows_PgValue [GOOD] >> KqpKv::ReadRows_PgKey >> KqpNewEngine::PushFlatmapInnerConnectionsToStageInput [GOOD] >> KqpNewEngine::PushPureFlatmapInnerConnectionsToStage >> KqpNewEngine::DqSourceLimit [GOOD] |84.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::Booting [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandom-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 25624, MsgBus: 25995 2025-06-30T09:25:44.066754Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671191332936066:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:44.068499Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d73/r3tmp/tmp5Lq9ot/pdisk_1.dat 2025-06-30T09:25:44.143372Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:44.143451Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671191332936032:2080] 1751275544065785 != 1751275544065788 TServer::EnableGrpc on GrpcPort 25624, node 1 2025-06-30T09:25:44.162943Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:44.162960Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:44.162963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:44.163010Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25995 TClient is connected to server localhost:25995 2025-06-30T09:25:44.215682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:44.215711Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:44.217685Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... 
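The Initializer::Simple output above also documents the guard rails around system paths: SELECT from /Root/.metadata/... succeeds once initialization finishes, while DROP TABLE and DELETE are rejected ("Access denied ... with access RemoveSchema", or an unexpected-query-type failure for the DELETE). A client should treat those statuses as terminal rather than retrying. A sketch with the SDK table client follows, assuming the in-tree header layout and the GrpcPort from the Initializer::Simple log.

    // Sketch: issuing a DDL statement and treating the access-denied
    // outcome as terminal. Header paths assume the in-tree C++ SDK layout.
    #include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
    #include <ydb/public/sdk/cpp/client/ydb_table/table.h>
    #include <iostream>

    int main() {
        NYdb::TDriver driver(NYdb::TDriverConfig()
            .SetEndpoint("localhost:9742")   // GrpcPort from the Initializer log
            .SetDatabase("/Root"));
        NYdb::NTable::TTableClient client(driver);

        auto session = client.CreateSession().GetValueSync().GetSession();
        auto status = session.ExecuteSchemeQuery(
            "DROP TABLE `/Root/.metadata/test`").GetValueSync();
        if (!status.IsSuccess()) {
            // Expected here: the schemeshard denies RemoveSchema on
            // /Root/.metadata paths, so this status must not be retried.
            std::cerr << status.GetIssues().ToString();
        }
        driver.Stop(true);
        return status.IsSuccess() ? 0 : 1;
    }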
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:44.247141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:25:44.259937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.331174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.362818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.430953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.547757Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671191332937637:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.547796Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.623688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.683492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.695491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.708692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.720683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.735260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.748700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.776123Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671191332938293:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.776159Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.776299Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671191332938298:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.777650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:44.781608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:44.781713Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671191332938300:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:44.856399Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671191332938351:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:45.069712Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 31758, MsgBus: 21148 2025-06-30T09:25:45.639381Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671195470819153:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:45.639399Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d73/r3tmp/tmpx8umnI/pdisk_1.dat 2025-06-30T09:25:45.655799Z node 2 :IMPORT WARN: schemeshard_imp ... Changes), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } [[["ab05a4f2-8614-4d44-a8d7-c31cbed09840"]];[["bad9cd36-d7e2-4145-9059-f30f9edf694b"]]] [[["ab05a4f2-8614-4d44-a8d7-c31cbed09840"]];[["bad9cd36-d7e2-4145-9059-f30f9edf694b"]]] [["93b32400-2c14-4438-90a2-732922c4c12e"];["b3b4f52b-fb31-4eac-97ee-f6aaa52d93f4"]] Trying to start YDB, gRPC: 21172, MsgBus: 30166 2025-06-30T09:26:00.482291Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7521671258415235060:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:00.483305Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d73/r3tmp/tmpZr0Esx/pdisk_1.dat 2025-06-30T09:26:00.503061Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21172, node 12 2025-06-30T09:26:00.535771Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:00.535786Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:00.535789Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:00.535839Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30166 2025-06-30T09:26:00.582831Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:00.582880Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:00.584023Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 
TClient is connected to server localhost:30166 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:00.630676Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:00.635453Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:00.645090Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:00.656859Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:00.683695Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.704268Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:00.875029Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7521671258415236604:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.875058Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.886270Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.902395Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.915335Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.932305Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.944712Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.964540Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.980114Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.019845Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7521671262710204558:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:01.019881Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:01.020057Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7521671262710204563:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:01.020962Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:01.024253Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:26:01.024336Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7521671262710204565:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:01.084743Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7521671262710204616:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:01.484269Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; [[["9c3a47ef-d1a4-4605-a390-c373b857028b"]];[["c467efd1-7792-40bc-938b-7c5599f669d6"]]] [[["9c3a47ef-d1a4-4605-a390-c373b857028b"]];[["c467efd1-7792-40bc-938b-7c5599f669d6"]]] [["52219ad6-1824-4579-83d1-8ecdf05d275d"];["8d52390b-308f-4bbc-9989-2ce48a60a7ae"]] >> KqpOlapJson::OrFilterVariants[2,true,0,0,0,0] >> KqpExtractPredicateLookup::SimpleRange [GOOD] >> KqpExtractPredicateLookup::PointJoin >> KqpNewEngine::JoinDictWithPure [GOOD] >> KqpNewEngine::IdxLookupExtractMembers >> KqpSqlIn::SelectNotAllElements [GOOD] >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And >> KqpOlapJson::EmptyVariants[1,true,1024,10,100,0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::BloomCategoryIndexesVariants[false,false,1024,10,0,0.5,BLOOM_FILTER] [GOOD] Test command err: Trying to start YDB, gRPC: 25404, MsgBus: 16460 2025-06-30T09:25:41.633426Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671176436009572:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:41.634928Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003ba8/r3tmp/tmpK3a1N4/pdisk_1.dat 2025-06-30T09:25:41.723335Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:41.723523Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671176436009379:2080] 1751275541630722 != 1751275541630725 TServer::EnableGrpc on GrpcPort 25404, node 1 2025-06-30T09:25:41.736816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:41.736833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:41.736837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:41.736891Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16460 2025-06-30T09:25:41.791119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:41.791158Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:41.792655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16460 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:41.833107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:41.842021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2); 2025-06-30T09:25:42.108612Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671180730977298:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.108665Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:42.153799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:25:42.164921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:42.164997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:25:42.165045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:25:42.165065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:25:42.165081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:25:42.165098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:25:42.165116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:25:42.165137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:25:42.165154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:25:42.165173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:25:42.165189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:25:42.165209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671180730977369:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:25:42.165748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:25:42.165761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:25:42.165781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:25:42.165788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:25:42.165801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:25:42.165807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:25:42.165815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:25:42.165817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:25:42.165823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:25:42.165828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:25:42.165846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:25:42.165853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:25:42.165872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:25:42.165884Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:25:42.165892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:25:42.165899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_swi ... pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:59.881875Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671253737979399:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:59.882931Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:59.888781Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671253737979401:2324], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:25:59.966522Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671253737979452:2454] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:59.991511Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=8;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:59.991516Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=8;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:25:59.998542Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:25:59.998769Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(11u, JsonDocument('{"a.b.c" : "1a1"}')), (12u, JsonDocument('{"a.b.c" : "1a2"}')), (13u, JsonDocument('{"b.c.d" : "1b3"}')), (14u, JsonDocument('{"b.c.d" : "1b4", "a" : "a4"}')) 2025-06-30T09:26:00.043454Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=16;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:00.045677Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=16;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:00.055780Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715666;tx_id=281474976715666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715666; 2025-06-30T09:26:00.056108Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715666;tx_id=281474976715666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715666; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_INDEX, NAME=a_index, TYPE=BLOOM_FILTER, FEATURES=`{"column_name" : "Col2", "false_positive_probability" : 0.01}`) 2025-06-30T09:26:00.068279Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:00.071380Z node 7 :TX_COLUMNSHARD_TX WARN: 
log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-30T09:26:00.071650Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1) VALUES(10u) 2025-06-30T09:26:00.097304Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=26;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:00.102313Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-30T09:26:00.120010Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:00.123011Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715670;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715670; 2025-06-30T09:26:00.123207Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715670;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715670; waiting actualization: 5/0.000014s 2025-06-30T09:26:00.299826Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:00.803048Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521671253737979224:2294];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037888;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:00.803586Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[7:7521671253737979224:2294];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:00.810719Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;task_id=3c8f5094-559411f0-9330a565-d38cd3fb;tablet_id=72075186224037888;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:00.817831Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[7:7521671253737979236:2295];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037889;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:00.818222Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[7:7521671253737979236:2295];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:00.821363Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;task_id=3c9191ec-559411f0-9d72f52b-3d2752dc;tablet_id=72075186224037889;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"a.b.c\"") = "a1" ORDER BY Col1; COMPARE: [[1u;["{\"a.b.c\":\"a1\"}"]]] OUTPUT: [[1u;["{\"a.b.c\":\"a1\"}"]]] INDEX:0/4/1 HEADER:0/0/0 EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=DROP_INDEX, NAME=a_index) 2025-06-30T09:26:01.297201Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:01.303672Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:26:01.303741Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"a.b.c\"") = "1a1" ORDER BY Col1; COMPARE: [[11u;["{\"a.b.c\":\"1a1\"}"]]] OUTPUT: [[11u;["{\"a.b.c\":\"1a1\"}"]]] INDEX:0/4/1 HEADER:0/0/0 EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_INDEX, NAME=b_index, TYPE=CATEGORY_BLOOM_FILTER, FEATURES=`{"column_name" : "Col2", "false_positive_probability" : 0.01}`) 2025-06-30T09:26:01.403883Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:01.407185Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715674; 2025-06-30T09:26:01.407400Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715674; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") = "1b4" ORDER BY Col1; COMPARE: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]] OUTPUT: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]] INDEX:0/4/1 HEADER:0/0/0 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "1b5" ORDER BY Col1; COMPARE: [] OUTPUT: [] INDEX:0/5/0 HEADER:0/0/0 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "a4" ORDER BY Col1; COMPARE: [[4u;["{\"a\":\"a4\",\"b.c.d\":\"b4\"}"]];[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]] OUTPUT: [[4u;["{\"a\":\"a4\",\"b.c.d\":\"b4\"}"]];[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]] INDEX:0/3/2 
HEADER:0/0/0 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d111\"") = "1b5" ORDER BY Col1; COMPARE: [] OUTPUT: [] INDEX:0/5/0 HEADER:0/0/0 >> KqpSqlIn::TableSource [GOOD] >> KqpSqlIn::SimpleKey_Negated >> TYdbControlPlaneStorageListBindings::ShouldCheckFilterByConnectionId [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCombineFilters ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::DqSourceLimit [GOOD] Test command err: Trying to start YDB, gRPC: 62864, MsgBus: 8020 2025-06-30T09:25:51.250949Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671220338507054:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:51.253397Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001930/r3tmp/tmpDHBdWp/pdisk_1.dat 2025-06-30T09:25:51.487441Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:51.499789Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:51.499824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:51.511234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62864, node 1 2025-06-30T09:25:51.558901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:51.558914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:51.558916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:51.558964Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8020 TClient is connected to server localhost:8020 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:51.755895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:51.759926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:51.772330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:51.833042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:51.876263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:51.912515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:52.161295Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671224633475893:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.161328Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.189735Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:52.284666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:52.308119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:52.331686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:52.351652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:52.368781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:52.392962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:52.417453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:52.453446Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671224633476558:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.453480Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.453771Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671224633476563:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.454903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:52.459190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:52.459292Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671224633476565:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:52.520155Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671224633476616:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 20700, MsgBus: 20709 2025-06-30T09:25:53.240762Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671229102862522:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:53.240840Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001930/r3tmp/tmphhs1HP/pdisk_1.dat 2025-06-30T09:25:53.271878Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Tabl ... led at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:00.078045Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671259816434316:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:00.160704Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671259816434367:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:00.456959Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 1194, MsgBus: 23286 2025-06-30T09:26:00.845129Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671260410967286:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:00.845153Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001930/r3tmp/tmp8rC9uF/pdisk_1.dat 2025-06-30T09:26:00.875370Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1194, node 7 2025-06-30T09:26:00.887728Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:00.887741Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:00.887743Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:00.887802Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23286 TClient is connected to server localhost:23286 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:00.955563Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:00.955597Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting waiting... 
2025-06-30T09:26:00.956192Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:00.957485Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:26:00.970029Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:00.997819Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:01.053358Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:01.079613Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.395510Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671264705936136:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:01.395547Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:01.408009Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.429197Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.450068Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.465325Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.488010Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.504724Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.520113Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.541560Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671264705936789:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:01.541587Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:01.541776Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671264705936794:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:01.542833Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:01.547191Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671264705936796:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:01.618547Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671264705936847:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:01.844824Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpNewEngine::ScalarMultiUsage [GOOD] >> KqpNewEngine::SequentialReadsPragma+Enabled >> KqpNewEngine::DeleteWithBuiltin-UseSink [GOOD] >> KqpNewEngine::DeleteON >> KqpReturning::ReturningWorksIndexedDeleteV2+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedDeleteV2-QueryService >> KqpKv::ReadRows_PgKey [GOOD] >> KqpKv::ReadRows_Nulls >> KqpNewEngine::StaleRO+EnableFollowers [GOOD] >> KqpNewEngine::StaleRO-EnableFollowers >> KqpNewEngine::PushPureFlatmapInnerConnectionsToStage [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[without_queues-tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink+UseDataQuery >> KqpRanges::UpdateWhereInMultipleUpdate [GOOD] >> KqpRanges::UpdateWhereInFullScan+UseSink |84.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> KqpNewEngine::IdxLookupExtractMembers [GOOD] >> KqpOlapJson::OrFilterVariants[2,true,0,0,0,0] [GOOD] >> KqpOlapJson::OrFilterVariants[2,true,0,0,0,0.5] >> KqpKv::ReadRows_Nulls [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PushPureFlatmapInnerConnectionsToStage [GOOD] Test command err: Trying to start YDB, gRPC: 31889, MsgBus: 63696 2025-06-30T09:25:53.087355Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671230243823837:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:53.087626Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001865/r3tmp/tmpjJ4eS7/pdisk_1.dat 2025-06-30T09:25:53.196884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:53.196919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:53.199075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Connecting -> Connected 2025-06-30T09:25:53.204861Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31889, node 1 2025-06-30T09:25:53.242968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:53.242985Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:53.242987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:53.243045Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63696 TClient is connected to server localhost:63696 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:53.432463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:53.441171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:53.451549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.528834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
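[Editor's note] The sqs py3test block quoted a little earlier in this section reports a DeprecationWarning from ydb/tests/library/sqs/requests_client.py:140: logging.Logger.warn() is a deprecated alias, and the warning itself names the fix ("use 'warning' instead"). A minimal sketch of that fix, keeping the exact message from the test output; the code/reason/text parameter names below are hypothetical placeholders for whatever the helper actually has in scope:

    import logging

    logger = logging.getLogger(__name__)

    def report_failed_request(code, reason, text):
        # Same message as in the test output, but emitted via the supported
        # logging.Logger.warning() instead of the deprecated warn() alias.
        logger.warning("Last request failed with code {}, reason '{}' and text '{}'".format(
            code, reason, text))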
2025-06-30T09:25:53.559896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.573404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.615388Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671230243825323:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.615420Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.675489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.685920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.698318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.708433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.765684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.778901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.794595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.812227Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671230243825976:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.812256Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.812266Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671230243825981:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.813182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:53.819182Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671230243825983:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:53.885198Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671230243826034:3402] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:54.091360Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 19678, MsgBus: 7210 2025-06-30T09:25:54.568177Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671234633886904:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:54.568242Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001865/r3tmp/tmpmqKcsU/pdisk_1.dat 2025-06-30T09:25:54.645333Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521671234633886806:2080] 1751275554563931 != 1751275554563934 2025-06-30T09:25:54.651773Z node 2 :IMPORT WARN: schemeshard_impo ... ons_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:01.473586Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671263988439615:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:01.536291Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671263988439666:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:01.790302Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 64015, MsgBus: 30830 2025-06-30T09:26:02.215797Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671267014908699:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:02.216704Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001865/r3tmp/tmpq8Gxsm/pdisk_1.dat 2025-06-30T09:26:02.234253Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64015, node 7 2025-06-30T09:26:02.242629Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:02.242647Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:02.242649Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:02.242703Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30830 TClient is connected to server localhost:30830 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:26:02.313808Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:02.313841Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:02.316550Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:02.319880Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:02.327147Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:02.359623Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:02.400826Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.427716Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:02.497977Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:02.647114Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671267014910251:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:02.647144Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:02.654761Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.678843Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.689099Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.703088Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.760928Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.776108Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.837043Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.864663Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671267014910909:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:02.864693Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:02.864790Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671267014910914:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:02.865701Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:02.873338Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671267014910916:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:02.953009Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671267014910967:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } |84.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] [GOOD] >> KqpLimits::CancelAfterRwTx+useSink [GOOD] >> KqpLimits::CancelAfterRwTx-useSink |84.4%| [TA] $(B)/ydb/services/metadata/initializer/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpNewEngine::SequentialReadsPragma+Enabled [GOOD] >> KqpNewEngine::SequentialReadsPragma-Enabled >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And [GOOD] >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And_In >> KqpSqlIn::SimpleKey_Negated [GOOD] >> KqpSqlIn::TupleParameter ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::IdxLookupExtractMembers [GOOD] Test command err: Trying to start YDB, gRPC: 24708, MsgBus: 22276 2025-06-30T09:25:53.083839Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671227370986526:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:53.085566Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001827/r3tmp/tmpZkBnZ3/pdisk_1.dat 2025-06-30T09:25:53.149738Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:53.164281Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 24708, node 1 2025-06-30T09:25:53.184746Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:53.184757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:53.184759Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:53.184801Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:53.185005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:53.185048Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:53.186036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22276 TClient is connected to server localhost:22276 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:53.286399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:53.289849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:25:53.297910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.325127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.356589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.375992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.672957Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671227370988075:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.672995Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.727666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.734999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.790365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.799299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.805683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.819666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.834193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.852937Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671227370988728:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.852963Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.853087Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671227370988733:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.853967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:53.861115Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671227370988735:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:53.945912Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671227370988786:3401] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:54.087362Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 16131, MsgBus: 32121 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001827/r3tmp/tmpHP2SFP/pdisk_1.dat 2025-06-30T09:25:54.702601Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671234315657940:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:54.704483Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:54.723014Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc ... 6715669 completed, doublechecking } 2025-06-30T09:26:01.990265Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671264198068612:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:02.268485Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 11004, MsgBus: 22334 2025-06-30T09:26:02.671173Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671266207349409:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:02.671464Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001827/r3tmp/tmpptFL0l/pdisk_1.dat 2025-06-30T09:26:02.688544Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11004, node 7 2025-06-30T09:26:02.702963Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:02.702978Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:02.702980Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:02.703035Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22334 TClient is connected to server localhost:22334 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:02.771888Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:02.771923Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:02.772330Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:02.773002Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:02.774825Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:02.779258Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:02.801723Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:02.833250Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:02.849462Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:03.181791Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671270502318252:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:03.181819Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:03.195793Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:03.207909Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:03.222634Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:03.236949Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:03.249758Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:03.265527Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:03.283423Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:03.302167Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671270502318906:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:03.302200Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:03.302285Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671270502318911:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:03.303424Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:03.306389Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:26:03.306633Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671270502318913:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:03.390367Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671270502318964:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:03.675378Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |84.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction >> KqpOlapJson::RestoreFullJsonVariants[1,true,1024,0,0,0] >> KqpSort::TopSortResults [GOOD] >> KqpSort::UnionAllSortLimit >> KqpNewEngine::DeleteON [GOOD] >> KqpNewEngine::DeleteWithInputMultiConsumption+UseSink |84.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |84.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |84.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |84.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |84.4%| [TA] {RESULT} $(B)/ydb/services/metadata/initializer/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpKv::ReadRows_Nulls [GOOD] Test command err: Trying to start YDB, gRPC: 17283, MsgBus: 30134 2025-06-30T09:25:44.323256Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671190660084165:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:44.323290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d37/r3tmp/tmpTT1ga2/pdisk_1.dat 2025-06-30T09:25:44.410663Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17283, node 1 2025-06-30T09:25:44.450958Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:44.450974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:44.450976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:44.451018Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:44.478020Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:44.478063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:44.482647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
TClient is connected to server localhost:30134 TClient is connected to server localhost:30134 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:44.553672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:44.557577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:44.827334Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671190660084751:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.827365Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.880331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) IsSuccess(): 1 GetStatus(): SUCCESS Trying to start YDB, gRPC: 7858, MsgBus: 62618 2025-06-30T09:25:45.246396Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671192763096747:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:45.248224Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d37/r3tmp/tmpKGupbr/pdisk_1.dat 2025-06-30T09:25:45.275745Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7858, node 2 2025-06-30T09:25:45.305773Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:45.305786Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:45.305788Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:45.305845Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62618 2025-06-30T09:25:45.355151Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:45.355184Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:45.360388Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62618 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:45.374072Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:45.375803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:45.699025Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671192763097323:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:45.699087Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:45.700618Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) IsSuccess(): 1 GetStatus(): SUCCESS [] IsSuccess(): 1 GetStatus(): SUCCESS 2025-06-30T09:25:45.732230Z node 2 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: no keys are found in request's proto Trying to start YDB, gRPC: 1393, MsgBus: 30008 2025-06-30T09:25:46.018463Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7521671198798696202:2246];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d37/r3tmp/tmpMYuIfa/pdisk_1.dat 2025-06-30T09:25:46.020885Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:46.043643Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1393, node 3 2025-06-30T09:25:46.070953Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:46.070967Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:46.070968Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:46.071009Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30008 2025-06-30T09:25:46.116709Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:46.116743Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:46.118405Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30008 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".s ... 
er/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.828741Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715756:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.829748Z node 6 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037936 not found 2025-06-30T09:26:01.859772Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715758:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.864047Z node 6 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037937 not found 2025-06-30T09:26:01.888112Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.894863Z node 6 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037938 not found 2025-06-30T09:26:01.975849Z node 6 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037939 not found Trying to start YDB, gRPC: 26449, MsgBus: 25380 2025-06-30T09:26:02.255732Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671268967815470:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:02.255761Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d37/r3tmp/tmpEGBmzI/pdisk_1.dat 2025-06-30T09:26:02.282005Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26449, node 7 2025-06-30T09:26:02.303507Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:02.303521Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:02.303523Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:02.303571Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25380 2025-06-30T09:26:02.362126Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:02.362176Z node 7 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:25380 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-30T09:26:02.364962Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:02.378558Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:02.383398Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:02.675804Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.809669Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.827233Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-06-30T09:26:02.848038Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.855144Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-06-30T09:26:02.881233Z node 7 :HIVE WARN: hive_impl.cpp:495: HIVE#72057594037968897 Handle 
TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found Trying to start YDB, gRPC: 28358, MsgBus: 30090 2025-06-30T09:26:03.235355Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7521671271896354444:2246];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:03.237668Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d37/r3tmp/tmpHi3Wss/pdisk_1.dat 2025-06-30T09:26:03.250723Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28358, node 8 2025-06-30T09:26:03.267039Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:03.267049Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:03.267050Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:03.267084Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30090 TClient is connected to server localhost:30090 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:03.321012Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:26:03.323025Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:03.338178Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:03.338215Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:03.339422Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:03.797498Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7521671271896354818:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:03.797533Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:03.801703Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) IsSuccess(): 1 GetStatus(): SUCCESS >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start >> KqpOlapJson::EmptyVariants[1,true,1024,10,100,0] [GOOD] >> KqpOlapJson::EmptyVariants[1,true,1024,10,100,0.5] |84.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |84.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |84.4%| [LD] {RESULT} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |84.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_purge_queue_batch[tables_format_v1] >> KqpReturning::ReturningWorksIndexedDeleteV2-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedInsert+QueryService >> KqpOlapJson::OrFilterVariants[2,true,0,0,0,0.5] [GOOD] >> KqpOlapJson::OrFilterVariants[2,true,0,0,100,0] |84.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> KqpRanges::UpdateWhereInFullScan+UseSink [GOOD] >> KqpRanges::UpdateWhereInFullScan-UseSink >> KqpNewEngine::SequentialReadsPragma-Enabled [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_read_message[std] >> KqpOlapJson::RestoreFullJsonVariants[1,true,1024,0,0,0] [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[1,true,1024,0,0,0.5] |84.4%| [TA] $(B)/ydb/core/backup/impl/ut_local_partition_reader/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_create_queue[tables_format_v0-fifo] >> KqpSort::UnionAllSortLimit [GOOD] >> KqpNewEngine::DeleteWithInputMultiConsumption+UseSink [GOOD] >> KqpNewEngine::DeleteWithInputMultiConsumption-UseSink >> TYdbControlPlaneStorageListBindings::ShouldCombineFilters [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_deduplication_table[tables_format_v0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::SequentialReadsPragma-Enabled [GOOD] Test command err: Trying to start YDB, gRPC: 17105, MsgBus: 5385 2025-06-30T09:25:53.984465Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671229958373718:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:53.984725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0017ca/r3tmp/tmpKpDIpw/pdisk_1.dat 2025-06-30T09:25:54.093471Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:54.104276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:54.104306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:54.107511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17105, node 1 2025-06-30T09:25:54.158973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:54.158991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:54.158993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:54.159038Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5385 TClient is connected to server localhost:5385 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:54.305514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:54.326927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:54.518139Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671234253341588:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.518170Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.518357Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671234253341600:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.519336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:54.522442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:25:54.522520Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671234253341602:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:25:54.620018Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671234253341653:2328] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 19366, MsgBus: 3225 2025-06-30T09:25:55.107708Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671237751784917:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:55.109394Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0017ca/r3tmp/tmp7vqwzD/pdisk_1.dat 2025-06-30T09:25:55.123175Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19366, node 2 2025-06-30T09:25:55.143879Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:55.143896Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:55.143898Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:55.143945Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3225 TClient is connected to server localhost:3225 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:55.207092Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:55.207132Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:55.207451Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:55.208159Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:25:55.210016Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:55.215039Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:55.248109Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:55.269204Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:55.285164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:55.549169Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671237751786454:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:55.549199Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:55.562318Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 2814 ... WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671273419063498:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:03.931539Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671273419063549:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:04.170808Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:04.198988Z node 6 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037892 Cancelled read: {[6:7521671277714031144:2479], 2} Trying to start YDB, gRPC: 18583, MsgBus: 23059 2025-06-30T09:26:04.513208Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671276536055839:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:04.515073Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0017ca/r3tmp/tmpyxMEN5/pdisk_1.dat 2025-06-30T09:26:04.541669Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18583, node 7 2025-06-30T09:26:04.551612Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:04.551625Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:04.551628Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:04.551955Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23059 TClient is connected to server localhost:23059 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:04.624153Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:04.624172Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:04.624551Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:04.625160Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:26:04.626145Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:04.632175Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:04.644883Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:04.682410Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:04.752413Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:04.958915Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671276536057401:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:04.958958Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:04.969501Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:04.984462Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:04.994451Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.006146Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.022068Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.035138Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.049741Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.064858Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671280831025348:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:05.064887Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671280831025353:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:05.064895Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:05.065909Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:05.075546Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671280831025355:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:05.169285Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671280831025406:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> KqpSqlIn::TupleParameter [GOOD] >> KqpSqlIn::TupleLiteral >> KqpOlapJson::OrFilterVariants[2,true,0,0,100,0] [GOOD] >> KqpOlapJson::OrFilterVariants[2,true,0,0,100,0.5] >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And_In [GOOD] >> KqpSqlIn::SecondaryIndex_TupleParameter |84.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut >> SelfHeal::TestOneFaultyMaintenanceRequestOneMaintenanceRequest4Plus2Block [GOOD] >> Shred::Basic >> TPersQueueTest::DisableDeduplication [GOOD] |84.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |84.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |84.4%| [TA] {RESULT} $(B)/ydb/core/backup/impl/ut_local_partition_reader/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [LD] {RESULT} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpSort::UnionAllSortLimit [GOOD] Test command err: Trying to start YDB, gRPC: 23292, MsgBus: 19805 2025-06-30T09:25:54.178608Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671230827018858:2227];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0017a2/r3tmp/tmpnqoY9f/pdisk_1.dat 2025-06-30T09:25:54.190988Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:54.247231Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:54.249283Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671230827018667:2080] 1751275554058929 != 1751275554058932 TServer::EnableGrpc on GrpcPort 23292, node 1 2025-06-30T09:25:54.278956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:54.278969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:54.278973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:54.279018Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:54.299508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:54.299545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:54.307110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Connecting -> Connected TClient is connected to server localhost:19805 TClient is connected to server localhost:19805 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:54.440099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:54.451116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:54.459510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:54.507189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:54.600632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:54.615232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.675262Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671230827020279:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.675290Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.737108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.753347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.767888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.787885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.806667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.836947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.860607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:54.888241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671230827020933:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.888277Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.891414Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671230827020938:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:54.892749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:54.897643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-30T09:25:54.897730Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671230827020940:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:54.972064Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671230827020991:3402] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:55.058814Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 23750, MsgBus: 21149 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0017a2/r3tmp/tmpx3L0ro/pdisk_1.dat 2025-06-30T09:25:55.832735Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:25:55.841958Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles ... ault\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:03.129055Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 30248, MsgBus: 15426 2025-06-30T09:26:04.710111Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671273900552169:2088];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:04.710346Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0017a2/r3tmp/tmpW5J4V4/pdisk_1.dat 2025-06-30T09:26:04.727481Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30248, node 7 2025-06-30T09:26:04.739641Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:04.739655Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:04.739658Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:04.739704Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15426 TClient is connected to server localhost:15426 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:04.814265Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:04.814296Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:04.814625Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:04.815143Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:04.818881Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:04.825174Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:04.840505Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:04.863961Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:04.874987Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:05.110874Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671278195520983:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:05.110924Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:05.119571Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.132650Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.190977Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.203624Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.261254Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.325726Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.349127Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:05.374864Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671278195521644:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:05.374907Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:05.375042Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671278195521649:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:05.379818Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:05.383260Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:26:05.383331Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671278195521651:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:05.456526Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671278195521702:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:05.712077Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> AnalyzeColumnshard::AnalyzeServerless [GOOD] >> Shred::Basic [GOOD] >> SnapshotTesting::Compaction >> KqpOlapJson::RestoreFullJsonVariants[1,true,1024,0,0,0.5] [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[1,true,1024,0,100,0] >> ImportBigEncryptedFileTest::ImportBigEncryptedFile [GOOD] >> test_queue_counters.py::TestSqsGettingCounters::test_counters_when_sending_duplicates >> KqpOlapJson::FilterVariants[2,true,0,0,1000000,0] >> KqpOlapJson::EmptyVariants[1,true,1024,10,100,0.5] [GOOD] >> KqpOlapJson::EmptyVariants[1,true,1024,10,1000000,0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeServerless [GOOD] Test command err: 2025-06-30T09:23:17.910147Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:420:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:23:17.910202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:23:17.910214Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0045a6/r3tmp/tmpjx8nt4/pdisk_1.dat 2025-06-30T09:23:18.003024Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10750, node 1 2025-06-30T09:23:18.170968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:18.171002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:18.171009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:18.171277Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:23:18.172208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:23:18.288424Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:18.288457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:18.300649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6926 2025-06-30T09:23:18.706478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:23:19.692197Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-30T09:23:19.712366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:19.712423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:19.771924Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:23:19.772634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:19.950861Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:19.973941Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.974078Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.974183Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.974239Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.974276Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.974291Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.974302Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.974317Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:19.974330Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:23:20.126867Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:20.126918Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:20.139034Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:20.185032Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:20.197453Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-30T09:23:20.197493Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-30T09:23:20.208511Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-30T09:23:20.208786Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-30T09:23:20.208813Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-30T09:23:20.208820Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-30T09:23:20.208826Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-30T09:23:20.208833Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-30T09:23:20.208838Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-30T09:23:20.208846Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-30T09:23:20.208983Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-30T09:23:20.226282Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7960: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:23:20.226318Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7990: ConnectToSA(), pipe client id: [2:1794:2561], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:23:20.228140Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1804:2570] 2025-06-30T09:23:20.229326Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1829:2582] 2025-06-30T09:23:20.229432Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1829:2582], schemeshard id = 72075186224037897 2025-06-30T09:23:20.231588Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-30T09:23:20.236950Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-30T09:23:20.236978Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-30T09:23:20.236992Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-30T09:23:20.240770Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:23:20.242884Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-30T09:23:20.242920Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-30T09:23:20.352229Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-30T09:23:20.466318Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-30T09:23:20.531289Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-30T09:23:21.271094Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:23:21.300442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:23:21.889354Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:21.990039Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7905: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-30T09:23:21.990082Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7921: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-30T09:23:21.990094Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7990: ConnectToSA(), pipe client id: [2:2505:2904], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-30T09:23:21.990503Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2513:2908] 2025-06-30T09:23:21.990530Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2513:2908], schemeshard id = 72075186224037899 2025-06-30T09:23:22.969515Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2633:3201], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:22.969561Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06- ... ice] [TPoolFetcherActor] ActorId: [2:8244:6068], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:02.926967Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:8254:6073], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:02.927249Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Shared, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:02.931465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:02.965639Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:8258:6076], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-30T09:26:03.172364Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:8356:6124] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:03.188133Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8385:6139]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:26:03.188240Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-30T09:26:03.188258Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8387:6141] 2025-06-30T09:26:03.188276Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8387:6141] 2025-06-30T09:26:03.188428Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8388:6142] 2025-06-30T09:26:03.188502Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8387:6141], server id = [2:8388:6142], tablet id = 72075186224037894, status = OK 2025-06-30T09:26:03.188521Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8388:6142], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-30T09:26:03.188545Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-30T09:26:03.188590Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-30T09:26:03.188609Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8385:6139], StatRequests.size() = 1 2025-06-30T09:26:03.220367Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OGY1YzE0Yi1hNGYxYjY4ZS01MmViZjUyYS1hZGI5NTJhMQ==, TxId: 2025-06-30T09:26:03.220404Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OGY1YzE0Yi1hNGYxYjY4ZS01MmViZjUyYS1hZGI5NTJhMQ==, TxId: 2025-06-30T09:26:03.220613Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-30T09:26:03.235634Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-30T09:26:03.235672Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-30T09:26:03.293286Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-30T09:26:03.293333Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-30T09:26:03.367610Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8387:6141], schemeshard count = 1 2025-06-30T09:26:03.635205Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8087: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-30T09:26:03.635249Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7930: Schedule next SendBaseStatsToSA in 196.000000s, at schemeshard: 72075186224037899 2025-06-30T09:26:03.635359Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 28 2025-06-30T09:26:03.655489Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-30T09:26:04.605961Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-30T09:26:04.606011Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 2025-06-30T09:26:04.607176Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-30T09:26:04.619801Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-30T09:26:04.620016Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-30T09:26:04.620029Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037899, LocalPathId: 2], AnalyzedShards 1 2025-06-30T09:26:04.633232Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-30T09:26:04.659203Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-30T09:26:04.659658Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-30T09:26:04.659709Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-30T09:26:04.676279Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-30T09:26:06.000822Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-30T09:26:06.000900Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 
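The force traversal below ends by persisting the collected column statistics with a data query (printed after "RunDataQuery"). Reformatted here for readability; note the element types of $column_tags and $data are not legible in this capture (the log prints a bare "List"), so the inner types shown are assumptions:

    DECLARE $owner_id AS Uint64;
    DECLARE $local_path_id AS Uint64;
    DECLARE $stat_type AS Uint32;
    DECLARE $column_tags AS List<Uint32>;  -- inner type assumed; log shows bare List
    DECLARE $data AS List<String>;         -- inner type assumed; log shows bare List

    UPSERT INTO `.metadata/_statistics`
        (owner_id, local_path_id, stat_type, column_tag, data)
    VALUES
        ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
        ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);
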
2025-06-30T09:26:06.000907Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-30T09:26:06.001140Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-30T09:26:06.018351Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-30T09:26:06.018558Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-30T09:26:06.018581Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-30T09:26:06.018907Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-30T09:26:06.030537Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-30T09:26:06.030631Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-30T09:26:06.030846Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8514:6223], server id = [2:8515:6224], tablet id = 72075186224037905, status = OK 2025-06-30T09:26:06.030892Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8514:6223], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-30T09:26:06.032153Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-30T09:26:06.032177Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-30T09:26:06.032262Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-30T09:26:06.032301Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-30T09:26:06.032410Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-30T09:26:06.033033Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8514:6223], server id = [2:8515:6224], tablet id = 72075186224037905 2025-06-30T09:26:06.033043Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-30T09:26:06.033274Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-30T09:26:06.045072Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8535:6243]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-30T09:26:06.045177Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-30T09:26:06.045186Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8535:6243], StatRequests.size() = 1 2025-06-30T09:26:06.084448Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzJjYTlkMTUtZDJiMWFiNDUtMWJiMWQyNGEtYTMyYzAyZjE=, TxId: 2025-06-30T09:26:06.084488Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzJjYTlkMTUtZDJiMWFiNDUtMWJiMWQyNGEtYTMyYzAyZjE=, TxId: 2025-06-30T09:26:06.084708Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-30T09:26:06.096756Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-30T09:26:06.096794Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3556:3475] >> TxUsage::WriteToTopic_Demo_24_Table >> KqpNewEngine::DeleteWithInputMultiConsumption-UseSink [GOOD] >> KqpReturning::ReturningWorksIndexedInsert+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedInsert-QueryService |84.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> KqpRanges::UpdateWhereInFullScan-UseSink [GOOD] >> KqpRanges::ScanKeyPrefix >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table >> ListObjectsInS3Export::ExportWithSchemaMapping >> KqpOlapJson::OrFilterVariants[2,true,0,0,100,0.5] [GOOD] >> KqpOlapJson::OrFilterVariants[2,false,1024,1000,1000000,0] |84.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> KqpOlapJson::RestoreFullJsonVariants[1,true,1024,0,100,0] [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[1,true,1024,0,100,0.5] >> TxUsage::WriteToTopic_Demo_18_RestartNo_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::DeleteWithInputMultiConsumption-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 7360, MsgBus: 9310 2025-06-30T09:25:58.143430Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671248932172035:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:58.143443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001793/r3tmp/tmpFwH0ws/pdisk_1.dat 2025-06-30T09:25:58.216749Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7360, node 1 2025-06-30T09:25:58.242424Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:58.242442Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:58.242444Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:58.242483Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:58.246637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:58.246668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:58.248023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9310 TClient is connected to server localhost:9310 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:58.330864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:58.333731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:25:58.336009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.386171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.434757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.495791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.659082Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671248932173600:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.659146Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.740918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.769180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.788097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.810244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.826977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.846789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.872422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.938527Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671248932174254:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.938550Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.938678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671248932174259:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.939516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:58.943634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:58.943733Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671248932174261:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:59.045875Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671253227141611:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:59.146108Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 23335, MsgBus: 27834 2025-06-30T09:25:59.457008Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671256301918201:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:59.461681Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001793/r3tmp/tmptElpY7/pdisk_1.dat 2025-06-30T09:25:59.471137Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table ... helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671279860181891:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:05.497333Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671279860181942:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:05.773352Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 20740, MsgBus: 6575 2025-06-30T09:26:06.321901Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671284566282784:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:06.321919Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001793/r3tmp/tmp7Pncsy/pdisk_1.dat 2025-06-30T09:26:06.344479Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20740, node 7 2025-06-30T09:26:06.366383Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:06.366401Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:06.366404Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:06.366466Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6575 2025-06-30T09:26:06.422349Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:06.422384Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:06.423353Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6575 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
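The NOT_FOUND / "Scheduled retry ... doublechecking" / "path exist, request accepts it" sequence seen throughout this log is the workload service lazily creating the default resource pool: the first fetch misses, a TPoolCreatorActor submits the create, and a concurrent create of the same path is accepted as already existing. A user-defined pool would be declared with DDL along these lines (a hedged sketch: the pool name and settings are illustrative, and the built-in `default` pool is created by the service itself rather than by user DDL):

    CREATE RESOURCE POOL my_pool WITH (  -- hypothetical pool name
        CONCURRENT_QUERY_LIMIT = 10,     -- illustrative settings
        QUEUE_SIZE = 100
    );
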
2025-06-30T09:26:06.447208Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:06.455233Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:06.467846Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:06.503807Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:06.540865Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:06.566030Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:06.732981Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671284566284354:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:06.733015Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:06.744836Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:06.780939Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:06.799753Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:06.823493Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:06.843084Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:06.866170Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:06.880584Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:06.898833Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671284566285006:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:06.898861Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:06.899012Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671284566285011:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:06.900152Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:06.904485Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671284566285013:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:06.985681Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671284566285064:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:07.323725Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpSqlIn::TupleLiteral [GOOD] >> KqpSqlIn::TupleSelect >> KqpOlapJson::SimpleVariants[1,true,1024,1000,1000000,0] >> KqpOlapJson::FilterVariants[2,true,0,0,1000000,0] [GOOD] >> KqpOlapJson::FilterVariants[2,true,0,0,1000000,0.5] >> KqpSqlIn::SecondaryIndex_TupleParameter [GOOD] >> KqpSqlIn::SecondaryIndex_TupleLiteral ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::DisableDeduplication [GOOD] Test command err: === Server->StartServer(false); 2025-06-30T09:22:52.982099Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670453197543944:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:52.982131Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003047/r3tmp/tmpy0kTrB/pdisk_1.dat 2025-06-30T09:22:53.027356Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:22:53.044065Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:53.047068Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521670453197543914:2080] 1751275372981857 != 1751275372981860 TServer::EnableGrpc on GrpcPort 61985, node 1 2025-06-30T09:22:53.062005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/003047/r3tmp/yandexsXk5C5.tmp 2025-06-30T09:22:53.062019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/003047/r3tmp/yandexsXk5C5.tmp 2025-06-30T09:22:53.062129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/003047/r3tmp/yandexsXk5C5.tmp 2025-06-30T09:22:53.062181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:53.069008Z INFO: TTestServer started on Port 12432 GrpcPort 61985 TClient is connected to server localhost:12432 PQClient connected to localhost:61985 === TenantModeEnabled() = 0 === Init PQ - start server on port 61985 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:22:53.085309Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:53.085355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:53.086472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:53.134911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:22:53.134965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:22:53.135025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-30T09:22:53.135033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-30T09:22:53.135090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:22:53.135132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:53.135358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-30T09:22:53.135392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-30T09:22:53.135422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:22:53.135430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-30T09:22:53.135432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-30T09:22:53.135435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715657:0 2 -> 3 2025-06-30T09:22:53.135540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:22:53.135548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-30T09:22:53.135549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715657:0 3 -> 128 2025-06-30T09:22:53.135575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:22:53.135577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:22:53.135580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-30T09:22:53.135583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-30T09:22:53.136144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:53.136266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-30T09:22:53.136319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 waiting... 
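The DEBUG trace above walks suboperation 281474976715657:0 through the schemeshard state machine: state 2 (TCreateParts) -> 3 (TConfigureParts) -> 128 (TPropose, waiting for the coordinator's plan step) -> 240 (TDone, publish to scheme board and notify waiters). Below is a toy model of that progression, using the numeric values exactly as the log prints them; the real enum lives in ydb/core/tx/schemeshard and is richer than this illustration.

#include <cstdio>

enum class ESubOpState : int {
    CreateParts    = 2,    // create shards (none needed for AlterSubDomain)
    ConfigureParts = 3,    // push configuration to the created shards
    Propose        = 128,  // wait for the coordinator's TEvOperationPlan step
    Done           = 240,  // publish to scheme board and notify waiters
};

ESubOpState Next(ESubOpState s) {
    switch (s) {
        case ESubOpState::CreateParts:    return ESubOpState::ConfigureParts;
        case ESubOpState::ConfigureParts: return ESubOpState::Propose;
        case ESubOpState::Propose:        return ESubOpState::Done;
        case ESubOpState::Done:           return ESubOpState::Done;
    }
    return s;
}

int main() {
    for (auto s = ESubOpState::CreateParts; s != ESubOpState::Done; s = Next(s)) {
        std::printf("Change state %d -> %d\n",
                    static_cast<int>(s), static_cast<int>(Next(s)));
    }
}

Run, it prints 2 -> 3, 3 -> 128, 128 -> 240, the same "Change state for txid" transitions the schemeshard logs above.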
2025-06-30T09:22:53.136842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 1751275373183, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-30T09:22:53.136878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1751275373183 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-30T09:22:53.136889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-30T09:22:53.136968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 281474976715657:0 128 -> 240 2025-06-30T09:22:53.136979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-30T09:22:53.137012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-30T09:22:53.137026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-30T09:22:53.137130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-30T09:22:53.137143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-30T09:22:53.137197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-30T09:22:53.137208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7521670457492511694:2236], at schemeshard: 72057594046644480, txId: 281474976715657, path id: 1 2025-06-30T09:22:53.137219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-30T09:22:53.137232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715657:0 ProgressState 2025-06-30T09:22:53.137245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715657:0 progress is 1/1 2025-06-30T09:22:53.137256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 1/1 2025-06-30T09:22:53.137261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715657:0 progress is 1/1 2025-06-30T09:22:53.137262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp ... : 1 WriteTimestampMS: 1751275565916 CreateTimestampMS: 1751275565911 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 99 bytes ..." 
SourceId: "" SeqNo: 2 WriteTimestampMS: 1751275565916 CreateTimestampMS: 1751275565911 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 99 bytes ..." SourceId: "" SeqNo: 3 WriteTimestampMS: 1751275565916 CreateTimestampMS: 1751275565911 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 1 SizeLag: 7 RealReadOffset: 2 WaitQuotaTimeMs: 0 EndOffset: 3 StartOffset: 0 } Cookie: 0 } 2025-06-30T09:26:06.065087Z node 17 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:3) wait data in partition inited, cookie 1 from offset 3 2025-06-30T09:26:06.065094Z node 17 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:3) EndOffset 3 ReadOffset 3 ReadGuid f3e58aec-b10cbc86-6968a87a-ee8f6115 has messages 1 2025-06-30T09:26:06.065107Z node 17 :PQ_READ_PROXY INFO: partition_actor.cpp:1012: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 Start reading TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 clientCommitOffset (empty maybe) clientReadOffset 0 2025-06-30T09:26:06.065111Z node 17 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:1) ready for read with readOffset 0 endOffset 3 2025-06-30T09:26:06.065125Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 read done: guid# a997b66d-556c2d9e-2e179a30-9b7a52cd, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1(assignId:2), size# 276 2025-06-30T09:26:06.065134Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 response to read: guid# a997b66d-556c2d9e-2e179a30-9b7a52cd 2025-06-30T09:26:06.065215Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 Process answer. Aval parts: 0 2025-06-30T09:26:06.065246Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 read done: guid# f3e58aec-b10cbc86-6968a87a-ee8f6115, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:3), size# 276 2025-06-30T09:26:06.065250Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 response to read: guid# f3e58aec-b10cbc86-6968a87a-ee8f6115 2025-06-30T09:26:06.065254Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 Process answer. 
Aval parts: 0 2025-06-30T09:26:06.065265Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:1), readOffset# 0, endOffset# 3, WTime# 1751275565946, sizeLag# 409 2025-06-30T09:26:06.065267Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1TEvPartitionReady. Aval parts: 1 2025-06-30T09:26:06.065273Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 performing read request: guid# 7e14862d-68af3231-1fe20ec7-7634f37d, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:1), count# 3, size# 490, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-30T09:26:06.065290Z node 17 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:1)maxCount 3 maxSize 490 maxTimeLagMs 0 readTimestampMs 0 readOffset 0 EndOffset 3 ClientCommitOffset 0 committedOffset 0 Guid 7e14862d-68af3231-1fe20ec7-7634f37d 2025-06-30T09:26:06.066061Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-30T09:26:06.066075Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 2 2025-06-30T09:26:06.066125Z node 18 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 2, State: StateIdle] read cookie 4 Topic 'rt3.dc1--topic1' partition 2 user debug offset 0 count 3 size 490 endOffset 3 max time lag 0ms effective offset 0 2025-06-30T09:26:06.066147Z node 18 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 2, State: StateIdle] read cookie 4 added 1 blobs, size 409 count 3 last offset 0, current partition end offset: 3 2025-06-30T09:26:06.066150Z node 18 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 2, State: StateIdle] Reading cookie 4. Send blob request. 2025-06-30T09:26:06.066163Z node 18 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 2 offset 0 partno 0 count 3 parts_count 0 source 1 size 409 accessed 2 times before, last time 2025-06-30T09:26:05.000000Z 2025-06-30T09:26:06.066170Z node 18 :PERSQUEUE DEBUG: read.h:121: Reading cookie 4. All 1 blobs are from cache. 2025-06-30T09:26:06.066184Z node 18 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-30T09:26:06.066218Z node 18 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 3 count 3 size 389 from pos 0 cbcount 3 2025-06-30T09:26:06.066259Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--topic1' partition: 2 messageNo: 0 requestId: cookie: 0 2025-06-30T09:26:06.066415Z node 18 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 2 offset 0 partno 0 count 3 parts 0 suffix '63' 2025-06-30T09:26:06.066620Z node 17 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 3 Result { Offset: 0 Data: "... 99 bytes ..." SourceId: "" SeqNo: 1 WriteTimestampMS: 1751275565946 CreateTimestampMS: 1751275565939 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 99 bytes ..." SourceId: "" SeqNo: 2 WriteTimestampMS: 1751275565946 CreateTimestampMS: 1751275565939 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 99 bytes ..." SourceId: "" SeqNo: 3 WriteTimestampMS: 1751275565946 CreateTimestampMS: 1751275565939 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 1 SizeLag: 7 RealReadOffset: 2 WaitQuotaTimeMs: 0 EndOffset: 3 StartOffset: 0 } Cookie: 0 } 2025-06-30T09:26:06.066654Z node 17 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:1) wait data in partition inited, cookie 1 from offset 3 2025-06-30T09:26:06.066662Z node 17 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid 7e14862d-68af3231-1fe20ec7-7634f37d has messages 1 2025-06-30T09:26:06.066689Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 read done: guid# 7e14862d-68af3231-1fe20ec7-7634f37d, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:1), size# 276 2025-06-30T09:26:06.066694Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 response to read: guid# 7e14862d-68af3231-1fe20ec7-7634f37d 2025-06-30T09:26:06.066793Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 Process answer. 
Aval parts: 0 Got data event with total 3 messages, current total messages: 3 Got data event with total 3 messages, current total messages: 6 Got data event with total 3 messages, current total messages: 9 2025-06-30T09:26:06.075122Z node 17 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 grpc read done: success# 1, data# { read_request { bytes_size: 276 } } 2025-06-30T09:26:06.075148Z node 17 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 grpc closed 2025-06-30T09:26:06.075172Z node 17 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/debug session shared/debug_17_1_9615967040825421514_v1 is DEAD 2025-06-30T09:26:06.075842Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/debug_17_1_9615967040825421514_v1 2025-06-30T09:26:06.075862Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [17:7521671286062840382:2475] destroyed 2025-06-30T09:26:06.075867Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/debug_17_1_9615967040825421514_v1 2025-06-30T09:26:06.075870Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [17:7521671286062840381:2474] destroyed 2025-06-30T09:26:06.075873Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/debug_17_1_9615967040825421514_v1 2025-06-30T09:26:06.075876Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [17:7521671286062840380:2473] destroyed 2025-06-30T09:26:06.075892Z node 18 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/debug_17_1_9615967040825421514_v1 2025-06-30T09:26:06.075896Z node 18 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/debug_17_1_9615967040825421514_v1 2025-06-30T09:26:06.075899Z node 18 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/debug_17_1_9615967040825421514_v1 2025-06-30T09:26:06.076227Z node 17 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--topic1] pipe [17:7521671286062840375:2470] disconnected; active server actors: 1 2025-06-30T09:26:06.076238Z node 17 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--topic1] pipe [17:7521671286062840375:2470] client debug disconnected session shared/debug_17_1_9615967040825421514_v1 >> KqpOlapJson::RestoreFullJsonVariants[1,true,1024,0,100,0.5] [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[1,true,1,1000,1000000,0] >> KqpRanges::ScanKeyPrefix [GOOD] >> KqpOlapJson::OrFilterVariants[2,false,1024,1000,1000000,0] [GOOD] >> KqpOlapJson::OrFilterVariants[2,false,1024,1000,1000000,0.5] >> KqpOlapJson::EmptyVariants[1,true,1024,10,1000000,0] [GOOD] >> KqpOlapJson::EmptyVariants[1,true,1024,10,1000000,0.5] |84.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |84.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |84.4%| [LD] {RESULT} 
$(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence >> KqpReturning::ReturningWorksIndexedInsert-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpRanges::ScanKeyPrefix [GOOD] Test command err: Trying to start YDB, gRPC: 65423, MsgBus: 10501 2025-06-30T09:25:57.320253Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671245542701090:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:57.320299Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00179a/r3tmp/tmpyw3WaF/pdisk_1.dat 2025-06-30T09:25:57.413359Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65423, node 1 2025-06-30T09:25:57.446393Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:57.446404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:57.446406Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:57.446449Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10501 2025-06-30T09:25:57.486550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:57.486578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:57.487461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10501 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
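For the TPersQueueTest::DisableDeduplication trace further up (partition actors reporting EndOffset 3, three data events of three messages each, then the "grpc closed" teardown), the client side is an ordinary topic read session. A sketch against the public YDB C++ SDK topic API follows; the names track the SDK docs, but header paths and exact signatures vary between SDK versions, so treat it as an approximation rather than the test's actual code.

#include <cstddef>
#include <variant>
// Header path differs by SDK version, e.g. <ydb-cpp-sdk/client/topic/client.h>
// in recent SDKs or <ydb/public/sdk/cpp/client/ydb_topic/topic.h> in-tree.
#include <ydb-cpp-sdk/client/topic/client.h>

void ReadNineMessages(NYdb::TDriver& driver) {
    NYdb::NTopic::TTopicClient client(driver);
    auto settings = NYdb::NTopic::TReadSessionSettings()
                        .ConsumerName("debug")       // consumer name from the trace
                        .AppendTopics("topic1");
    auto session = client.CreateReadSession(settings);

    size_t total = 0;
    while (total < 9) {                              // trace shows 3 batches x 3 messages
        auto event = session->GetEvent(/*block=*/true);
        using namespace NYdb::NTopic;
        if (auto* data = std::get_if<TReadSessionEvent::TDataReceivedEvent>(&*event)) {
            total += data->GetMessages().size();
            data->Commit();                          // advances the committed offset
        } else if (auto* start =
                       std::get_if<TReadSessionEvent::TStartPartitionSessionEvent>(&*event)) {
            start->Confirm();                        // accept the partition assignment
        }
    }
}

Destroying the session produces exactly the teardown seen above: the server logs "grpc closed", marks the session DEAD, and destroys the per-partition direct read sessions and pipes.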
2025-06-30T09:25:57.525092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:57.531932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:57.597241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:57.619012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:57.630127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:57.765195Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671245542702658:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:57.765236Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:57.822960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:57.831230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:57.845579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:57.859675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:57.872335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:57.886907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:57.901341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:57.919641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671245542703309:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:57.919661Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671245542703314:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:57.919669Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:57.920594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:57.927806Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671245542703316:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:58.015340Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671249837670663:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:58.324227Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 3415, MsgBus: 63595 2025-06-30T09:25:58.679779Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671248608142980:2090];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:58.681528Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00179a/r3tmp/tmpLJA89Z/pdisk_1.dat 2025-06-30T09:25:58.723787Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3415, node 2 2025-06-30T09:25:58.742967Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:58.742982Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09: ... alled at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Warning: Type annotation, code: 1030
:1:44: Warning: At lambda, At function: Coalesce
:1:58: Warning: At function: SqlIn
:1:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 Trying to start YDB, gRPC: 5209, MsgBus: 19123 2025-06-30T09:26:07.937345Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671290155453797:2145];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00179a/r3tmp/tmpsB0ocm/pdisk_1.dat 2025-06-30T09:26:07.947650Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:26:08.002830Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:08.022948Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7521671290155453677:2080] 1751275567928422 != 1751275567928425 TServer::EnableGrpc on GrpcPort 5209, node 7 2025-06-30T09:26:08.035603Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:08.035617Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:08.035620Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:08.035677Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:08.055165Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:08.055205Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:08.059392Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19123 TClient is connected to server localhost:19123 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
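Warning 1108 above is YQL noting that `IN` over a collection with nullable items can surprise under the default (non-ANSI) semantics; the fix it suggests is opting into ANSI behavior with the named pragma. A sketch of issuing such a query through the YDB C++ SDK table client; the pragma text is taken verbatim from the warning, while the table and column names are made up for illustration and the SDK signatures are approximate.

#include <string>
#include <ydb-cpp-sdk/client/table/table.h>         // header path varies by SDK version

bool RunNullableIn(NYdb::NTable::TSession& session) {
    const std::string query = R"(
        PRAGMA AnsiInForEmptyOrNullableItemsCollections;
        SELECT * FROM `/Root/Test`
        WHERE Group IN (1, 2, NULL);   -- nullable item that triggers warning 1108
    )";
    auto result = session
                      .ExecuteDataQuery(query,
                                        NYdb::NTable::TTxControl::BeginTx().CommitTx())
                      .GetValueSync();
    return result.IsSuccess();
}

With the pragma, `x IN (1, 2, NULL)` follows three-valued logic (NULL when x matches nothing but the list contains NULL) instead of silently treating the NULL item as no match.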
2025-06-30T09:26:08.259368Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:08.276816Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:08.288848Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:08.356116Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:08.390374Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:08.409717Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:08.675575Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671294450422565:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.675610Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.687097Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:08.704832Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:08.727755Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:08.741196Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:08.753062Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:08.814189Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:08.875678Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:08.926391Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:08.940524Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671294450423227:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.940569Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.940704Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671294450423232:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.941755Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:08.946708Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:26:08.946797Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671294450423234:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:09.052401Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671298745390590:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> KqpOlapJson::FilterVariants[2,true,0,0,1000000,0.5] [GOOD] >> KqpOlapJson::FilterVariants[2,true,0,10,0,0] >> KqpOlapJson::SimpleVariants[1,true,1024,1000,1000000,0] [GOOD] >> KqpOlapJson::SimpleVariants[1,true,1024,1000,1000000,0.5] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,1000000,0] >> KqpSqlIn::TupleSelect [GOOD] >> KqpSqlIn::SimpleKey_In_And_In |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_ya_count_queues[tables_format_v0] >> KqpOlapJson::RestoreFullJsonVariants[1,true,1,1000,1000000,0] [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[1,true,1,1000,1000000,0.5] |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageListBindings::ShouldCombineFilters [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND (TRUNCATED: ~160 ps rows elided — init plus idle kernel threads such as kworker/migration/ksoftirqd/cpuhp, all at 0.0 %CPU / 0.0 %MEM, no noteworthy user processes) ... chema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:26:03.620060Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:26:03.620170Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections".
Create session OK 2025-06-30T09:26:03.620172Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:26:03.620174Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:26:03.620250Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes". Create session OK 2025-06-30T09:26:03.620252Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:26:03.620253Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:26:03.620339Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas". Create session OK 2025-06-30T09:26:03.620341Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:26:03.620343Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:26:03.620772Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks". Create session OK 2025-06-30T09:26:03.620783Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:26:03.620785Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:26:03.620994Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings". 
Create session OK 2025-06-30T09:26:03.620996Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:26:03.620998Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:26:03.659004Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)" 2025-06-30T09:26:03.659020Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)": 2025-06-30T09:26:03.746964Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-30T09:26:03.746982Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-30T09:26:03.767073Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-30T09:26:03.767091Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-30T09:26:03.767748Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-30T09:26:03.767756Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-30T09:26:03.767926Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-30T09:26:03.767928Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-30T09:26:03.768559Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-30T09:26:03.768565Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-30T09:26:03.768724Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-30T09:26:03.768727Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-30T09:26:03.768820Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-30T09:26:03.768822Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-30T09:26:03.768901Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-30T09:26:03.768903Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-30T09:26:03.768985Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-30T09:26:03.768988Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-30T09:26:03.769063Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-30T09:26:03.769066Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-30T09:26:03.769837Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-30T09:26:03.769846Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-30T09:26:03.770019Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-30T09:26:03.770022Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: 
schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-30T09:26:03.770487Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-30T09:26:03.770496Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-30T09:26:03.770673Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-30T09:26:03.770676Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases": >> KqpOlapJson::OrFilterVariants[2,false,1024,1000,1000000,0.5] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_create_queue[tables_format_v0-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_create_queue[tables_format_v0-std] >> KqpSqlIn::SecondaryIndex_TupleLiteral [GOOD] >> KqpSqlIn::SecondaryIndex_TupleSelect >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink-UseDataQuery |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v0-std] >> KqpOlapJson::FilterVariants[2,true,0,10,0,0] [GOOD] >> KqpOlapJson::FilterVariants[2,true,0,10,0,0.5] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::OrFilterVariants[2,false,1024,1000,1000000,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 28009, MsgBus: 61597 2025-06-30T09:26:02.749797Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671268522357215:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:02.749839Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003ba7/r3tmp/tmpui2ZD8/pdisk_1.dat 2025-06-30T09:26:02.817561Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:02.822163Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671268522357096:2080] 1751275562746402 != 1751275562746405 TServer::EnableGrpc on GrpcPort 28009, node 1 2025-06-30T09:26:02.847059Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:02.847084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:02.847087Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:02.847156Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:02.847587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:02.847617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:02.851395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61597 TClient is connected to server localhost:61597 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:03.059164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:03.062852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2); 2025-06-30T09:26:03.325110Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671272817325016:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:03.325166Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:03.421915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:26:03.445856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:03.445941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:26:03.446030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:26:03.446054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:26:03.446081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:26:03.446106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:26:03.446129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:26:03.446154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:26:03.446175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:26:03.446195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:26:03.446214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:26:03.446233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671272817325098:2295];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:26:03.447222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:26:03.447239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:26:03.447254Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:26:03.447260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:26:03.447282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:26:03.447287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:26:03.447299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:26:03.447305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:26:03.447315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:26:03.447320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:26:03.447347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:03.447353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:03.447375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:03.447384Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:03.447398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:03.447404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_swi ... ion=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:26:10.215393Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:10.215397Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:10.215416Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:10.215421Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:10.215435Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:10.215443Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:26:10.215451Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:26:10.215458Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:26:10.215464Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:26:10.215505Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:26:10.215510Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:26:10.215525Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:26:10.215529Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:26:10.221356Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[6:7521671301367972720:2295];ev=NActors::IEventHandle;tablet_id=72075186224037889;tx_id=281474976715658;this=123975235966592;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275570221;max=18446744073709551615;plan=0;src=[6:7521671297073005082:2148];cookie=22:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:10.221501Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521671301367972717:2294];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=123975235968192;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275570221;max=18446744073709551615;plan=0;src=[6:7521671297073005082:2148];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:10.224577Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:10.226101Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:26:10.226209Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:10.227311Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:26:10.254952Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:10.255531Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671301367972823:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.255792Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.261465Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:10.261691Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`false`, `COLUMNS_LIMIT`=`1024`, `SPARSED_DETECTOR_KFF`=`1000`, `MEM_LIMIT_CHUNK`=`1000000`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:26:10.280514Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:10.281216Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671301367972857:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.281413Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.288366Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:10.288541Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1"}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3", "d" : "d3"}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}')) 2025-06-30T09:26:10.293988Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671301367972892:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.294014Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.294165Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671301367972897:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.295112Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:10.298466Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:26:10.298550Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671301367972899:2324], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:26:10.351782Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671301367972950:2454] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:10.388841Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:26:10.389147Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE (JSON_VALUE(Col2, "$.b") = "b3" AND JSON_VALUE(Col2, "$.d") = "d3") OR (JSON_VALUE(Col2, "$.b") = "b1" AND JSON_VALUE(Col2, "$.c") = "c1") ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\"}"]]] OUTPUT: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\"}"]]] INDEX:8/0/0 HEADER:0/0/0 >> TTopicApiDescribes::DescribeTopic >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_create_queue[tables_format_v0-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_create_queue[tables_format_v1-fifo] >> KqpOlapJson::SimpleVariants[1,true,1024,1000,1000000,0.5] [GOOD] >> KqpOlapJson::SimpleVariants[10,false,0,0,0,0] >> KqpOlapJson::EmptyVariants[1,true,1024,10,1000000,0.5] [GOOD] >> KqpOlapJson::EmptyVariants[1,true,1024,1000,0,0] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_create_queue[tables_format_v1-fifo] [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[1,true,1,1000,1000000,0.5] [GOOD] >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService [GOOD] >> KqpSqlIn::SimpleKey_In_And_In [GOOD] >> KqpSqlIn::TupleNotOnlyOfKeys >> test_queue_counters.py::TestSqsGettingCounters::test_counters_when_sending_duplicates [GOOD] >> test_queue_counters.py::TestSqsGettingCounters::test_counters_when_sending_reading_deleting >> test_queues_managing.py::TestQueuesManagingWithTenant::test_request_to_deleted_queue[tables_format_v1-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_ya_count_queues[tables_format_v0] >> KqpOlapJson::FilterVariants[2,true,0,10,0,0.5] [GOOD] >> KqpOlapJson::FilterVariants[2,true,0,10,100,0] >> KqpOlapJson::SimpleExistsVariants[1,true,1,10,1000000,0] >> KqpSqlIn::SecondaryIndex_TupleSelect [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::RestoreFullJsonVariants[1,true,1,1000,1000000,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 21604, MsgBus: 27919 2025-06-30T09:26:04.710872Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671277466745381:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:04.711033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003da5/r3tmp/tmpHhCUJm/pdisk_1.dat 2025-06-30T09:26:04.832568Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671277466745188:2080] 1751275564707055 != 1751275564707058 2025-06-30T09:26:04.834075Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21604, node 1 2025-06-30T09:26:04.846818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:04.846836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:04.846839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:04.846911Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27919 2025-06-30T09:26:04.899498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:04.899535Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:04.901113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27919 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:04.929669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:26:04.934273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:26:05.183098Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671281761713105:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:05.183145Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:05.231448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:26:05.242547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:05.242605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:26:05.242654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:26:05.242670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:26:05.242687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:26:05.242705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:26:05.242719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:26:05.242960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:26:05.242987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:26:05.243007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:26:05.243042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:26:05.243056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671281761713167:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:26:05.243937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:26:05.243948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:26:05.243958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:26:05.243961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:26:05.243975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:26:05.243979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:26:05.243986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:26:05.243989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:26:05.243995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:26:05.243998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:26:05.244013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:05.244016Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:05.244030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:05.244035Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:05.244043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:05.244046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_swi ... :26:11.344782Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:26:11.344802Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:26:11.344807Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:26:11.344818Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:26:11.344823Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:26:11.344830Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:26:11.344835Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:26:11.344881Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:11.344895Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:11.344917Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:11.344925Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:11.344940Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:11.344945Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:26:11.344954Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:26:11.344960Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:26:11.344966Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:26:11.345040Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:26:11.345046Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:26:11.345060Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:26:11.345065Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:26:11.385576Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521671305785071476:2294];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=90542166215648;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275571385;max=18446744073709551615;plan=0;src=[6:7521671301490103836:2147];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:11.388510Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:11.389875Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:26:11.399105Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:11.399952Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671305785071548:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.400144Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.403945Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `SCAN_FIRST_LEVEL_ONLY`=`false`, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`, `COLUMNS_LIMIT`=`1`, `SPARSED_DETECTOR_KFF`=`1000`, `MEM_LIMIT_CHUNK`=`1000000`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:26:11.409530Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671305785071578:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.409554Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.413419Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:11.416044Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1", "d" : null, "e.v" : {"c" : 1, "e" : {"c.a" : 2}}}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3", "d" : "d3", "e" : ["a", {"v" : ["c", 5]}]}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}')) 2025-06-30T09:26:11.421208Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671305785071609:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.421240Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.421256Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671305785071614:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.422161Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:11.430986Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671305785071616:2320], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:26:11.511603Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671305785071667:2425] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:11.540611Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":[\"a\",{\"v\":[\"c\",\"5\"]}]}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] OUTPUT: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":[\"a\",{\"v\":[\"c\",\"5\"]}]}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] INDEX:0/0/0 HEADER:0/0/0 |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 2257, MsgBus: 18949 2025-06-30T09:25:58.159199Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671250740550394:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:58.159217Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00178d/r3tmp/tmpH5jn5I/pdisk_1.dat TServer::EnableGrpc on GrpcPort 2257, node 1 2025-06-30T09:25:58.234935Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:58.257534Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:58.257556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:58.257558Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:58.257601Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:58.262979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:58.263012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:58.265274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18949 TClient is connected to server localhost:18949 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:58.335997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:58.341411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:58.350627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.387518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.453689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.478707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:58.615328Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671250740551957:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.615360Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.677781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.693032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.707577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.724422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.734073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.794295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.813273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:58.834445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671250740552613:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.834464Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.834617Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671250740552618:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:58.835304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:58.837497Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671250740552620:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:58.895669Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671250740552671:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:59.061652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.079366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.093281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: ... e, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 31755, MsgBus: 21137 2025-06-30T09:26:10.018434Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671300302841454:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:10.018471Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00178d/r3tmp/tmpE8H1aH/pdisk_1.dat 2025-06-30T09:26:10.050646Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31755, node 7 2025-06-30T09:26:10.071797Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:10.071812Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:10.071816Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:10.071880Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:10.127369Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:10.127403Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:10.128297Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: 
Connecting -> Connected TClient is connected to server localhost:21137 TClient is connected to server localhost:21137 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:10.236473Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:10.240124Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:10.256052Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:10.284489Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:10.327946Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:10.352274Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:10.697885Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671300302843016:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.697948Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.754697Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:10.820435Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:10.847910Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:10.866020Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:10.878440Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:10.898647Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:10.913376Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:10.995708Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671300302843678:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.995745Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.995833Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671300302843683:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.996930Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:11.003448Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:26:11.003552Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671300302843685:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:11.020547Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:11.074107Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671304597811041:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:11.421563Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,1000000,0] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,1000000,0.5] >> KqpOlapJson::SimpleVariants[10,false,0,0,0,0] [GOOD] >> KqpOlapJson::SimpleVariants[10,false,0,0,0,0.5] >> test_queue_counters.py::TestSqsGettingCounters::test_counters_when_sending_reading_deleting [GOOD] >> test_queue_counters.py::TestSqsGettingCounters::test_purge_queue_counters >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,10,1000000,0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::SecondaryIndex_TupleSelect [GOOD] Test command err: Trying to start YDB, gRPC: 17669, MsgBus: 14629 2025-06-30T09:25:59.395407Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671256446678385:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:59.395437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001786/r3tmp/tmpGtwPxd/pdisk_1.dat 2025-06-30T09:25:59.461813Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17669, node 1 2025-06-30T09:25:59.487005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:59.487019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:59.487022Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:59.487078Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14629 TClient is connected to server localhost:14629 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:25:59.536340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:59.536370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-30T09:25:59.538131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:59.547170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:25:59.555887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.623029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:59.661575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:59.680552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:59.856293Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671256446679950:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:59.856324Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:59.916218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.930337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.942672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:59.954356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.017321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.046185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.059834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.087900Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671260741647903:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.087947Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.088051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671260741647908:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:00.089338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:00.094141Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671260741647910:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:00.189907Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671260741647961:3404] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:00.396980Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:00.411216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.478690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:00.495286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, a ... ated, will use file: (empty maybe) 2025-06-30T09:26:11.150285Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:11.150287Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:11.150325Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13389 TClient is connected to server localhost:13389 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:11.220275Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:11.220306Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:11.220716Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:11.221293Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:26:11.222661Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:11.228013Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:11.250435Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:11.274892Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:11.291437Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:11.601423Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671305616124203:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.601447Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.611875Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:11.619876Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:11.627943Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:11.641785Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:11.657337Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:11.671302Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:11.684854Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:11.704448Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671305616124856:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.704488Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.704583Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671305616124861:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.705454Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:11.711034Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671305616124863:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:11.792434Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671305616124914:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:11.991814Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:12.006212Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:12.017332Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:12.115152Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
: Warning: Type annotation, code: 1030
:5:17: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:6:56: Warning: At function: Filter, At lambda, At function: Coalesce
:7:29: Warning: At function: SqlIn
:7:29: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue_batch[tables_format_v0] |84.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/etcd_proxy/etcd_proxy |84.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/etcd_proxy/etcd_proxy |84.5%| [LD] {RESULT} $(B)/ydb/apps/etcd_proxy/etcd_proxy |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::BaseCaseWithDataColumns [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:22:34.073221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:22:34.073247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:34.073254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:22:34.073260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:22:34.073279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:22:34.073284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:22:34.073294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:22:34.073311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:22:34.073440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# 2025-06-30T09:22:34.073531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:22:34.089392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:22:34.089413Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:34.089531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:22:34.093854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:22:34.094998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:22:34.095042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:22:34.096890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:22:34.096952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:22:34.097103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:34.097144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:22:34.098122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:34.098171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:22:34.098464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:22:34.098477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:22:34.098487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:22:34.098496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:22:34.098503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:22:34.098529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:22:34.100311Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, 
primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:22:34.127335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:22:34.127439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:34.127521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:22:34.127533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:22:34.127591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:22:34.127609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:22:34.128549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:34.128610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:22:34.128709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:34.128723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:22:34.128730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:22:34.128736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:22:34.129484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:34.129506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:22:34.129514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:22:34.130120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-30T09:22:34.130135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:22:34.130142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:22:34.130151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:22:34.131098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:22:34.131686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:22:34.131742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:22:34.132001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:22:34.132042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
rtitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:00.299365Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:26:00.299414Z node 278 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1" took 53us result status StatusSuccess 2025-06-30T09:26:00.299640Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1" PathDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "index1" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataColumnNames: "value" DataSize: 0 IndexImplTableDescriptions { Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 
NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:00.299733Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-30T09:26:00.299772Z node 278 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplTable" took 42us result status StatusSuccess 2025-06-30T09:26:00.299939Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 
PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpOlapJson::EmptyVariants[1,true,1024,1000,0,0] [GOOD] >> KqpOlapJson::EmptyVariants[1,true,1024,1000,0,0.5] >> KqpOlapJson::SimpleExistsVariants[1,true,1,10,1000000,0] [GOOD] >> KqpOlapJson::SimpleExistsVariants[1,true,1,10,1000000,0.5] >> KqpOlapJson::FilterVariants[2,true,0,10,100,0] [GOOD] >> KqpOlapJson::FilterVariants[2,true,0,10,100,0.5] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue[tables_format_v1-std] |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> KqpSqlIn::TupleNotOnlyOfKeys [GOOD] >> KqpOlapJson::SimpleVariants[10,false,0,0,0,0.5] [GOOD] >> KqpOlapJson::SimpleVariants[10,false,0,0,100,0] >> ListObjectsInS3Export::ExportWithSchemaMapping [GOOD] |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> KqpNewEngine::BlindWrite >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,10,1000000,0] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,10,1000000,0.5] >> KqpExtractPredicateLookup::PointJoin [GOOD] >> KqpExtractPredicateLookup::SqlInJoin ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::TupleNotOnlyOfKeys [GOOD] Test command err: Trying to start YDB, gRPC: 9307, MsgBus: 28378 2025-06-30T09:26:01.392101Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671263245645520:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:01.392452Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001781/r3tmp/tmpPKWRp6/pdisk_1.dat 2025-06-30T09:26:01.459485Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:01.467519Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 9307, node 1 2025-06-30T09:26:01.478894Z node 1
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:01.478905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:01.478906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:01.478945Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:01.492757Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:01.492785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:01.494839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28378 TClient is connected to server localhost:28378 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:01.561051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:01.563900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:26:01.567036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:01.631838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:01.695449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:01.717320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:01.870941Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671263245647085:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:01.870980Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:01.923721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.932656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.948557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.961413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.976055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:01.989542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.004487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.030137Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671267540615034:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:02.030168Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:02.030274Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671267540615039:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:02.035946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:02.040095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-30T09:26:02.040339Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671267540615041:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:26:02.088038Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671267540615092:3398] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:02.271063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:02.285190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB fir ... Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:12.391373Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62718 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:26:12.431523Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:12.434549Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:12.447852Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:26:12.522890Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:12.576389Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:12.600761Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:12.862620Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671309397919050:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:12.862653Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:12.875053Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:12.900682Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:12.924415Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:12.948977Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:12.967304Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:12.984883Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:13.011251Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:13.032198Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671313692886999:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:13.032227Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:13.032314Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671313692887004:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:13.033411Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:13.036445Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:26:13.036607Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671313692887006:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:13.098575Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671313692887057:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:13.259636Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:13.268022Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:13.286813Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:13.302030Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Warning: Type annotation, code: 1030
:5:21: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:6:26: Warning: At function: Filter, At lambda, At function: Coalesce
:7:37: Warning: At function: SqlIn
:7:37: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108
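The fix that warning 1108 suggests is a single pragma at the top of the query text. A minimal YQL sketch of how it would be applied; the table path and column names below are hypothetical illustrations, not taken from the tests in this log:
PRAGMA AnsiInForEmptyOrNullableItemsCollections;
SELECT key, value
FROM `/Root/ExampleTable`       -- hypothetical table path
WHERE value IN ("foo", "bar");  -- `value` assumed nullable; with the pragma in
                                -- effect, IN uses ANSI SQL semantics for nullable
                                -- or empty collections and warning 1108 is not raised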
: Warning: Type annotation, code: 1030
:5:21: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:6:26: Warning: At function: Filter, At lambda, At function: Coalesce
:7:37: Warning: At function: SqlIn
:7:37: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 |84.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tools/stress_tool/ydb_stress_tool >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_read_message[std] [GOOD] >> test_queue_counters.py::TestSqsGettingCounters::test_purge_queue_counters [GOOD] >> ListObjectsInS3Export::ExportWithoutSchemaMapping >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_send_message[fifo] |84.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ydb_stress_tool |84.5%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ydb_stress_tool >> KqpNewEngine::Update-UseSink |84.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |84.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |84.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup >> KqpQuery::ReadOverloaded+StreamLookup [GOOD] >> KqpQuery::ReadOverloaded-StreamLookup >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,0,1000000,0.5] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,10,0,0] >> KqpOlapJson::SimpleExistsVariants[1,true,1,10,1000000,0.5] [GOOD] >> KqpOlapJson::SimpleExistsVariants[1,true,1,1000,0,0] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_send_message[fifo] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_send_message[std] >> KqpOlapJson::SimpleVariants[10,false,0,0,100,0] [GOOD] >> KqpOlapJson::SimpleVariants[10,false,0,0,100,0.5] >> KqpNewEngine::JoinWithParams >> KqpNotNullColumns::InsertNotNullPk >> test_acl.py::TestSqsACLWithPath::test_apply_permissions[tables_format_v0] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_send_message[std] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,10,1000000,0.5] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,1000,0,0] >> KqpOlapJson::FilterVariants[2,true,0,10,100,0.5] [GOOD] >> KqpNewEngine::BlindWrite [GOOD] >> KqpNewEngine::BlindWriteParameters >> ListObjectsInS3Export::ExportWithoutSchemaMapping [GOOD] >> KqpOlapJson::EmptyVariants[1,true,1024,1000,0,0.5] [GOOD] >> KqpOlapJson::EmptyVariants[1,true,1024,1000,100,0.5] >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v0-std] [GOOD] >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v1-fifo] >> KqpOlapJson::SimpleExistsVariants[1,true,1,1000,0,0] [GOOD] >> KqpOlapJson::SimpleExistsVariants[1,true,1,1000,0,0.5] >> KqpNewEngine::Update-UseSink [GOOD] >> KqpNewEngine::UpdateFromParams ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::FilterVariants[2,true,0,10,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 5872, MsgBus: 29688 2025-06-30T09:26:07.429151Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671288457212750:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:07.429312Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: 
SetPath # /home/runner/.ya/build/build_root/cl3w/003ba6/r3tmp/tmpXgA1GA/pdisk_1.dat 2025-06-30T09:26:07.526272Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:07.526604Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671288457212543:2080] 1751275567425668 != 1751275567425671 TServer::EnableGrpc on GrpcPort 5872, node 1 2025-06-30T09:26:07.566971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:07.566986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:07.566989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:07.567038Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:07.579591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:07.579626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:07.583194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29688 TClient is connected to server localhost:29688 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:07.658498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:07.666509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2); 2025-06-30T09:26:08.056241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671292752180462:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.056296Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.155817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:26:08.170398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:08.174950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:26:08.175056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:26:08.175081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:26:08.175109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:26:08.175135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:26:08.175166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:26:08.175189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:26:08.175212Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:26:08.175239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:26:08.175262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:26:08.175285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671292752180528:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:26:08.175723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:08.175762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:26:08.175820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:26:08.175840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:26:08.175858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:26:08.175877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:26:08.175896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:26:08.175916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:26:08.175934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:26:08.175953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:26:08.175970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:26:08.175987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671292752180532:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:26:08.179429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:26:08.179453Z node 1 :TX_COLUMNSHARD ... OLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:26:14.891631Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:26:14.891640Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:26:14.891645Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:26:14.891724Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:26:14.891730Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:26:14.891751Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:26:14.891757Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:26:14.892842Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521671319724831173:2294];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=127477720809120;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275574892;max=18446744073709551615;plan=0;src=[6:7521671319724830861:2166];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:14.927126Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[6:7521671319724831196:2297];ev=NActors::IEventHandle;tablet_id=72075186224037889;tx_id=281474976715658;this=127477720809120;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275574926;max=18446744073709551615;plan=0;src=[6:7521671319724830861:2166];cookie=22:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:14.930221Z 
node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:14.931456Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:26:14.931628Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:14.932570Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:26:14.947338Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671319724831278:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:14.947377Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:14.953877Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:14.962482Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:14.962587Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `SCAN_FIRST_LEVEL_ONLY`=`false`, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`, `COLUMNS_LIMIT`=`0`, `SPARSED_DETECTOR_KFF`=`10`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:26:14.979346Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671319724831312:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:14.979370Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:14.980493Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:14.982621Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:14.982800Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1", "d" : null, "e.v" : {"c" : 1, "e" : {"c.a" : 2}}}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3", "d" : "d3"}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}')) 2025-06-30T09:26:14.996114Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671319724831347:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:14.996151Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:14.996209Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671319724831352:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:14.997312Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:15.000159Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671319724831354:2324], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:26:15.067930Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671324019798701:2453] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:15.105755Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:26:15.105838Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "a2" ORDER BY Col1; 2025-06-30T09:26:15.201744Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; COMPARE: [[2u;["{\"a\":\"a2\"}"]]] OUTPUT: [[2u;["{\"a\":\"a2\"}"]]] INDEX:2/0/0 HEADER:0/0/0 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] OUTPUT: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] INDEX:0/0/0 HEADER:0/0/0 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"e.v\".c") = "1" ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]]] OUTPUT: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]]] INDEX:2/0/0 HEADER:0/0/0 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"e.v\".e.\"c.a\"") = "2" ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]]] OUTPUT: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]]] INDEX:2/0/0 HEADER:0/0/0 >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v1-fifo] [GOOD] >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v1-std] >> KqpNotNullColumns::InsertNotNullPk [GOOD] >> KqpNotNullColumns::InsertNotNullPkPg+useSink >> ListObjectsInS3Export::ExportWithEncryption >> KqpReturning::ReturningTwice >> KqpOlapJson::SimpleVariants[10,false,0,0,100,0.5] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,1000,0,0] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,1000,0,0.5] >> KqpNewEngine::JoinWithParams [GOOD] >> KqpNewEngine::JoinPure >> KqpNewEngine::BlindWriteParameters [GOOD] >> KqpLimits::OutOfSpaceYQLUpsertFail+useSink [GOOD] >> 
KqpNewEngine::BlindWriteListParameter >> KqpLimits::OutOfSpaceYQLUpsertFail-useSink >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v1-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::SimpleVariants[10,false,0,0,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 22021, MsgBus: 4150 2025-06-30T09:26:08.959719Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671291034125109:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:08.974958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003da4/r3tmp/tmpaldppS/pdisk_1.dat 2025-06-30T09:26:09.094834Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671291034124932:2080] 1751275568904703 != 1751275568904706 2025-06-30T09:26:09.096623Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:09.120025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:09.120071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 22021, node 1 2025-06-30T09:26:09.124472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:09.138981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:09.138993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:09.138995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:09.139044Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4150 TClient is connected to server localhost:4150 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:09.237694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:09.245376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:26:09.573448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671295329092853:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:09.573483Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:09.624605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:26:09.637697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:09.637794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:26:09.637868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:26:09.637898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:26:09.637932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:26:09.637954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:26:09.637982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:26:09.638011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:26:09.638038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:26:09.638071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:26:09.638098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:26:09.638130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671295329092915:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:26:09.640964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:26:09.640993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:26:09.641012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:26:09.641018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:26:09.641043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:26:09.641054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:26:09.641070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:26:09.641077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:26:09.641086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:26:09.641093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:26:09.641124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:09.641137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:09.641162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:09.641171Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:09.641186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:09.641194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switch ... ol default not found or you don't have access permissions } 2025-06-30T09:26:15.997963Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.000995Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:16.007323Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:16.007491Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:16.007632Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:16.007751Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:16.007889Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:16.008034Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:16.008161Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:16.008304Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:16.008620Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:16.008657Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`false`, `COLUMNS_LIMIT`=`0`, `SPARSED_DETECTOR_KFF`=`0`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:26:16.014213Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671326325633693:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.014243Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.018090Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:16.021343Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:16.021363Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:16.021523Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:16.021528Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:16.021704Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:16.021721Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:16.021845Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:16.021982Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:16.022050Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:16.022224Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3"}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}')) 2025-06-30T09:26:16.027960Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671326325633760:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.027982Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.028001Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671326325633765:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.028988Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:16.031671Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671326325633767:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:26:16.116742Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671326325633818:2675] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:16.147497Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:26:16.147624Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:26:16.147715Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:26:16.147922Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521671322030666106:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=17;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894,72075186224037897;receive=72075186224037889; 2025-06-30T09:26:16.147943Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521671322030666106:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=18;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894,72075186224037897;receive=72075186224037889; 2025-06-30T09:26:16.147982Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521671322030666106:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=20;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037894; 2025-06-30T09:26:16.147992Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521671322030666106:2303];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=21;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037894; 2025-06-30T09:26:16.148057Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] OUTPUT: [[1u;["{\"a\":\"a1\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] INDEX:0/0/0 HEADER:0/0/0 >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartNo_Table [GOOD] >> TxUsage::WriteToTopic_Demo_24_Table [GOOD] >> 
KqpOlapJson::SimpleExistsVariants[1,true,1,1000,0,0.5] [GOOD] >> KqpOlapJson::SimpleExistsVariants[1,true,1,1000,100,0] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,10,0,0] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,10,0,0.5] >> KqpNotNullColumns::InsertNotNullPkPg+useSink [GOOD] >> KqpNotNullColumns::InsertNotNullPkPg-useSink >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table >> TxUsage::WriteToTopic_Demo_18_RestartNo_Query >> TxUsage::WriteToTopic_Demo_27_Table >> ListObjectsInS3Export::ExportWithEncryption [GOOD] >> KqpNewEngine::UpdateFromParams [GOOD] >> KqpNewEngine::UpsertEmptyInput >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink+UseDataQuery >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,1000,0,0.5] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,1000,100,0] >> KqpOlapJson::EmptyVariants[1,true,1024,1000,100,0.5] [GOOD] >> KqpReturning::ReturningTwice [GOOD] >> KqpReturning::ReplaceSerial >> ListObjectsInS3Export::ExportWithWrongEncryptionKey >> KqpNewEngine::JoinPure [GOOD] >> KqpNewEngine::JoinPureUncomparableKeys >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_create_queue[tables_format_v1-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_create_queue_generates_event[tables_format_v0] >> KqpNewEngine::BlindWriteListParameter [GOOD] >> KqpNewEngine::BrokenLocksAtROTx >> KqpOlapJson::SimpleExistsVariants[1,true,1,1000,100,0] [GOOD] >> KqpOlapJson::SimpleExistsVariants[1,true,1,1000,100,0.5] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_create_queue_generates_event[tables_format_v0] [SKIPPED] >> KqpNotNullColumns::InsertNotNullPkPg-useSink [GOOD] >> KqpNotNullColumns::InsertNotNullPg+useSink |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::EmptyVariants[1,true,1024,1000,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 31432, MsgBus: 3690 2025-06-30T09:26:02.933872Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671265397659384:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:02.933908Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003da6/r3tmp/tmpjXYwJH/pdisk_1.dat 2025-06-30T09:26:02.995645Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:02.995811Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671265397659264:2080] 1751275562930872 != 1751275562930875 TServer::EnableGrpc on GrpcPort 31432, node 1 2025-06-30T09:26:03.024244Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:03.024271Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-30T09:26:03.024273Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:03.024319Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:03.034581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:03.034620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:03.035516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3690 TClient is connected to server localhost:3690 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:03.123575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:03.126004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:26:03.391880Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671269692627183:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:03.391915Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:03.442220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:26:03.466021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:03.466110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:26:03.466172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:26:03.466194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:26:03.466226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:26:03.466257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:26:03.466284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:26:03.466312Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:26:03.466340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:26:03.466372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:26:03.466402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:26:03.466430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671269692627242:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:26:03.471314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:26:03.471338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:26:03.471363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:26:03.471369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:26:03.471395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:26:03.471401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:26:03.471415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:26:03.471421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:26:03.471429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:26:03.471436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:26:03.471463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:03.471469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:03.471492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:03.471501Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:03.471516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:03.471523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switch ... ription=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:16.331014Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:16.331036Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:16.331044Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:16.331053Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:16.331060Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:26:16.331065Z node 7 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:26:16.331069Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:26:16.331072Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:26:16.331120Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:26:16.331129Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:26:16.331140Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:26:16.331144Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:26:16.374339Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[7:7521671327306563479:2294];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=89855994959488;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275576374;max=18446744073709551615;plan=0;src=[7:7521671323011595835:2144];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:16.375799Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:16.376949Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `COMPACTION_PLANNER.CLASS_NAME`=`l-buckets`) 2025-06-30T09:26:16.380808Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671327306563551:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.380830Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.382855Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:16.385703Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:26:16.389671Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671327306563581:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.389695Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.392258Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:16.400200Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`, `COLUMNS_LIMIT`=`1024`, `SPARSED_DETECTOR_KFF`=`1000`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:26:16.404519Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671327306563611:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.404544Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.407444Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:16.415487Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1) VALUES (1u), (2u), (3u), (4u) 2025-06-30T09:26:16.420145Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671327306563647:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.420145Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671327306563642:2321], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.420165Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.421022Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:16.423552Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671327306563649:2325], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:26:16.483785Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671327306563700:2447] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:16.500632Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=10;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:16.503653Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715665;tx_id=281474976715665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715665; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1) VALUES (11u), (12u), (13u), (14u) 2025-06-30T09:26:16.520587Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=16;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:16.523036Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; WAIT_COMPACTION: 0 WAIT_COMPACTION: 0 2025-06-30T09:26:16.868043Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WAIT_COMPACTION: 0 COMPACTION_HAPPENED: 0 -> 1 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1; COMPARE: [[1u;#];[2u;#];[3u;#];[4u;#];[11u;#];[12u;#];[13u;#];[14u;#]] OUTPUT: [[1u;#];[2u;#];[3u;#];[4u;#];[11u;#];[12u;#];[13u;#];[14u;#]] INDEX:0/0/0 HEADER:0/0/0 >> test_queues_managing.py::TestQueuesManagingWithTenant::test_ya_count_queues[tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue_generates_event[tables_format_v1] |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_counters.py::TestSqsGettingCounters::test_purge_queue_counters [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_ya_count_queues[tables_format_v1] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue_generates_event[tables_format_v1] [SKIPPED] >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,1000,100,0] [GOOD] >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,1000,100,0.5] >> KqpNewEngine::UpsertEmptyInput [GOOD] >> KqpNotNullColumns::AlterAddNotNullColumn >> SnapshotTesting::Compaction [GOOD] >> SpaceCheckForDiskReassign::Basic >> ListObjectsInS3Export::ExportWithWrongEncryptionKey [GOOD] >> KqpOlapJson::SimpleExistsVariants[1,true,1,1000,100,0.5] [GOOD] >> KqpReturning::ReplaceSerial [GOOD] >> KqpReturning::ReturningSerial |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> 
test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue[tables_format_v0-std] [GOOD] >> KqpNewEngine::JoinPureUncomparableKeys [GOOD] >> KqpNewEngine::JoinWithPrecompute >> KqpNotNullColumns::InsertNotNullPg+useSink [GOOD] >> KqpNotNullColumns::InsertNotNullPg-useSink >> KqpNewEngine::BrokenLocksAtROTx [GOOD] >> KqpNewEngine::BrokenLocksAtROTxSharded >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,10,0,0.5] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,10,100,0] >> test_acl.py::TestSqsACLWithPath::test_apply_permissions[tables_format_v0] [GOOD] >> test_acl.py::TestSqsACLWithPath::test_apply_permissions[tables_format_v1] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::SimpleExistsVariants[1,true,1,1000,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 28305, MsgBus: 25158 2025-06-30T09:26:12.708222Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671310999223138:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:12.708241Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003ba5/r3tmp/tmpsfgaua/pdisk_1.dat 2025-06-30T09:26:12.794198Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28305, node 1 2025-06-30T09:26:12.810674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:12.810703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:12.811128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:12.811132Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:12.811135Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:12.811185Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:12.812102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25158 TClient is connected to server localhost:25158 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:12.912320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:12.916415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-30T09:26:13.203757Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671315294191015:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:13.203785Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:13.253730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:26:13.264317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:13.264398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:26:13.264450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:26:13.264482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:26:13.264515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:26:13.264551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:26:13.264578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:26:13.264604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:26:13.264630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:26:13.264649Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:26:13.264672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:26:13.264691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671315294191075:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:26:13.265726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:26:13.265739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:26:13.265764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:26:13.265773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:26:13.265812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:26:13.265822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:26:13.265851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:26:13.265860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:26:13.265875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:26:13.265885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:26:13.265930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:13.265940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:13.265974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:13.265984Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:13.266008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:13.266017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:26:13.266030Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema: ... ription=CLASS_NAME=Granules;id=1; 2025-06-30T09:26:18.498928Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:26:18.498947Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:26:18.498953Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:26:18.498980Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:26:18.498986Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:26:18.499001Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:26:18.499013Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:26:18.499022Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:26:18.499034Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:26:18.499064Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:18.499075Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:18.499100Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:18.499113Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:18.499130Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:18.499141Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:26:18.499150Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:26:18.499158Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:26:18.499165Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:26:18.499236Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:26:18.499247Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:26:18.499264Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:26:18.499269Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:26:18.546687Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521671337110563204:2294];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=124184626028192;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275578546;max=18446744073709551615;plan=0;src=[6:7521671337110562859:2145];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:18.548329Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:18.549337Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 
2025-06-30T09:26:18.553541Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671337110563277:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:18.553575Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:18.557203Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:18.562440Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`, `COLUMNS_LIMIT`=`1`, `SPARSED_DETECTOR_KFF`=`1000`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:26:18.566079Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671337110563307:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:18.566106Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:18.568687Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:18.570813Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3"}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}')) 2025-06-30T09:26:18.575670Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671337110563338:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:18.575698Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:18.575746Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671337110563343:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:18.576625Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:18.580264Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671337110563345:2320], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:26:18.631911Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671337110563396:2426] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:18.654122Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_EXISTS(Col2, "$.a") ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\"}"]];[2u;["{\"a\":\"a2\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] OUTPUT: [[1u;["{\"a\":\"a1\"}"]];[2u;["{\"a\":\"a2\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] INDEX:0/0/0 HEADER:0/0/0 >> ListObjectsInS3Export::PagingParameters >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,1000,100,0.5] [GOOD] >> test_acl.py::TestSqsACLWithPath::test_apply_permissions[tables_format_v1] [GOOD] >> KqpNotNullColumns::AlterAddNotNullColumn [GOOD] >> KqpNotNullColumns::AlterAddNotNullColumnPg >> test_acl.py::TestSqsACLWithPath::test_modify_permissions[tables_format_v0] >> KqpNewEngine::StaleRO-EnableFollowers [GOOD] >> KqpNewEngine::StaleRO_Immediate >> KqpNotNullColumns::InsertNotNullPg-useSink [GOOD] >> KqpNotNullColumns::JoinBothTablesWithNotNullPk+StreamLookup >> test_queues_managing.py::TestQueuesManagingWithPath::test_ya_count_queues[tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_ya_count_queues[tables_format_v1] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::RestoreFirstLevelVariants[2,true,1024,1000,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 65347, MsgBus: 25542 2025-06-30T09:26:13.161515Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671314769730968:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:13.161574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003da2/r3tmp/tmp4wsJtR/pdisk_1.dat 2025-06-30T09:26:13.254528Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671314769730808:2080] 1751275573159216 != 1751275573159219 2025-06-30T09:26:13.261508Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65347, node 1 2025-06-30T09:26:13.276315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:13.276330Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:13.276334Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-30T09:26:13.276395Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25542 2025-06-30T09:26:13.318114Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:13.318144Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:13.319144Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25542 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:13.386662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:13.389997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2); 2025-06-30T09:26:13.730531Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671314769731431:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:13.730568Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:13.779366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:26:13.791537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:13.791637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:26:13.791698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:26:13.791725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:26:13.791750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:26:13.791780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:26:13.791807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:26:13.791834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:26:13.791863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:26:13.791888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:26:13.791917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:26:13.791943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671314769731495:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:26:13.793149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:26:13.793164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:26:13.793182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:26:13.793188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:26:13.793213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:26:13.793219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:26:13.793489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:26:13.793501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:26:13.793511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:26:13.793517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:26:13.793548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:13.793554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:13.793579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:13.793586Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:13.793619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:13.793626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_swi ... Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:18.991765Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:18.991787Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:18.991801Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:18.991815Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:18.991823Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-30T09:26:18.991831Z node 6 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-30T09:26:18.991838Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-30T09:26:18.991844Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-30T09:26:18.991899Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:26:18.991908Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:26:18.991921Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:26:18.991939Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:26:18.993412Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[6:7521671334567474682:2295];ev=NActors::IEventHandle;tablet_id=72075186224037889;tx_id=281474976715658;this=53721356709312;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275578993;max=18446744073709551615;plan=0;src=[6:7521671334567474348:2155];cookie=22:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:18.993971Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521671334567474681:2294];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=53721356706432;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275578993;max=18446744073709551615;plan=0;src=[6:7521671334567474348:2155];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:18.996528Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:18.996529Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:18.997775Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:26:18.998013Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:26:19.002082Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671338862442080:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:19.002112Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:19.004866Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:19.011052Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:19.011062Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `SCAN_FIRST_LEVEL_ONLY`=`true`, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`, `COLUMNS_LIMIT`=`1024`, `SPARSED_DETECTOR_KFF`=`1000`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:26:19.016375Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671338862442114:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:19.016405Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:19.020103Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:19.025268Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:19.025283Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1", "d" : null, "e.v" : {"c" : 1, "e" : {"c.a" : 2}}}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3", "d" : "d3", "e" : ["a", {"v" : ["c", 5]}]}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}')) 2025-06-30T09:26:19.033781Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671338862442149:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:19.033814Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:19.033870Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671338862442154:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:19.034826Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:19.040286Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671338862442156:2324], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:26:19.116917Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671338862442207:2453] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:19.138923Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:26:19.139011Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":\"{\\\"c\\\":1,\\\"e\\\":{\\\"c.a\\\":2}}\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":\"[\\\"a\\\",{\\\"v\\\":[\\\"c\\\",5]}]\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] OUTPUT: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":\"{\\\"c\\\":1,\\\"e\\\":{\\\"c.a\\\":2}}\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":\"[\\\"a\\\",{\\\"v\\\":[\\\"c\\\",5]}]\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] INDEX:0/0/0 HEADER:0/0/0 >> test_acl.py::TestSqsACLWithPath::test_modify_permissions[tables_format_v0] [GOOD] >> test_acl.py::TestSqsACLWithPath::test_modify_permissions[tables_format_v1] >> KqpLimits::TooBigQuery-useSink [GOOD] >> KqpLimits::WaitCAsStateOnAbort >> KqpReturning::ReturningSerial [GOOD] >> KqpReturning::ReturningWorks+QueryService >> KqpNewEngine::BrokenLocksAtROTxSharded [GOOD] >> KqpNewEngine::BrokenLocksOnUpdate >> KqpNewEngine::JoinWithPrecompute [GOOD] >> KqpNewEngine::JoinProjectMulti >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue_batch[tables_format_v1] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_queues_count_over_limit[tables_format_v0] >> test_acl.py::TestSqsACLWithPath::test_modify_permissions[tables_format_v1] [GOOD] >> KqpQuery::ReadOverloaded-StreamLookup [GOOD] >> TTopicApiDescribes::DescribeTopic [GOOD] >> KqpNewEngine::StaleRO_Immediate [GOOD] >> KqpNewEngine::StaleRO_IndexFollowers+EnableFollowers >> KqpNotNullColumns::JoinBothTablesWithNotNullPk+StreamLookup [GOOD] >> KqpNotNullColumns::JoinBothTablesWithNotNullPk-StreamLookup >> KqpNotNullColumns::AlterAddNotNullColumnPg [GOOD] >> KqpNotNullColumns::AlterDropNotNullColumn >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,10,100,0] [GOOD] >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,10,100,0.5] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpQuery::ReadOverloaded-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 23021, MsgBus: 16265 2025-06-30T09:25:35.735286Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671152521010878:2138];send_to=[0:7307199536658146131:7762515]; 
2025-06-30T09:25:35.735302Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004211/r3tmp/tmpSywLXu/pdisk_1.dat 2025-06-30T09:25:35.839290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:35.839323Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:35.847531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:35.856649Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23021, node 1 2025-06-30T09:25:35.882977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:35.882994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:35.882996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:35.883047Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16265 TClient is connected to server localhost:16265 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:36.080506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:36.090141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:36.100108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:36.190464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:36.236228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:36.273231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:36.468805Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671156815979673:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.468856Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.521613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:36.534032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:36.560668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:36.574665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:36.589333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:36.606291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:36.627676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:36.664190Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671156815980324:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.664222Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.664404Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671156815980329:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.665509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:36.677520Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671156815980331:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:36.734920Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671156815980385:3402] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:36.738546Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 4988, MsgBus: 12808 2025-06-30T09:25:37.259642Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671161786438770:2246];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004211/r3tmp/tmpRiiwzB/pdisk_1.dat 2025-06-30T09:25:37.275034Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:25:37.284685Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4988, node 2 2025-06-30T09:25:37.289292Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mism ... EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:15.737629Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:15.738955Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:15.815656Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:16.014856Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:16.130796Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:16.308114Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:16.541856Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:16.844052Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1682:3277], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.844125Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.849225Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.017415Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.255871Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.472437Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.707274Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.919111Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:18.199220Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:18.438348Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:2353:3772], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:18.438410Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:18.438537Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:2358:3777], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:18.440215Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:18.592879Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:2360:3779], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:18.637094Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:2418:3818] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:18.863117Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:19.053857Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:19.338992Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:20.585758Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [4:3106:4332], TxId: 281474976715675, task: 1. Ctx: { TraceId : 01jz02ha76bqj414qk9s6bwvxd. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=MWY4ZjYwYjktYWI1MmIxMmEtNGVjZmVmYjAtZmMwYzgzYTA=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Source[0] fatal error: {
: Error: Table '/Root/SecondaryKeys' retry limit exceeded. } 2025-06-30T09:26:20.585807Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [4:3106:4332], TxId: 281474976715675, task: 1. Ctx: { TraceId : 01jz02ha76bqj414qk9s6bwvxd. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=MWY4ZjYwYjktYWI1MmIxMmEtNGVjZmVmYjAtZmMwYzgzYTA=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: OVERLOADED DEFAULT_ERROR: {
: Error: Table '/Root/SecondaryKeys' retry limit exceeded. }. 2025-06-30T09:26:20.586172Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:3107:4333], TxId: 281474976715675, task: 2. Ctx: { TraceId : 01jz02ha76bqj414qk9s6bwvxd. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=MWY4ZjYwYjktYWI1MmIxMmEtNGVjZmVmYjAtZmMwYzgzYTA=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:3100:4001], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-06-30T09:26:20.586342Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=MWY4ZjYwYjktYWI1MmIxMmEtNGVjZmVmYjAtZmMwYzgzYTA=, ActorId: [4:2660:4001], ActorState: ExecuteState, TraceId: 01jz02ha76bqj414qk9s6bwvxd, Create QueryResponse for error on request, msg: ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::DescribeTopic [GOOD] Test command err: 2025-06-30T09:26:11.853477Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671307569172605:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:11.853589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:26:11.865092Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671306037658802:2158];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0040f0/r3tmp/tmpxUI593/pdisk_1.dat 2025-06-30T09:26:11.915420Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:26:11.920094Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:26:11.919025Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:26:12.007400Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:12.025817Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:12.025854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:12.042512Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:26:12.043644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16905, node 1 2025-06-30T09:26:12.149139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:12.149172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:12.151068Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/0040f0/r3tmp/yandexAkuLE1.tmp 2025-06-30T09:26:12.151079Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/0040f0/r3tmp/yandexAkuLE1.tmp 2025-06-30T09:26:12.151225Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/0040f0/r3tmp/yandexAkuLE1.tmp 2025-06-30T09:26:12.151280Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:12.151501Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:12.236464Z INFO: TTestServer started on Port 26176 GrpcPort 16905 TClient is connected to server localhost:26176 PQClient connected to localhost:16905 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:12.359583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:26:12.379734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:12.492158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:26:12.519319Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671310332626274:2268], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:12.519348Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:12.518770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671311864140791:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:12.518800Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:12.524840Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671310332626286:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:12.530039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671311864140803:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:12.531383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:12.538551Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521671310332626289:2122] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:12.544236Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671311864140805:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:26:12.544429Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521671310332626288:2272], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:26:12.622373Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521671310332626316:2128] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:12.623287Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671311864140900:2781] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:12.823265Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521671310332626331:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:26:12.826175Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521671311864140911:2312], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:26:12.826992Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YjM4NWNiYmQtZThmMzZkZDctYjc4OTU5NzktM2UyNmU2MTI=, ActorId: [1:7521671311864140789:2296], ActorState: ExecuteState, TraceId: 01jz02h396dfg76d8rvnfwasp3, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:26:12.827142Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:26:12.824899Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=NmI3NDI5Y2YtOGQ2OWNmNTItMTRlZTZjNzEtNWQ4NDUyMTU=, ActorId: [ ... attributes { key: "_message_group_seqno_retention_period_ms" value: "1382400000" } consumers { name: "shared/user" read_from { } attributes { key: "_service_type" value: "data-streams" } consumer_stats { min_partitions_last_read_time { seconds: 1751275579 nanos: 406000000 } max_read_time_lag { } max_write_time_lag { } bytes_read { } max_committed_time_lag { } } } topic_stats { min_last_write_time { seconds: 1751275579 nanos: 402000000 } max_write_time_lag { } bytes_written { } } } } } Describe topic with location 2025-06-30T09:26:20.349082Z node 1 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:26:20.349110Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//rt3.dc1--topic-x 2025-06-30T09:26:20.349241Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7521671346223881512:2562]: Request location 2025-06-30T09:26:20.349313Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7521671346223881514:2563] connected; active server actors: 1 2025-06-30T09:26:20.349331Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 1, Generation 2 2025-06-30T09:26:20.349334Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 2, Generation 2 2025-06-30T09:26:20.349337Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 2, NodeId 1, Generation 2 2025-06-30T09:26:20.349340Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 1, Generation 2 2025-06-30T09:26:20.349343Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse 
tabletId 72075186224037899, partitionId 4, NodeId 2, Generation 2 2025-06-30T09:26:20.349345Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 2, Generation 2 2025-06-30T09:26:20.349347Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 6, NodeId 2, Generation 2 2025-06-30T09:26:20.349350Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 7, NodeId 1, Generation 2 2025-06-30T09:26:20.349353Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 8, NodeId 1, Generation 2 2025-06-30T09:26:20.349356Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 9, NodeId 1, Generation 2 2025-06-30T09:26:20.349358Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 10, NodeId 2, Generation 2 2025-06-30T09:26:20.349360Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 11, NodeId 2, Generation 2 2025-06-30T09:26:20.349363Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 12, NodeId 1, Generation 2 2025-06-30T09:26:20.349365Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 13, NodeId 1, Generation 2 2025-06-30T09:26:20.349367Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 14, NodeId 2, Generation 2 2025-06-30T09:26:20.349377Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7521671346223881512:2562]: Got location 2025-06-30T09:26:20.349413Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7521671346223881514:2563] disconnected; active server actors: 1 2025-06-30T09:26:20.349425Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7521671346223881514:2563] disconnected no session Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeTopicResult] { self { name: "rt3.dc1--topic-x" owner: "root@builtin" type: TOPIC created_at { plan_step: 1751275579396 tx_id: 281474976715682 } } partitioning_settings { min_active_partitions: 15 max_active_partitions: 1 auto_partitioning_settings { strategy: AUTO_PARTITIONING_STRATEGY_DISABLED partition_write_speed { stabilization_window { seconds: 300 } up_utilization_percent: 80 down_utilization_percent: 20 } } } partitions { active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 1 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 2 active: true partition_location { node_id: 1 
generation: 2 } } partitions { partition_id: 3 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 4 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 5 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 6 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 7 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 8 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 9 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 10 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 11 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 12 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 13 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 14 active: true partition_location { node_id: 2 generation: 2 } } retention_period { seconds: 64800 } partition_write_speed_bytes_per_second: 2097152 partition_write_burst_bytes: 2097152 attributes { key: "__max_partition_message_groups_seqno_stored" value: "6000000" } attributes { key: "_message_group_seqno_retention_period_ms" value: "1382400000" } consumers { name: "shared/user" read_from { } attributes { key: "_service_type" value: "data-streams" } } } } } Describe topic with no stats or location 2025-06-30T09:26:20.350408Z node 1 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:26:20.350457Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//rt3.dc1--topic-x Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeTopicResult] { self { name: "rt3.dc1--topic-x" owner: "root@builtin" type: TOPIC created_at { plan_step: 1751275579396 tx_id: 281474976715682 } } partitioning_settings { min_active_partitions: 15 max_active_partitions: 1 auto_partitioning_settings { strategy: AUTO_PARTITIONING_STRATEGY_DISABLED partition_write_speed { stabilization_window { seconds: 300 } up_utilization_percent: 80 down_utilization_percent: 20 } } } partitions { active: true } partitions { partition_id: 1 active: true } partitions { partition_id: 2 active: true } partitions { partition_id: 3 active: true } partitions { partition_id: 4 active: true } partitions { partition_id: 5 active: true } partitions { partition_id: 6 active: true } partitions { partition_id: 7 active: true } partitions { partition_id: 8 active: true } partitions { partition_id: 9 active: true } partitions { partition_id: 10 active: true } partitions { partition_id: 11 active: true } partitions { partition_id: 12 active: true } partitions { partition_id: 13 active: true } partitions { partition_id: 14 active: true } retention_period { seconds: 64800 } partition_write_speed_bytes_per_second: 2097152 partition_write_burst_bytes: 2097152 attributes { key: "__max_partition_message_groups_seqno_stored" value: "6000000" } attributes { key: "_message_group_seqno_retention_period_ms" value: "1382400000" } consumers { name: "shared/user" read_from { } attributes { key: "_service_type" value: "data-streams" } } } } } Describe bad topic 2025-06-30T09:26:20.351510Z node 1 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:26:20.351538Z node 1 
:PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//bad-topic Got response: operation { ready: true status: SCHEME_ERROR issues { message: "path \'Root/PQ/bad-topic\' does not exist or you do not have access rights" issue_code: 500018 severity: 1 } } >> KqpLimits::ComputeNodeMemoryLimit [GOOD] >> KqpNewEngine::BrokenLocksOnUpdate [GOOD] >> KqpNewEngine::ComplexLookupLimit >> KqpNewEngine::JoinProjectMulti [GOOD] >> KqpNewEngine::JoinMultiConsumer >> KqpNotNullColumns::AlterDropNotNullColumn [GOOD] >> KqpNotNullColumns::AlterAddIndex >> KqpReturning::ReturningWorks+QueryService [GOOD] >> KqpReturning::ReturningWorks-QueryService >> TRtmrTest::CreateWithoutTimeCastBuckets |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_deduplication_table[tables_format_v0] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_deduplication_table[tables_format_v1] >> KqpNotNullColumns::JoinBothTablesWithNotNullPk-StreamLookup [GOOD] |84.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpLimits::ComputeNodeMemoryLimit [GOOD] Test command err: Trying to start YDB, gRPC: 63628, MsgBus: 19539 2025-06-30T09:25:36.114690Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671156204110936:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:36.114833Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00420f/r3tmp/tmpXm4SxQ/pdisk_1.dat 2025-06-30T09:25:36.190213Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:36.190316Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671156204110721:2080] 1751275536111415 != 1751275536111418 TServer::EnableGrpc on GrpcPort 63628, node 1 2025-06-30T09:25:36.205216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:36.205236Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:36.205239Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:36.205296Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:36.212215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-30T09:25:36.212249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:36.213256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19539 TClient is connected to server localhost:19539 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:36.288491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:36.301697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:36.659394Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671156204111684:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.659435Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.659649Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671156204111696:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:36.660674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:36.667850Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671156204111698:2319], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:36.728026Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671156204111749:2552] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:36.831275Z node 1 :KQP_COMPUTE WARN: log.cpp:784: fline=kqp_compute_actor_factory.cpp:41;problem=cannot_allocate_memory;tx_id=281474976715661;task_id=2;memory=1048576; 2025-06-30T09:25:36.831297Z node 1 :KQP_COMPUTE WARN: dq_compute_memory_quota.h:152: TxId: 281474976715661, task: 2. [Mem] memory 1048576 NOT granted 2025-06-30T09:25:36.833799Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7521671156204111792:2328], TxId: 281474976715661, task: 2. Ctx: { TraceId : 01jz02g08e2dwkhzzyqxdczawc. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODQyMjI5ODEtZWYxMTFjYmUtZTYyNDljYmMtMTg0YjM1MDk=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: OVERLOADED KIKIMR_PRECONDITION_FAILED: {
: Error: Mkql memory limit exceeded, allocated by task 2: 10, host: ghrun-x4qzxsyz2q, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976715661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-30T09:25:36.830449Z }, code: 2029 }. 2025-06-30T09:25:36.834624Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_channels.cpp:499: TxId: 281474976715661, task: 1. Output channel actor is unavailable 2025-06-30T09:25:36.834650Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7521671156204111790:2327], TxId: 281474976715661, task: 1. Ctx: { TraceId : 01jz02g08e2dwkhzzyqxdczawc. SessionId : ydb://session/3?node_id=1&id=ODQyMjI5ODEtZWYxMTFjYmUtZTYyNDljYmMtMTg0YjM1MDk=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7521671156204111791:2327], status: UNAVAILABLE, reason: {
: Error: Output channel actor is unavailable } 2025-06-30T09:25:36.835648Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=ODQyMjI5ODEtZWYxMTFjYmUtZTYyNDljYmMtMTg0YjM1MDk=, ActorId: [1:7521671156204111681:2313], ActorState: ExecuteState, TraceId: 01jz02g08e2dwkhzzyqxdczawc, Create QueryResponse for error on request, msg:
: Error: Mkql memory limit exceeded, allocated by task 2: 10, host: ghrun-x4qzxsyz2q, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976715661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-30T09:25:36.830449Z } , code: 2029 Trying to start YDB, gRPC: 15729, MsgBus: 25477 2025-06-30T09:25:37.077859Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671157808998052:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:37.078808Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00420f/r3tmp/tmpTOujOU/pdisk_1.dat 2025-06-30T09:25:37.097067Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:37.097388Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7521671157808998023:2080] 1751275537077569 != 1751275537077572 TServer::EnableGrpc on GrpcPort 15729, node 2 2025-06-30T09:25:37.116012Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:37.116028Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:37.116032Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:37.116096Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25477 TClient is connected to server localhost:25477 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:25:37.182997Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:37.183035Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025 ... 
node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:38.821922Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1922 TClient is connected to server localhost:1922 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:38.868233Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:38.874589Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.888071Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.915353Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.929504Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:39.211667Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671167597557106:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.211707Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.230029Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.238559Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.253049Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.267732Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.324097Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.338535Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.352182Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:39.376438Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671167597557758:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.376467Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671167597557763:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.376472Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:39.377411Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:39.379848Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671167597557765:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:39.463022Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671167597557816:3396] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:39.788816Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:43.787175Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7521671163302588236:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:43.787254Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:25:53.802258Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs 2025-06-30T09:25:53.802295Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:19.555489Z node 4 :KQP_EXECUTER WARN: kqp_literal_executer.cpp:103: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jz02g37r8ykjy0segm13kb7n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NGYyYzE5M2EtNTBhMGQwMDktMTU0M2QxMmUtYTlhZjRlOTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, memory limit exceeded. 2025-06-30T09:26:19.556694Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=NGYyYzE5M2EtNTBhMGQwMDktMTU0M2QxMmUtYTlhZjRlOTc=, ActorId: [4:7521671167597558079:2469], ActorState: ExecuteState, TraceId: 01jz02g37r8ykjy0segm13kb7n, Create QueryResponse for error on request, msg: 2025-06-30T09:26:19.556783Z node 4 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jz02g37r8ykjy0segm13kb7n", SessionId: ydb://session/3?node_id=4&id=NGYyYzE5M2EtNTBhMGQwMDktMTU0M2QxMmUtYTlhZjRlOTc=, Slow query, duration: 39.852118s, status: PRECONDITION_FAILED, user: UNAUTHENTICATED, results: 0b, text: "\n SELECT ToDict(\n ListMap(\n ListFromRange(0ul, 5000000ul),\n ($x) -> { RETURN AsTuple($x, $x + 1); }\n )\n );\n ", parameters: 0b
: Warning: Type annotation, code: 1030
:2:13: Warning: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At tuple, At function: SqlProjectItem, At lambda
:2:20: Warning: At function: ToDict
:3:17: Warning: At function: OrderedMap
:5:26: Warning: At lambda
:5:38: Warning: At tuple
:5:53: Warning: At function: +
:5:53: Warning: Integral type implicit bitcast: Uint64 and Int32, code: 1107
: Error: Memory limit exceeded, code: 2029 |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink+UseDataQuery |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::JoinBothTablesWithNotNullPk-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 10522, MsgBus: 14100 2025-06-30T09:26:15.572413Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671324505754877:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:15.572771Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000e0d/r3tmp/tmpSx9Rm0/pdisk_1.dat 2025-06-30T09:26:15.648860Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:15.659255Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 10522, node 1 2025-06-30T09:26:15.671040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:15.671057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:15.671059Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:15.671100Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:15.675238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:15.675268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:15.676479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14100 TClient is connected to server localhost:14100 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
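
Note: the slow query logged above (TraceId 01jz02g37r8ykjy0segm13kb7n) is shown as an escaped string; unescaped for readability (indentation approximate, so the :line:col positions in the warnings map onto this multi-line text):

    SELECT ToDict(
        ListMap(
            ListFromRange(0ul, 5000000ul),
            ($x) -> { RETURN AsTuple($x, $x + 1); }
        )
    );

The code-1107 warning at :5:53 points at the addition $x + 1: $x is Uint64 (ListFromRange(0ul, ...)) while the bare literal 1 is Int32, so the expression implicitly bitcasts; writing the literal with the unsigned suffix already used elsewhere in the query, e.g. AsTuple($x, $x + 1ul), would keep the arithmetic in Uint64 and silence the warning. The PRECONDITION_FAILED itself comes from materializing a 5,000,000-entry dictionary inside TKqpLiteralExecuter, which trips the literal executer's memory cap (code 2029) after the 39.852118 s run logged above.
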
2025-06-30T09:26:15.744168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:16.011982Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671328800722751:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.012014Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.050926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:16.077092Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671328800722851:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.077131Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.077140Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671328800722856:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.078042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:16.080191Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671328800722858:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:26:16.145954Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671328800722909:2388] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:16.185103Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521671328800722951:2314], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing key column in input: Key for table: /Root/TestInsertNotNullPk, code: 2029 2025-06-30T09:26:16.185218Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YTgwZTAzOWUtOTA4YWM4NjAtM2ViYjczY2ItNmM2OWMyMzg=, ActorId: [1:7521671328800722748:2288], ActorState: ExecuteState, TraceId: 01jz02h6vk5mtwhh5krdysf1w9, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-30T09:26:16.190273Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521671328800722960:2318], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:47: Error: Failed to convert type: Struct<'Key':Null,'Value':String> to Struct<'Key':Uint64,'Value':String?>
:1:47: Error: Failed to convert 'Key': Null to Uint64
:1:47: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-30T09:26:16.190392Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=YTgwZTAzOWUtOTA4YWM4NjAtM2ViYjczY2ItNmM2OWMyMzg=, ActorId: [1:7521671328800722748:2288], ActorState: ExecuteState, TraceId: 01jz02h6vtaw178ya22n202r2a, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 4799, MsgBus: 20683 2025-06-30T09:26:16.465170Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671329358800300:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:16.465189Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000e0d/r3tmp/tmpShUdcY/pdisk_1.dat 2025-06-30T09:26:16.480104Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4799, node 2 2025-06-30T09:26:16.500452Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:16.500469Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:16.500472Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:16.500522Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20683 TClient is connected to server localhost:20683 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:16.567602Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:16.567633Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:16.567994Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
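
Note: the two compile errors above correspond to two distinct write shapes against a NOT NULL primary key. A minimal sketch of statements of this kind, assuming a hypothetical table declaration reconstructed from the error text (the actual test DDL is not shown in this excerpt):

    -- assumed for illustration:
    -- CREATE TABLE `/Root/TestInsertNotNullPk` (Key Uint64 NOT NULL, Value String, PRIMARY KEY (Key));

    -- omits the key column entirely -> "Missing key column in input: Key", code 2029:
    UPSERT INTO `/Root/TestInsertNotNullPk` (Value) VALUES ("v1");

    -- supplies an untyped NULL for the NOT NULL Uint64 key -> produces the input struct
    -- Struct<'Key':Null,'Value':String> seen above and fails conversion, code 2031:
    UPSERT INTO `/Root/TestInsertNotNullPk` (Key, Value) VALUES (NULL, "v2");
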
2025-06-30T09:26:16.568695Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:16.569309Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:16.831318Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService ... Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 30133, MsgBus: 18384 2025-06-30T09:26:21.000907Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671347542754211:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:21.000930Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000e0d/r3tmp/tmpedlFyo/pdisk_1.dat 2025-06-30T09:26:21.013060Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30133, node 7 2025-06-30T09:26:21.023632Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:21.023646Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:21.023648Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:21.023709Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18384 TClient is connected to server localhost:18384 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:21.104466Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:21.104501Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:21.104906Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:21.105496Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:21.109386Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:21.120770Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:21.140822Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:21.153229Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:21.404616Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671347542755769:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.404636Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.414152Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:21.423182Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:21.435252Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:21.448435Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:21.463836Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:21.520709Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:21.533099Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:21.549906Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671347542756422:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.549934Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671347542756427:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.549961Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.550934Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:21.560259Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671347542756429:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:21.615653Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671347542756480:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:21.750457Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:21.833444Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,10,100,0.5] [GOOD] |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> KqpExtractPredicateLookup::SqlInJoin [GOOD] >> KqpKv::BulkUpsert >> KqpNewEngine::ComplexLookupLimit [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_ya_count_queues[tables_format_v1] [GOOD] >> KqpNewEngine::JoinMultiConsumer [GOOD] >> KqpNewEngine::JoinSameKey >> KqpNotNullColumns::AlterAddIndex [GOOD] >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestoration |84.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |84.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |84.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::SwitchAccessorCompactionVariants[2,true,1,10,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 15604, MsgBus: 13000 2025-06-30T09:26:10.372990Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671299460120042:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:10.374898Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003da3/r3tmp/tmpPR4j5B/pdisk_1.dat 2025-06-30T09:26:10.473448Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:10.473923Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671299460120019:2080] 1751275570372720 != 1751275570372723 TServer::EnableGrpc on GrpcPort 15604, node 1 2025-06-30T09:26:10.496591Z node 
1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:10.496604Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:10.496607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:10.496655Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13000 2025-06-30T09:26:10.543008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:10.543054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:10.544256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13000 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:10.717859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:10.721232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2); 2025-06-30T09:26:10.979077Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671299460120643:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:10.979123Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:11.018569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:26:11.048727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:11.048783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:26:11.048857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:26:11.048879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:26:11.048902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:26:11.048924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:26:11.048947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:26:11.048969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:26:11.048990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:26:11.049012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:26:11.049036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:26:11.049058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671303755088006:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:26:11.055199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:26:11.055224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-30T09:26:11.055241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-30T09:26:11.055248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-30T09:26:11.055276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-30T09:26:11.055282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-30T09:26:11.055296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-30T09:26:11.055303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-30T09:26:11.055312Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-30T09:26:11.055320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-30T09:26:11.055349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-30T09:26:11.055356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-30T09:26:11.055381Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-30T09:26:11.055390Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-30T09:26:11.055405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-30T09:26:11.055412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_swi ... fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:26:21.531796Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[6:7521671350670460611:2295];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=124720386992480;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275581531;max=18446744073709551615;plan=0;src=[6:7521671350670460264:2147];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:21.532015Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[6:7521671350670460610:2294];ev=NActors::IEventHandle;tablet_id=72075186224037889;tx_id=281474976715658;this=124720386992320;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275581531;max=18446744073709551615;plan=0;src=[6:7521671350670460264:2147];cookie=22:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:21.533742Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:21.533746Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:21.534982Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:26:21.535163Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `COMPACTION_PLANNER.CLASS_NAME`=`l-buckets`) 2025-06-30T09:26:21.539618Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671350670460712:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.539649Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.542244Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:21.545188Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:21.545188Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3"}')), (4u, JsonDocument('{"b" : "b4", "a" : "a4"}')) 2025-06-30T09:26:21.550994Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671350670460747:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.551043Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.551089Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521671350670460752:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.552134Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:21.560048Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671350670460754:2319], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-30T09:26:21.635362Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671350670460805:2426] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:21.649841Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=6;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:21.649841Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=6;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:21.655265Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-30T09:26:21.655329Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:26:21.661344Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:21.663762Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:26:21.663799Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`, `COLUMNS_LIMIT`=`1`, `SPARSED_DETECTOR_KFF`=`10`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:26:21.670983Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:21.677953Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715665; 
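
Note: the recurring "Resource pool default not found or you don't have access permissions" warnings in these runs are first-use bootstrap noise rather than test failures. The sequence above suggests the flow: the workload service probes .metadata/workload_manager/pools/default and gets NOT_FOUND, submits ESchemeOpCreateResourcePool, retries with the "completed, doublechecking" message, and the later schemereq "path exist, request accepts it" error confirms the create was already applied, i.e. the bootstrap is idempotent. A pool can also be created explicitly; a minimal sketch, assuming current YQL workload-manager syntax (pool name and parameter values are illustrative, not taken from this log):

    CREATE RESOURCE POOL default WITH (
        CONCURRENT_QUERY_LIMIT = 10,  -- illustrative value
        QUEUE_SIZE = 100              -- illustrative value
    );
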
2025-06-30T09:26:21.677959Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715665; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(11u, JsonDocument('{"a" : "1a1"}')), (12u, JsonDocument('{"a" : "1a2"}')), (13u, JsonDocument('{"b" : "1b3"}')), (14u, JsonDocument('{"b" : "1b4", "a" : "a4"}')) 2025-06-30T09:26:21.699415Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=18;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:21.699425Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=18;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:21.705004Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-30T09:26:21.705092Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1) VALUES(10u) 2025-06-30T09:26:21.725527Z node 6 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=26;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:21.729029Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; WAIT_COMPACTION: 0 WAIT_COMPACTION: 0 2025-06-30T09:26:22.127746Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WAIT_COMPACTION: 0 COMPACTION_HAPPENED: 0 -> 2 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4\"}"]];[10u;#];[11u;["{\"a\":\"1a1\"}"]];[12u;["{\"a\":\"1a2\"}"]];[13u;["{\"b\":\"1b3\"}"]];[14u;["{\"a\":\"a4\",\"b\":\"1b4\"}"]]] OUTPUT: [[1u;["{\"a\":\"a1\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4\"}"]];[10u;#];[11u;["{\"a\":\"1a1\"}"]];[12u;["{\"a\":\"1a2\"}"]];[13u;["{\"b\":\"1b3\"}"]];[14u;["{\"a\":\"a4\",\"b\":\"1b4\"}"]]] INDEX:0/0/0 HEADER:0/0/0 >> TIncrementalRestoreWithRebootsTests::IncrementalRestoreStateRecoveryAfterReboot ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::ComplexLookupLimit [GOOD] Test command err: Trying to start YDB, gRPC: 29438, MsgBus: 14747 2025-06-30T09:26:14.418012Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671317006826344:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:14.418895Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00175d/r3tmp/tmp5QmhOK/pdisk_1.dat 2025-06-30T09:26:14.500264Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29438, node 1 2025-06-30T09:26:14.557055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:14.557069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:14.557071Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:14.557128Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:14.562723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:14.562770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:14.571698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14747 TClient is connected to server localhost:14747 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:14.655604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:14.658659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:14.664711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:26:14.744757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:14.799868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:14.862502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:15.009992Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671321301795204:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:15.010033Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:15.061513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.075559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.088263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.101365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.116128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.129296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.149566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.169365Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671321301795855:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:15.169391Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:15.169468Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671321301795860:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:15.170557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:15.173757Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671321301795862:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:15.263047Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671321301795913:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:15.420077Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 10853, MsgBus: 26990 2025-06-30T09:26:15.795635Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671321512715171:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:15.795670Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00175d/r3tmp/tmpg8o884/pdisk_1.dat 2025-06-30T09:26:15.813722Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10853, node 2 2025-06-30T09:26:15.823470Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, b ... 5;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=281474976715674; 2025-06-30T09:26:21.288228Z node 6 :TX_DATASHARD ERROR: datashard.cpp:751: Complete volatile write [1751275581328 : 281474976715674] from 72075186224037888 at tablet 72075186224037888, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } Trying to start YDB, gRPC: 27679, MsgBus: 10026 2025-06-30T09:26:21.548006Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671350948218259:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:21.548055Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00175d/r3tmp/tmpwjgVHz/pdisk_1.dat 2025-06-30T09:26:21.563669Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27679, node 7 2025-06-30T09:26:21.574045Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:21.574064Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:21.574065Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:21.574114Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10026 TClient is connected to server localhost:10026 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:21.648683Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:21.648711Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:21.649806Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:21.651646Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:21.660936Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:21.671839Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:21.691165Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:21.705963Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
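The KQP_WORKLOAD_SERVICE warnings that follow (and that already played out for nodes 1 and 6 above) are a benign first-query bootstrap race, not a test failure: the pool fetcher looks up /Root/.metadata/workload_manager/pools/default, gets NOT_FOUND, schedules a retry, and a creator actor builds the pool; creators that lose the race then see "path exist, request accepts it", which is treated as success. A minimal sketch of creating such a pool explicitly, assuming the standard YQL resource-pool DDL (the settings shown are illustrative assumptions, not taken from this log):

    CREATE RESOURCE POOL `default` WITH (
        CONCURRENT_QUERY_LIMIT = -1,  -- assumption: -1 disables the limit, matching the default pool
        QUEUE_SIZE = -1               -- assumption: unbounded wait queue
    );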
2025-06-30T09:26:21.939951Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671350948219810:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.939979Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:21.953130Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:21.975855Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:21.988666Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.015644Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.033353Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.053028Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.070648Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.098850Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671355243187760:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:22.098912Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:22.102845Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671355243187765:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:22.105865Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:22.121913Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:26:22.122845Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671355243187767:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:22.209904Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671355243187818:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:22.453195Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.549609Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TIncrementalRestoreWithRebootsTests::BasicIncrementalRestoreWithReboots |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::AlterAddIndex [GOOD] Test command err: Trying to start YDB, gRPC: 25457, MsgBus: 22410 2025-06-30T09:26:14.878870Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671319485930753:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:14.878976Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000e1e/r3tmp/tmpdpgw7d/pdisk_1.dat TServer::EnableGrpc on GrpcPort 25457, node 1 2025-06-30T09:26:14.954781Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:14.971030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:14.971045Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:14.971047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:14.971096Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:14.988220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:14.988252Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:14.989194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22410 TClient is connected to server localhost:22410 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:15.053245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:15.056736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:26:15.068860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.099787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:15.124166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:15.153722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:15.375471Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671323780899611:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:15.375498Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:15.424735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.435964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.450207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.463591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.472941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.485042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.499224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:15.527355Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671323780900261:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:15.527407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:15.527435Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671323780900266:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:15.528392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:15.533478Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671323780900268:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:15.626763Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671323780900319:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:15.887835Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 16749, MsgBus: 17859 2025-06-30T09:26:16.277904Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671327437099019:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:16.277931Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000e1e/r3tmp/tmpXwh2WO/pdisk_1.dat 2025-06-30T09:26:16.299653Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16749, node 2 2025-06-30T09:26:16.307098Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, b ... ot/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000e1e/r3tmp/tmpnxCWTG/pdisk_1.dat 2025-06-30T09:26:21.812176Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30000, node 7 2025-06-30T09:26:21.823459Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:21.823473Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:21.823475Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:21.823525Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63634 TClient is connected to server localhost:63634 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:21.895782Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:21.895823Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:21.896972Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:21.899937Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:21.907279Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:21.920069Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:21.982346Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:22.063564Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:22.084561Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.365072Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671352209938572:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:22.365100Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:22.375180Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.388497Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.401039Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.415908Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.429792Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.443709Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.458812Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.475397Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671352209939224:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:22.475434Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:22.475506Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671352209939229:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:22.476550Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:22.484734Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671352209939231:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:22.564118Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671352209939282:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:22.764888Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.790705Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:22.797977Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:22.804659Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> KqpKv::BulkUpsert [GOOD] >> KqpKv::ReadRows_ExternalBlobs+UseExtBlobsPrecharge >> TIncrementalRestoreWithRebootsTests::DatabaseConsistencyAfterMultipleReboots |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> KqpReturning::ReturningWorks-QueryService [GOOD] >> KqpReturning::ReturningColumnsOrder >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestorationEdgeCases >> TIncrementalRestoreWithRebootsTests::MultiTableIncrementalRestoreWithReboots >> KqpLimits::WaitCAsStateOnAbort [GOOD] >> KqpLimits::WaitCAsTimeout |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TRtmrTest::CreateWithoutTimeCastBuckets [GOOD] >> KqpNewEngine::JoinSameKey [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_ya_count_queues[tables_format_v1] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TRtmrTest::CreateWithoutTimeCastBuckets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:23.658900Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:23.667544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:23.667596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:23.667606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:23.667626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:23.667633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:23.667660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:23.667688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:23.674947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:23.683069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:23.793269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:23.793305Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:23.802004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:23.802145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:23.808851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:23.828607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:23.837637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:23.861629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.861920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:23.876413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:23.876501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 
2025-06-30T09:26:23.906907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:23.906971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:23.907033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:23.907057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:23.907065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:23.907105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.909679Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:23.954151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:23.954250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.955999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:23.956040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:23.957022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:23.957057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:23.961322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.963474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:23.968065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-30T09:26:23.968106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:23.968116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:23.968128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:23.969336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.969356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:23.969363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:23.969977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.970017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.977754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.977807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:23.986006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:23.987390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:23.987475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:24.020129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:24.020209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:24.020240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:24.030573Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:24.030630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:24.039369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:24.039456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:24.041094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:24.041113Z node 1 :FLAT_TX_SCHEMESHARD ... nd propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 100 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:24.108207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 100:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:100 msg type: 269090816 2025-06-30T09:26:24.108248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 100, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 100 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000002 2025-06-30T09:26:24.108355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:24.108388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 100 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:24.108401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_rtmr.cpp:130: TCreateRTMR TPropose, operationId: 100:0 HandleReply TEvOperationPlan, at schemeshard: 72057594046678944 2025-06-30T09:26:24.108437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 100:0 128 -> 240 2025-06-30T09:26:24.108491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:24.108508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:26:24.109251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:24.109267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, 
LocalPathId: 1] 2025-06-30T09:26:24.109333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:26:24.120096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:24.120148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-30T09:26:24.120169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 2 FAKE_COORDINATOR: Erasing txId 100 2025-06-30T09:26:24.120396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.120414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-30T09:26:24.120441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-30T09:26:24.120449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-30T09:26:24.120456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-30T09:26:24.120460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-30T09:26:24.120466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-30T09:26:24.120477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-30T09:26:24.120486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 100:0 2025-06-30T09:26:24.120492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 100:0 2025-06-30T09:26:24.120529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:26:24.120538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-30T09:26:24.120543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-30T09:26:24.120548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-30T09:26:24.120809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-30T09:26:24.120836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-30T09:26:24.120869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-30T09:26:24.120880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-30T09:26:24.120888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:26:24.121135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 100 2025-06-30T09:26:24.121148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 100 2025-06-30T09:26:24.121151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-30T09:26:24.121154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-30T09:26:24.121157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-30T09:26:24.121166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-30T09:26:24.122943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-30T09:26:24.123058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-30T09:26:24.123150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-30T09:26:24.123179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-30T09:26:24.123297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-30T09:26:24.123331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-30T09:26:24.123340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:316:2305] TestWaitNotification: OK eventTxId 100 
2025-06-30T09:26:24.123475Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/rtmr1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:24.123551Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/rtmr1" took 88us result status StatusSuccess 2025-06-30T09:26:24.123695Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/rtmr1" PathDescription { Self { Name: "rtmr1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeRtmrVolume CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 RTMRVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } RtmrVolumeDescription { Name: "rtmr1" PathId: 2 PartitionsCount: 0 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::JoinSameKey [GOOD] Test command err: Trying to start YDB, gRPC: 26721, MsgBus: 15383 2025-06-30T09:26:15.542068Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671321153836421:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:15.542094Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000e14/r3tmp/tmpg9f2jq/pdisk_1.dat 2025-06-30T09:26:15.630393Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26721, node 1 2025-06-30T09:26:15.689469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:15.689501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:15.690004Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:15.702911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:15.702929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:15.702931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:15.702989Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15383 TClient is connected to server localhost:15383 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:15.792639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:15.797623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:15.805049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:15.876631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:15.907746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:15.921757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:16.120593Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671325448805278:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.120627Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.179485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:16.191450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:16.207944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:16.265025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:16.276006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:16.290623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:16.304801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:16.321063Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671325448805932:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.321096Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671325448805937:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.321114Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.322265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:16.331341Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671325448805939:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:16.400032Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671325448805990:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:16.544014Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 14317, MsgBus: 13410 2025-06-30T09:26:16.849964Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671328827706833:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:16.849982Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000e14/r3tmp/tmppHhjCE/pdisk_1.dat 2025-06-30T09:26:16.869136Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14317, node 2 2025-06-30T09:26:16.881950Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, b ... f is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:22.406558Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671352641523707:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:22.489501Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671352641523758:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:22.737686Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 4960, MsgBus: 25501 2025-06-30T09:26:23.222929Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671358641165612:2154];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000e14/r3tmp/tmphaCIJB/pdisk_1.dat 2025-06-30T09:26:23.228125Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:26:23.241753Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4960, node 7 2025-06-30T09:26:23.253163Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:23.253182Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:23.253185Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:23.253255Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25501 TClient is connected to server localhost:25501 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:23.327429Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:23.327468Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:23.328153Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:23.328960Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:23.341542Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:23.375863Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:23.420767Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:23.434660Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:23.736987Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671358641167083:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:23.737022Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:23.747605Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:23.756959Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:23.766934Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:23.780540Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:23.795362Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:23.808957Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:23.823250Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:23.839671Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671358641167734:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:23.839713Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:23.839716Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671358641167739:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:23.840768Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:23.849181Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671358641167741:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:23.937615Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671358641167792:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue_batch[tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue_batch[tables_format_v1] >> test_queues_managing.py::TestQueuesManagingWithPath::test_queues_count_over_limit[tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_queues_count_over_limit[tables_format_v1] |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> KqpKv::ReadRows_ExternalBlobs+UseExtBlobsPrecharge [GOOD] >> KqpKv::ReadRows_ExternalBlobs-UseExtBlobsPrecharge >> KqpCost::ScanQueryRangeFullScan-SourceRead |84.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_rtmr/test-results/unittest/{meta.json ... results_accumulator.log} |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpReturning::ReturningColumnsOrder [GOOD] >> KqpReturning::ReturningTypes |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> TxUsage::WriteToTopic_Demo_18_RestartNo_Query [GOOD] >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestorationAfterMultipleReboots |84.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |84.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |84.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr/test-results/unittest/{meta.json ... 
results_accumulator.log} |84.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> KqpCost::QuerySeviceRangeFullScan >> TxUsage::WriteToTopic_Demo_19_RestartNo_Table |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan-SourceRead [GOOD] >> KqpReturning::ReturningTypes [GOOD] >> KqpCost::IndexLookupAndTake-useSink |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpKv::ReadRows_ExternalBlobs-UseExtBlobsPrecharge [GOOD] >> KqpKv::ReadRows_Decimal >> test_queues_managing.py::TestQueuesManagingWithPath::test_queues_count_over_limit[tables_format_v1] [GOOD] |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan-SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 26787, MsgBus: 5773 2025-06-30T09:26:25.149737Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671365683637978:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:25.150086Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0034eb/r3tmp/tmptQr9rt/pdisk_1.dat 2025-06-30T09:26:25.234781Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:25.247011Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-30T09:26:25.248470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:25.248497Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:25.250000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26787, node 1 2025-06-30T09:26:25.257557Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:25.257574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:25.257578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:25.257674Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5773 TClient is connected to server localhost:5773 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:25.363568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:25.368805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:26:25.381571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.450036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:25.471794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:25.484505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:25.632232Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671365683639461:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:25.632263Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:25.680218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.689308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.746092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.755153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.768010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.782367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.796492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.812323Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671365683640114:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:25.812347Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:25.812363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671365683640119:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:25.813208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:25.815773Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671365683640121:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:25.898956Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671365683640172:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:26.053115Z node 1 :KQP_GATEWAY DEBUG: kqp_metadata_loader.cpp:888: Load table metadata from cache by path, request Path: /Root/Test 2025-06-30T09:26:26.069897Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:37: Start KqpSnapshotManager at [1:7521671369978607707:2460] 2025-06-30T09:26:26.069915Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:58: KqpSnapshotManager: got snapshot request from [1:7521671369978607693:2460] 2025-06-30T09:26:26.079999Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:172: KqpSnapshotManager: snapshot 1751275586123:281474976715672 created 2025-06-30T09:26:26.080119Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:608: ActorId: [1:7521671369978607718:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hgfy5brba0ahqm78sc9b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2IwZjI5Nz ... rds: 0, average read rows: 3, average read bytes: 0, 2025-06-30T09:26:26.084856Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:430;event=wait_all_scanner_finished;scans=0; 2025-06-30T09:26:26.084866Z node 1 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:699: SelfId: [1:7521671369978607725:2469]. EVLOGKQP(max_in_flight:1) InFlightScans:InFlightShards:;wScans=0;wShards=0; {SHARD(72075186224037914):CHUNKS=1;D=0.000000s;PacksCount=1;RowsCount=3;BytesCount=0;MinPackSize=3;MaxPackSize=3;CAVG=0.000000s;CMIN=0.000000s;CMAX=0.000000s;}; 2025-06-30T09:26:26.084883Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7521671369978607722:2467], TxId: 281474976715673, task: 1. Ctx: { TraceId : 01jz02hgfy5brba0ahqm78sc9b. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-30T09:26:26.084889Z node 1 :KQP_COMPUTE DEBUG: log.h:466: kqp_scan_compute_actor.cpp:208 :TEvFetcherFinished: [1:7521671369978607725:2469] 2025-06-30T09:26:26.084897Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715673, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-30T09:26:26.084903Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7521671369978607723:2468], TxId: 281474976715673, task: 2. Ctx: { TraceId : 01jz02hgfy5brba0ahqm78sc9b. SessionId : ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646923 2025-06-30T09:26:26.084920Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715673, task: 2. 
Finish input channelId: 1, from: [1:7521671369978607722:2467] 2025-06-30T09:26:26.084923Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7521671369978607718:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hgfy5brba0ahqm78sc9b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7521671369978607722:2467], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 414 Tasks { TaskId: 1 CpuTimeUs: 309 Tables { TablePath: "/Root/Test" } ComputeCpuTimeUs: 15 BuildCpuTimeUs: 294 Sources { IngressName: "CS" Ingress { } } HostName: "ghrun-x4qzxsyz2q" NodeId: 1 CreateTimeMs: 1751275586082 CurrentWaitInputTimeUs: 22 UpdateTimeMs: 1751275586084 } MaxMemoryUsage: 1048576 } 2025-06-30T09:26:26.084936Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7521671369978607723:2468], TxId: 281474976715673, task: 2. Ctx: { TraceId : 01jz02hgfy5brba0ahqm78sc9b. SessionId : ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:26:26.084944Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:7521671369978607718:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hgfy5brba0ahqm78sc9b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [1:7521671369978607722:2467], CA [1:7521671369978607723:2468], 2025-06-30T09:26:26.084962Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 1. Tasks execution finished 2025-06-30T09:26:26.084969Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7521671369978607722:2467], TxId: 281474976715673, task: 1. Ctx: { TraceId : 01jz02hgfy5brba0ahqm78sc9b. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-30T09:26:26.084987Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 1. pass away 2025-06-30T09:26:26.085004Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715673;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-30T09:26:26.085006Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7521671369978607723:2468], TxId: 281474976715673, task: 2. Ctx: { TraceId : 01jz02hgfy5brba0ahqm78sc9b. SessionId : ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-30T09:26:26.085050Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7521671369978607718:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hgfy5brba0ahqm78sc9b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7521671369978607722:2467], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 736 Tasks { TaskId: 1 CpuTimeUs: 340 FinishTimeMs: 1751275586084 OutputRows: 1 OutputBytes: 19 Tables { TablePath: "/Root/Test" ReadRows: 3 ReadBytes: 96 } ComputeCpuTimeUs: 46 BuildCpuTimeUs: 294 Sources { IngressName: "CS" Ingress { } } HostName: "ghrun-x4qzxsyz2q" NodeId: 1 StartTimeMs: 1751275586084 CreateTimeMs: 1751275586082 UpdateTimeMs: 1751275586084 } MaxMemoryUsage: 1048576 } 2025-06-30T09:26:26.085066Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715673. Ctx: { TraceId: 01jz02hgfy5brba0ahqm78sc9b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7521671369978607722:2467] 2025-06-30T09:26:26.085086Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:7521671369978607718:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hgfy5brba0ahqm78sc9b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [1:7521671369978607723:2468], 2025-06-30T09:26:26.085092Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-30T09:26:26.085146Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:357: ActorId: [1:7521671369978607718:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hgfy5brba0ahqm78sc9b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [1:7521671369978607693:2460], seqNo: 1, nRows: 1 2025-06-30T09:26:26.085948Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:423: TxId: 281474976715673, send ack to channelId: 2, seqNo: 1, enough: 0, freeSpace: 8388470, to: [1:7521671369978607726:2468] 2025-06-30T09:26:26.085964Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7521671369978607723:2468], TxId: 281474976715673, task: 2. Ctx: { TraceId : 01jz02hgfy5brba0ahqm78sc9b. SessionId : ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. 
CA StateFunc 271646922 2025-06-30T09:26:26.085974Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715673, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-30T09:26:26.085975Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 2. Tasks execution finished 2025-06-30T09:26:26.085976Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7521671369978607723:2468], TxId: 281474976715673, task: 2. Ctx: { TraceId : 01jz02hgfy5brba0ahqm78sc9b. SessionId : ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-30T09:26:26.085990Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 2. pass away 2025-06-30T09:26:26.086003Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715673;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-30T09:26:26.086033Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 2. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-30T09:26:26.086042Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7521671369978607718:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hgfy5brba0ahqm78sc9b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7521671369978607723:2468], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1885 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 206 FinishTimeMs: 1751275586085 InputRows: 1 InputBytes: 19 OutputRows: 1 OutputBytes: 19 ResultRows: 1 ResultBytes: 19 ComputeCpuTimeUs: 68 BuildCpuTimeUs: 138 HostName: "ghrun-x4qzxsyz2q" NodeId: 1 CreateTimeMs: 1751275586082 UpdateTimeMs: 1751275586085 } MaxMemoryUsage: 1048576 } 2025-06-30T09:26:26.086055Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715673. Ctx: { TraceId: 01jz02hgfy5brba0ahqm78sc9b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7521671369978607723:2468] 2025-06-30T09:26:26.086096Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [1:7521671369978607718:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hgfy5brba0ahqm78sc9b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:26:26.086111Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [1:7521671369978607718:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hgfy5brba0ahqm78sc9b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2IwZjI5NzQtNGM4ODc2M2EtYzIwMjE1Yi05YzY4MjcyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.002621s ReadRows: 3 ReadBytes: 96 ru: 3 rate limiter was not found force flag: 1 2025-06-30T09:26:26.086375Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275586123, txId: 281474976715672] shutting down >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink >> KqpCost::QuerySeviceRangeFullScan [GOOD] >> KqpCost::ScanScriptingRangeFullScan-SourceRead ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpReturning::ReturningTypes [GOOD] Test command err: Trying to start YDB, gRPC: 17949, MsgBus: 9990 2025-06-30T09:26:16.544152Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671329441233941:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:16.544355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000dfd/r3tmp/tmpR6ElIF/pdisk_1.dat 2025-06-30T09:26:16.623893Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17949, node 1 2025-06-30T09:26:16.643069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:16.643117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:16.644264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:16.650983Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:16.651002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:16.651005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:16.651063Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9990 TClient is connected to server localhost:9990 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:16.739599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:26:16.753261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:16.785170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:16.847329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:16.867978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:16.959317Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671329441235423:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:16.959341Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:17.033441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.041698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.053230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.066671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.080958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.095277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.109035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.126456Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671333736203372:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:17.126491Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:17.126493Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671333736203377:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:17.127380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:17.131086Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671333736203379:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:26:17.191646Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671333736203430:3402] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:17.366705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.376676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:17.391418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation ... d: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:24.634229Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671363617489650:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:24.802529Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) [[[2];["321"]];[["111"];[2]]] Trying to start YDB, gRPC: 21541, MsgBus: 61471 2025-06-30T09:26:25.158586Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671364559892551:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:25.158609Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/000dfd/r3tmp/tmp1B9SXF/pdisk_1.dat 2025-06-30T09:26:25.179901Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21541, node 7 2025-06-30T09:26:25.193793Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:25.193810Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:25.193812Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:25.193876Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61471 2025-06-30T09:26:25.259087Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:25.259118Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:25.261162Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61471 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:25.281411Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:25.284845Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:25.289579Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:25.332493Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:25.395079Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:25.410272Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:25.659315Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671364559894133:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:25.659359Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:25.669002Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.677063Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.691030Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.705153Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.719161Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.733024Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.748358Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:25.763704Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671364559894787:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:25.763732Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:25.763736Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671364559894792:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:25.764577Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:25.774330Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671364559894794:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:25.833722Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671364559894845:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } |84.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::QuerySeviceRangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 28282, MsgBus: 25830 2025-06-30T09:26:25.529057Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671366207402664:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:25.529099Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0034c7/r3tmp/tmpbO3zto/pdisk_1.dat 2025-06-30T09:26:25.605152Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28282, node 1 2025-06-30T09:26:25.620315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:25.620330Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:25.620333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:25.620394Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:25.632619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:25.632646Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:25.633744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25830 TClient is connected to server localhost:25830 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:25.692541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:25.696882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:25.762181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:25.788398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:25.804476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:25.953657Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671366207404238:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:25.953692Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:26.008553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:26.017960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:26.026410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:26.041506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:26.055118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:26.070155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:26.083810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:26.099440Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671370502372186:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:26.099493Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:26.099517Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671370502372191:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:26.100369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:26.102441Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671370502372193:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:26:26.174503Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671370502372244:3401] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } |84.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |84.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |84.6%| [LD] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut >> KqpKv::ReadRows_Decimal [GOOD] |84.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |84.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |84.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table [GOOD] >> KqpCost::OlapPointLookup >> KqpCost::OltpWriteRow-isSink >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink+UseDataQuery [GOOD] |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpKv::ReadRows_Decimal [GOOD] Test command err: Trying to start YDB, gRPC: 14821, MsgBus: 7996 2025-06-30T09:25:44.311397Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671191265444309:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:44.311530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d28/r3tmp/tmpaD55ps/pdisk_1.dat 2025-06-30T09:25:44.406188Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:44.406274Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671191265444285:2080] 1751275544311157 != 1751275544311160 TServer::EnableGrpc on GrpcPort 14821, node 1 2025-06-30T09:25:44.450958Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:44.450974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:44.450976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:44.451018Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7996 2025-06-30T09:25:44.475725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-30T09:25:44.475770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:44.477142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7996 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:44.542592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:44.549373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:44.558083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.639836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.697765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:44.720347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:44.823050Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671191265445887:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.823086Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:44.892281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.951337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.963506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.972467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:44.991355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:45.008258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:45.070480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:45.093807Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671195560413845:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:45.093841Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:45.093959Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671195560413850:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:45.095299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:45.100528Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671195560413852:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:45.185560Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671195560413903:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:45.319170Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:45.487684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:45.501354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 720575940466 ... Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:24.401782Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:24.401816Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:24.401820Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:24.401912Z node 21 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:24.402063Z node 21 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [21:33:2080] 1751275583790224 != 1751275583790227 2025-06-30T09:26:24.465013Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:24.465070Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:24.465684Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:26:24.467191Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:24.568883Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) GRpc shutdown warning: failed to shutdown all connections, left infly: 1, spent: 0.001992 sec Trying to start YDB, gRPC: 21822, MsgBus: 6442 2025-06-30T09:26:25.587903Z node 22 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [22:289:2332], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-30T09:26:25.588004Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-30T09:26:25.588019Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d28/r3tmp/tmpEF4EjJ/pdisk_1.dat 2025-06-30T09:26:25.681728Z node 22 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 22 Type# 268639257 TServer::EnableGrpc on GrpcPort 21822, node 22 TClient is connected to server localhost:6442 TClient is connected to server localhost:6442 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:25.738984Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:25.739014Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:25.739020Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:25.739094Z node 22 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:25.739218Z node 22 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [22:33:2080] 1751275585114266 != 1751275585114270 2025-06-30T09:26:25.802705Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:25.802798Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:25.803272Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:26:25.804609Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:25.902114Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) GRpc shutdown warning: failed to shutdown all connections, left infly: 1, spent: 0.001864 sec Trying to start YDB, gRPC: 26115, MsgBus: 20950 2025-06-30T09:26:26.453424Z node 23 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[23:7521671370966595938:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:26.453494Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001d28/r3tmp/tmpIAQj9U/pdisk_1.dat 2025-06-30T09:26:26.472652Z node 23 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26115, node 23 2025-06-30T09:26:26.483713Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:26.483726Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:26.483729Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:26.483789Z node 23 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20950 TClient is connected to server localhost:20950 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:26.554075Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:26.554112Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:26.555375Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:26.559263Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:26.877946Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7521671370966596513:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:26.877976Z node 23 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:26.885041Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:26.916998Z node 23 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: Type mismatch, got type Uint64 for column Key22, but expected Decimal(22,9) 2025-06-30T09:26:26.917868Z node 23 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: Type mismatch, got type Decimal(35,10) for column Key22, but expected Decimal(22,9) >> KqpCost::IndexLookupAndTake-useSink [GOOD] >> KqpCost::ScanScriptingRangeFullScan-SourceRead [GOOD] >> KqpCost::IndexLookupJoin+StreamLookupJoin >> TIncrementalRestoreWithRebootsTests::IncrementalRestoreStateRecoveryAfterReboot [GOOD] >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink [GOOD] |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |84.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |84.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |84.7%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::Range >> TIncrementalRestoreWithRebootsTests::BasicIncrementalRestoreWithReboots [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink+UseDataQuery [GOOD] Test command err: Trying to start YDB, gRPC: 19523, MsgBus: 31047 2025-06-30T09:25:41.180861Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671178595385338:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:41.180881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ded/r3tmp/tmpXAhnIn/pdisk_1.dat 2025-06-30T09:25:41.267641Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19523, node 1 2025-06-30T09:25:41.285193Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:41.285219Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:41.285221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:41.285265Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31047 TClient is connected to server localhost:31047 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-30T09:25:41.339851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:41.339891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-30T09:25:41.341206Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:41.354664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:41.361345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:41.369255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:41.397703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:41.427873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:41.441540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:41.695117Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671178595386891:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.695163Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.756370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.771649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.833179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.848508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.862438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.871615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.886666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:41.903968Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671178595387544:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.903999Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.904080Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671178595387549:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:41.905062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:41.913105Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671178595387551:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:42.000448Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671178595387602:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:42.186807Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:42.217310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.229679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:42.242917Z node 1 ... leExistsActor;event=undelivered;self_id=[28:7521671369998488056:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:26.579334Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ded/r3tmp/tmpT1fCe5/pdisk_1.dat 2025-06-30T09:26:26.600151Z node 28 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7502, node 28 2025-06-30T09:26:26.613362Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:26.613382Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:26.613385Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:26.613447Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61968 TClient is connected to server localhost:61968 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:26.679692Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:26.679739Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:26.680970Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:26.685564Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:26:26.699004Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:26.712561Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:26.744715Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:26.762181Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:27.023840Z node 28 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [28:7521671374293456900:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.023869Z node 28 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.037303Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.047587Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.058210Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.077452Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.091473Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.107874Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.119397Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.138683Z node 28 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [28:7521671374293457551:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.138730Z node 28 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.138775Z node 28 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [28:7521671374293457556:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.139800Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:27.145903Z node 28 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [28:7521671374293457558:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:27.240760Z node 28 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [28:7521671374293457609:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:27.386102Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.396644Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.408903Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) [[["267f2d7f-45a1-4c10-add6-ef4edc40e982"]]] [[["267f2d7f-45a1-4c10-add6-ef4edc40e982"]]] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAndTake-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 5394, MsgBus: 19558 2025-06-30T09:26:26.481010Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671368567538940:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:26.481044Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0034a8/r3tmp/tmpitQPkb/pdisk_1.dat 2025-06-30T09:26:26.531996Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:26.534797Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671368567538915:2080] 1751275586480841 != 1751275586480844 TServer::EnableGrpc on GrpcPort 5394, node 1 2025-06-30T09:26:26.548269Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:26.548284Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:26.548287Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:26.548340Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:19558 2025-06-30T09:26:26.583616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:26.583653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:26.584716Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19558 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:26.619530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:26.625644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:26.637407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:26.712987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:26.751370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:26.766062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:26.882834Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671368567540536:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:26.882870Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:26.947200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:26.957777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.019375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.031029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.042596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.056925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.071848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.136091Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671372862508490:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.136144Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.136304Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671372862508495:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.137381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:27.139888Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671372862508497:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:27.210325Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671372862508549:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:27.402940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.482935Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; /Root/SecondaryKeys/Index/indexImplTable 2 16 /Root/SecondaryKeys 1 8 >> KqpCost::ScanQueryRangeFullScan+SourceRead |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_send_message[std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanScriptingRangeFullScan-SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 28796, MsgBus: 30644 2025-06-30T09:26:26.668915Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671370401696654:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:26.668939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003493/r3tmp/tmp1VsWya/pdisk_1.dat TServer::EnableGrpc on GrpcPort 28796, node 1 2025-06-30T09:26:26.732968Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:26.733572Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671370401696633:2080] 1751275586668777 != 1751275586668780 2025-06-30T09:26:26.738296Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:26.738307Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:26.738309Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:26.738346Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30644 2025-06-30T09:26:26.771302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:26.771335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:26.772345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Connecting -> Connected TClient is connected to server localhost:30644 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:26.812765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:26.822442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:26.898006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:26.921954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:26.936571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:27.070176Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671374696665527:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.070220Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.133142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.142697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.153800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.168280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.182561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.196021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.210544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.227064Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671374696666180:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.227103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671374696666185:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.227107Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.228087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:27.238541Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671374696666187:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:27.339366Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671374696666238:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:27.542001Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521671374696666493:2465] TxId: 281474976715673. Ctx: { TraceId: 01jz02hhxfbeb51npw480pjpcx, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjM5OGU2MzEtNjVkMmViYTgtODBhMzhjZjYtY2Y5NTUwZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-30T09:26:27.545258Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275587586, txId: 281474976715672] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TIncrementalRestoreWithRebootsTests::IncrementalRestoreStateRecoveryAfterReboot [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:26:23.687585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:23.687623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:23.687641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:23.687648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:23.687666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:23.687672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:23.687684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:23.687701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:23.687828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:23.687942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:23.702445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:23.702478Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:23.702570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:23.706777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:23.707141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:23.707206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:23.709428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:23.709526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:23.709687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.709781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:23.710684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:23.710760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:23.711068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:23.711080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:23.711104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:23.711113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:23.711120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:23.711163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.713461Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:23.733935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:23.734006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.734068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:23.734075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:23.734111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:23.734122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:23.735031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.735098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:23.735180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.735190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:23.735195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:23.735199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:23.735705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.735718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:23.735723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:23.736243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.736257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.736265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.736274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:23.737194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:23.737855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:23.737907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:23.738177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.738214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:23.738224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.738303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:23.738314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.738357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:23.738375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... 
2025-06-30T09:26:27.801641Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1007 ready parts: 1/1 2025-06-30T09:26:27.801645Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1007:0 progress is 1/1 2025-06-30T09:26:27.801648Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1007 ready parts: 1/1 2025-06-30T09:26:27.801652Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1007, ready parts: 1/1, is published: true 2025-06-30T09:26:27.801657Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1007 ready parts: 1/1 2025-06-30T09:26:27.801662Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1007:0 2025-06-30T09:26:27.801666Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1007:0 2025-06-30T09:26:27.801710Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 3 2025-06-30T09:26:27.801717Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 TestModificationResult got TxId: 1007, wait until txId: 1007 TestWaitNotification wait txId: 1007 2025-06-30T09:26:27.802409Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1007: send EvNotifyTxCompletion 2025-06-30T09:26:27.802420Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1007 2025-06-30T09:26:27.802495Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1007, at schemeshard: 72057594046678944 2025-06-30T09:26:27.802522Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1007: got EvNotifyTxCompletionResult 2025-06-30T09:26:27.802527Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1007: satisfy waiter [16:653:2612] TestWaitNotification: OK eventTxId 1007 2025-06-30T09:26:27.802619Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/RecoveryTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:27.802680Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/RecoveryTable" took 82us result status StatusSuccess 2025-06-30T09:26:27.802827Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/RecoveryTable" PathDescription { Self { Name: "RecoveryTable" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1007 CreateStep: 5000008 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "RecoveryTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 No operations in database - this is expected after operation completion Verifying path states after reboot for regular restore operation... 
2025-06-30T09:26:27.803899Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/RecoveryTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:27.803949Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/RecoveryTable" took 62us result status StatusSuccess 2025-06-30T09:26:27.804055Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/RecoveryTable" PathDescription { Self { Name: "RecoveryTable" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1007 CreateStep: 5000008 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "RecoveryTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'RecoveryTable' state: EPathStateNoChanges 2025-06-30T09:26:27.804116Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/StateRecoveryCollection" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false 
ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:27.804139Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/StateRecoveryCollection" took 24us result status StatusSuccess 2025-06-30T09:26:27.804197Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/StateRecoveryCollection" PathDescription { Self { Name: "StateRecoveryCollection" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1005 CreateStep: 5000006 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "StateRecoveryCollection" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/RecoveryTable" } } Cluster { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'StateRecoveryCollection' state: EPathStateNoChanges State recovery test completed successfully - operation state preserved across reboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 24197, MsgBus: 8598 2025-06-30T09:26:26.633602Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671371538849231:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:26.633642Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003494/r3tmp/tmp8366dT/pdisk_1.dat 2025-06-30T09:26:26.699851Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not 
loaded 2025-06-30T09:26:26.704604Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671371538849211:2080] 1751275586633429 != 1751275586633432 TServer::EnableGrpc on GrpcPort 24197, node 1 2025-06-30T09:26:26.721629Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:26.721644Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:26.721647Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:26.721710Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8598 TClient is connected to server localhost:8598 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:26:26.779034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:26.779073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:26.781002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:26.792952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:26.805684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:26.832743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:26.862669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:26.878810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:27.122363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671375833818107:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.122415Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.177929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.187439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.195679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.251135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.259115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.272899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.287123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.303594Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671375833818761:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.303636Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.303653Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671375833818766:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.304566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:27.313942Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671375833818768:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:27.377736Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671375833818819:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:27.525414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.636031Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestoration [GOOD] >> KqpLimits::WaitCAsTimeout [GOOD] >> KqpParams::BadParameterType ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TIncrementalRestoreWithRebootsTests::BasicIncrementalRestoreWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:26:23.729280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:23.729311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:23.729318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:23.729325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:23.729340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:23.729345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:23.729356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:23.729373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:23.729496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:23.729611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:23.745143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:23.745167Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:23.745263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:23.749261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:23.749639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:23.749685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:23.752111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:23.752218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:23.752359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.752450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:23.753512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:23.753558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:23.753869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:23.753883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:23.753907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:23.753916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:23.753924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:23.753964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.755551Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:23.781932Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:23.782014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.782086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:23.782096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:23.782146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:23.782161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:23.783211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.783300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:23.783369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.783381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:23.783389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:23.783395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:23.783991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.784006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:23.784016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:23.784674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.784694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-30T09:26:23.784701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.784711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:23.785548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:23.786417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:23.786474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:23.786677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.786706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:23.786712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.786794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:23.786804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.786838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:23.786852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... 
id#1007:0 progress is 1/1 2025-06-30T09:26:28.045725Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1007 ready parts: 1/1 2025-06-30T09:26:28.045730Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1007:0 progress is 1/1 2025-06-30T09:26:28.045733Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1007 ready parts: 1/1 2025-06-30T09:26:28.045738Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1007, ready parts: 1/1, is published: true 2025-06-30T09:26:28.045743Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1007 ready parts: 1/1 2025-06-30T09:26:28.045749Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1007:0 2025-06-30T09:26:28.045753Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1007:0 2025-06-30T09:26:28.045785Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 3 2025-06-30T09:26:28.045793Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 TestModificationResult got TxId: 1007, wait until txId: 1007 TestWaitNotification wait txId: 1007 2025-06-30T09:26:28.046231Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1007: send EvNotifyTxCompletion 2025-06-30T09:26:28.046241Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1007 2025-06-30T09:26:28.046303Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1007, at schemeshard: 72057594046678944 2025-06-30T09:26:28.046319Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1007: got EvNotifyTxCompletionResult 2025-06-30T09:26:28.046324Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1007: satisfy waiter [16:656:2615] TestWaitNotification: OK eventTxId 1007 2025-06-30T09:26:28.046400Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/RebootTestTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:28.046440Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/RebootTestTable" took 50us result status StatusSuccess 2025-06-30T09:26:28.046565Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/RebootTestTable" PathDescription { Self { Name: "RebootTestTable" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1007 CreateStep: 5000008 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "RebootTestTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 No operations in database - this is expected after operation completion Verifying path states after reboot for regular restore operation... 
2025-06-30T09:26:28.047574Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/RebootTestTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:28.047614Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/RebootTestTable" took 50us result status StatusSuccess 2025-06-30T09:26:28.047715Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/RebootTestTable" PathDescription { Self { Name: "RebootTestTable" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1007 CreateStep: 5000008 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "RebootTestTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'RebootTestTable' state: EPathStateNoChanges 2025-06-30T09:26:28.047799Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/RebootTestCollection" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: 
false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:28.047827Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/RebootTestCollection" took 30us result status StatusSuccess 2025-06-30T09:26:28.047902Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/RebootTestCollection" PathDescription { Self { Name: "RebootTestCollection" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1005 CreateStep: 5000006 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "RebootTestCollection" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/RebootTestTable" } } Cluster { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'RebootTestCollection' state: EPathStateNoChanges Basic restore with reboots test completed successfully >> KqpCost::OlapPointLookup [GOOD] >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestorationEdgeCases [GOOD] |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestoration [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:26:23.527506Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:23.527540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:23.527547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:23.527554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:23.527570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:23.527574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:23.527585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:23.527601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:23.527736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:23.527836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:23.544082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:23.544108Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:23.544194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:23.547677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:23.548034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:23.548067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:23.550694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:23.550885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:23.551141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.551321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:23.552801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:23.552868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:23.553276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:23.553301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:23.553331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:23.553345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:23.553356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:23.553423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.555904Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:23.578758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:23.578860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.578933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:23.578941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:23.578984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:23.578997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:23.583180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.583273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: 
StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:23.583352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.583367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:23.583375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:23.583382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:23.584418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.584451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:23.584462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:23.585260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.585279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.585288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.585297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:23.586128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:23.586919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:23.586984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:23.587255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.587307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:23.587318Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.587408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:23.587421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.587469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:23.587487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... ot@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "BackupTableTest2" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 9 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Full backup table '/MyRoot/.backups/collections/BackupTableTestCollection/backup_001_full/BackupTableTest2' state: EPathStateNoChanges Verifying path states after reboot for regular restore operation... 
2025-06-30T09:26:28.504802Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/BackupTableTest1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:28.504822Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/BackupTableTest1" took 21us result status StatusSuccess 2025-06-30T09:26:28.504922Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/BackupTableTest1" PathDescription { Self { Name: "BackupTableTest1" PathId: 9 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1008 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "BackupTableTest1" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 9 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 9 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'BackupTableTest1' state: EPathStateNoChanges 2025-06-30T09:26:28.505002Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/BackupTableTest2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false 
ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:28.505020Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/BackupTableTest2" took 19us result status StatusSuccess 2025-06-30T09:26:28.505126Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/BackupTableTest2" PathDescription { Self { Name: "BackupTableTest2" PathId: 10 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1008 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "BackupTableTest2" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 9 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 10 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'BackupTableTest2' state: EPathStateNoChanges 2025-06-30T09:26:28.505205Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/BackupTableTestCollection" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:28.505229Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/BackupTableTestCollection" took 
27us result status StatusSuccess 2025-06-30T09:26:28.505320Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/BackupTableTestCollection" PathDescription { Self { Name: "BackupTableTestCollection" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1005 CreateStep: 5000006 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 9 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "BackupTableTestCollection" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/BackupTableTest1" } Entries { Type: ETypeTable Path: "/MyRoot/BackupTableTest2" } } Cluster { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'BackupTableTestCollection' state: EPathStateNoChanges Backup table path state restoration test completed successfully >> KqpCost::OltpWriteRow-isSink [GOOD] >> KqpCost::IndexLookupJoin+StreamLookupJoin [GOOD] >> KqpCost::Range [GOOD] |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapPointLookup [GOOD] Test command err: Trying to start YDB, gRPC: 18945, MsgBus: 5346 2025-06-30T09:26:27.495686Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671375387415679:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:27.495727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003481/r3tmp/tmpFVavD2/pdisk_1.dat TServer::EnableGrpc on GrpcPort 18945, node 1 2025-06-30T09:26:27.551910Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were 
not loaded 2025-06-30T09:26:27.552214Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671375387415656:2080] 1751275587495505 != 1751275587495508 2025-06-30T09:26:27.555217Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:27.555232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:27.555235Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:27.555287Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5346 TClient is connected to server localhost:5346 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:27.626530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:27.626570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:27.627686Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:27.628465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:27.636699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:27.657101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:27.683512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:27.699428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:27.937747Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671375387417252:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.937791Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:27.985166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:27.995512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.008016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.022414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.036466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.049928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.065120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.082072Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379682385202:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:28.082103Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:28.082109Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379682385207:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:28.083025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:28.090818Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671379682385209:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:28.157433Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671379682385260:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:28.348026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:26:28.385499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7521671379682385593:2472];tablet_id=72075186224037930;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:28.389449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[1:7521671379682385597:2476];tablet_id=72075186224037926;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:28.389528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224 ... et_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-30T09:26:28.435925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2025-06-30T09:26:28.435937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2025-06-30T09:26:28.435941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-30T09:26:28.442364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[1:7521671379682385597:2476];ev=NActors::IEventHandle;tablet_id=72075186224037926;tx_id=281474976715672;this=91724265708416;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275588440;max=18446744073709551615;plan=0;src=[1:7521671375387415981:2144];cookie=392:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.442537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;self_id=[1:7521671379682385723:2478];ev=NActors::IEventHandle;tablet_id=72075186224037924;tx_id=281474976715672;this=91724266094080;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275588442;max=18446744073709551615;plan=0;src=[1:7521671375387415981:2144];cookie=372:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.442657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037927;self_id=[1:7521671379682385596:2475];ev=NActors::IEventHandle;tablet_id=72075186224037927;tx_id=281474976715672;this=91724268889184;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275588442;max=18446744073709551615;plan=0;src=[1:7521671375387415981:2144];cookie=402:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.443222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7521671379682385594:2473];ev=NActors::IEventHandle;tablet_id=72075186224037922;tx_id=281474976715672;this=91724265708896;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275588443;max=18446744073709551615;plan=0;src=[1:7521671375387415981:2144];cookie=352:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.444295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[1:7521671379682385595:2474];ev=NActors::IEventHandle;tablet_id=72075186224037923;tx_id=281474976715672;this=91724268823168;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275588444;max=18446744073709551615;plan=0;src=[1:7521671375387415981:2144];cookie=362:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.444733Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7521671379682385593:2472];ev=NActors::IEventHandle;tablet_id=72075186224037930;tx_id=281474976715672;this=91724268824288;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275588444;max=18446744073709551615;plan=0;src=[1:7521671375387415981:2144];cookie=432:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.444872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7521671379682385591:2470];ev=NActors::IEventHandle;tablet_id=72075186224037929;tx_id=281474976715672;this=91724268825408;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275588444;max=18446744073709551615;plan=0;src=[1:7521671375387415981:2144];cookie=422:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.444977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;self_id=[1:7521671379682385598:2477];ev=NActors::IEventHandle;tablet_id=72075186224037928;tx_id=281474976715672;this=91724268826528;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275588444;max=18446744073709551615;plan=0;src=[1:7521671375387415981:2144];cookie=412:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.445103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7521671379682385592:2471];ev=NActors::IEventHandle;tablet_id=72075186224037931;tx_id=281474976715672;this=91724268827488;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275588445;max=18446744073709551615;plan=0;src=[1:7521671375387415981:2144];cookie=442:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.445563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037925;self_id=[1:7521671379682385590:2469];ev=NActors::IEventHandle;tablet_id=72075186224037925;tx_id=281474976715672;this=91724266090720;method=TTxController::StartProposeOnExecute;tx_info=281474976715672:TX_KIND_SCHEMA;min=1751275588445;max=18446744073709551615;plan=0;src=[1:7521671375387415981:2144];cookie=382:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.447224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.448808Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.450589Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:26:28.450703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:26:28.450935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.450958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.451914Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:26:28.452088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.453311Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:26:28.453443Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:26:28.453463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.453697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.454834Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:26:28.454899Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:26:28.455090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.455252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.456463Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:26:28.456610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-30T09:26:28.457420Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:26:28.457498Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-30T09:26:28.497860Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:28.510361Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715674; 2025-06-30T09:26:28.510470Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715674; 2025-06-30T09:26:28.510698Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7521671379682385593:2472];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037930;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037926;receive=72075186224037931; 2025-06-30T09:26:28.510846Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715674; 2 >> KqpCost::ScanQueryRangeFullScan+SourceRead [GOOD] |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_create_queue[tables_format_v1-fifo] [GOOD] |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/kqp/ut/cost/unittest |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OltpWriteRow+isSink >> KqpCost::ScanScriptingRangeFullScan+SourceRead ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::Range [GOOD] Test command err: Trying to start YDB, gRPC: 14571, MsgBus: 28324 2025-06-30T09:26:28.122594Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671379088232929:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:28.122907Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003450/r3tmp/tmpPN3lPR/pdisk_1.dat 2025-06-30T09:26:28.210570Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671379088232714:2080] 1751275588119867 != 1751275588119870 2025-06-30T09:26:28.214124Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14571, node 1 2025-06-30T09:26:28.223483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:28.223528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:28.224454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:28.226540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:28.226553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:28.226556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:28.226626Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28324 TClient is connected to server localhost:28324 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:28.306704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:28.320669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.404698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.451536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.518773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.625926Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379088234325:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:28.625970Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:28.681738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.691315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.700437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.714685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.729766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.743855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.758012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.775813Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379088234976:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.775860Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.775968Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379088234981:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.777110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:26:28.784793Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671379088234983:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:28.853273Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671379088235034:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupJoin+StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 14639, MsgBus: 28745 2025-06-30T09:26:27.923427Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671375403832759:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:27.923486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003452/r3tmp/tmpXoZMxQ/pdisk_1.dat 2025-06-30T09:26:27.998302Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:27.998412Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671375403832738:2080] 1751275587923263 != 1751275587923266 TServer::EnableGrpc on GrpcPort 14639, node 1 2025-06-30T09:26:28.014241Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:28.014266Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:28.014271Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:28.014336Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:28.026180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:28.026218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:28.027335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28745 TClient is connected to server localhost:28745 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:28.082008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:28.085283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:26:28.090156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.112718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.139701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.153215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.406168Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379698801628:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.406207Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.461061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.470352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.483893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.498041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.512511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.526379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.540659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.559642Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379698802282:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.559676Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.559886Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379698802287:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.560917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:26:28.570058Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671379698802289:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:26:28.656414Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671379698802340:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:28.848111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.858003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.870061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.925552Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; /Root/Join1_2 1 19 /Root/Join1_1 8 136 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OltpWriteRow-isSink [GOOD] Test command err: Trying to start YDB, gRPC: 26611, MsgBus: 62902 2025-06-30T09:26:27.663503Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671375332780101:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:27.663520Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00346a/r3tmp/tmpShVhNg/pdisk_1.dat 2025-06-30T09:26:27.718784Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:27.718950Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671375332780081:2080] 1751275587663392 != 1751275587663395 TServer::EnableGrpc on GrpcPort 26611, node 1 2025-06-30T09:26:27.730585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:27.730600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:27.730603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:27.730654Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62902 2025-06-30T09:26:27.766113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:27.766152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:27.769744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62902 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:27.799063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:27.823415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:27.893189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:27.925128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:27.940143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.065252Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379627748995:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.065286Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.110494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.118794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.127379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.140919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.154585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.168785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.182938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-30T09:26:28.199615Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379627749646:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.199651Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.199723Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379627749651:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-30T09:26:28.200718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-30T09:26:28.210918Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671379627749653:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:28.276689Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671379627749704:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:28.563478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) query_phases { duration_us: 233 cpu_time_us: 233 } query_phases { duration_us: 1845 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 429 affected_shards: 1 } compilation { duration_us: 17691 cpu_time_us: 16209 } process_cpu_time_us: 385 total_duration_us: 20418 total_cpu_time_us: 17256 query_phases { duration_us: 170 cpu_time_us: 170 } query_phases { duration_us: 1763 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 476 affected_shards: 1 } compilation { duration_us: 14711 cpu_time_us: 13390 } process_cpu_time_us: 293 total_duration_us: 17277 total_cpu_time_us: 14329 2025-06-30T09:26:28.665643Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:28.709170Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7521671379627750092:2498], TxId: 281474976715677, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jz02hk11472x6308hpa86qay. SessionId : ydb://session/3?node_id=1&id=MzQxMGVlZGYtNjZjMGFhN2ItNmRiZjkwY2EtOWRjNWM3MzU=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }.
2025-06-30T09:26:28.709338Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7521671379627750093:2499], TxId: 281474976715677, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=MzQxMGVlZGYtNjZjMGFhN2ItNmRiZjkwY2EtOWRjNWM3MzU=. TraceId : 01jz02hk11472x6308hpa86qay. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7521671379627750089:2460], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-30T09:26:28.709445Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=MzQxMGVlZGYtNjZjMGFhN2ItNmRiZjkwY2EtOWRjNWM3MzU=, ActorId: [1:7521671379627749928:2460], ActorState: ExecuteState, TraceId: 01jz02hk11472x6308hpa86qay, Create QueryResponse for error on request, msg: query_phases { duration_us: 284 cpu_time_us: 284 } query_phases { duration_us: 1217 table_access { name: "/Root/TestTable" reads { rows: 1 bytes: 8 } partitions_count: 1 } cpu_time_us: 1339 affected_shards: 1 } query_phases { duration_us: 19042 cpu_time_us: 19082 } compilation { duration_us: 45948 cpu_time_us: 33623 } process_cpu_time_us: 584 total_duration_us: 67616 total_cpu_time_us: 54912 query_phases { duration_us: 281 cpu_time_us: 281 } query_phases { duration_us: 2217 table_access { name: "/Root/TestTable" partitions_count: 1 } cpu_time_us: 1035 affected_shards: 1 } query_phases { duration_us: 531 cpu_time_us: 430 } query_phases { duration_us: 1990 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 455 affected_shards: 1 } compilation { duration_us: 37164 cpu_time_us: 35310 } process_cpu_time_us: 649 total_duration_us: 45789 total_cpu_time_us: 38160 query_phases { duration_us: 310 cpu_time_us: 310 } query_phases { duration_us: 1358 table_access { name: "/Root/TestTable" partitions_count: 1 } cpu_time_us: 1309 affected_shards: 1 } query_phases { duration_us: 474 cpu_time_us: 288 affected_shards: 1 } compilation { duration_us: 38500 cpu_time_us: 33847 } process_cpu_time_us: 627 total_duration_us: 44633 total_cpu_time_us: 36381 query_phases { duration_us: 280 cpu_time_us: 280 } query_phases { duration_us: 1127 table_access { name: "/Root/TestTable" reads { rows: 1 bytes: 8 } partitions_count: 1 } cpu_time_us: 1074 affected_shards: 1 } query_phases { duration_us: 1822 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 451 affected_shards: 1 } compilation { duration_us: 32486 cpu_time_us: 30709 } process_cpu_time_us: 558 total_duration_us: 37252 total_cpu_time_us: 33072 query_phases { duration_us: 190 cpu_time_us: 190 } query_phases { duration_us: 1773 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 422 affected_shards: 1 } compilation { duration_us: 13857 cpu_time_us: 12179 } process_cpu_time_us: 322 total_duration_us: 16353 total_cpu_time_us: 13113 query_phases { duration_us: 204 cpu_time_us: 204 } query_phases { duration_us: 1941 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 458 affected_shards: 1 } compilation { duration_us: 14007 cpu_time_us: 12755 } process_cpu_time_us: 357 total_duration_us: 16776 total_cpu_time_us: 13774 |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestorationEdgeCases [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:26:24.168436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:24.168466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:24.168472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:24.168478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:24.168494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:24.168499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:24.168511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:24.168529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:24.168648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:24.168745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:24.185557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:24.185611Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:24.185731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:24.190175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:24.190559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:24.190606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:24.196546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:24.196650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:24.196831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:24.196982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:24.199028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:24.199111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:24.199510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:24.199529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:24.199559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:24.199571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:24.199579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:24.199640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.201834Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:24.231062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:24.231162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.231253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:24.231265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:24.231316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:24.231333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:24.232324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:24.232400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: 
StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:24.232474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.232489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:24.232496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:24.232503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:24.233232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.233258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:24.233270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:24.233839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.233856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.233864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:24.233873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:24.234684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:24.235358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:24.235412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:24.235657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:24.235719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:24.235731Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:24.235811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:24.235820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:24.235866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:24.235882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... sion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithNumbers123" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 9 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Full backup table '/MyRoot/.backups/collections/EdgeCaseCollection/backup_001_full/TableWithNumbers123' state: EPathStateNoChanges Verifying path states after reboot for regular restore operation... 
2025-06-30T09:26:29.128721Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table_With_Underscores" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:29.128735Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table_With_Underscores" took 14us result status StatusSuccess 2025-06-30T09:26:29.128790Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table_With_Underscores" PathDescription { Self { Name: "Table_With_Underscores" PathId: 9 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1008 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_With_Underscores" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 9 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 9 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'Table_With_Underscores' state: EPathStateNoChanges 2025-06-30T09:26:29.128873Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableWithNumbers123" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: 
false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:29.128891Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableWithNumbers123" took 36us result status StatusSuccess 2025-06-30T09:26:29.128958Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableWithNumbers123" PathDescription { Self { Name: "TableWithNumbers123" PathId: 10 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1008 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithNumbers123" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 9 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 10 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'TableWithNumbers123' state: EPathStateNoChanges 2025-06-30T09:26:29.129027Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/EdgeCaseCollection" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:29.129049Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/.backups/collections/EdgeCaseCollection" took 24us result status StatusSuccess 2025-06-30T09:26:29.129133Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/EdgeCaseCollection" PathDescription { Self { Name: "EdgeCaseCollection" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1005 CreateStep: 5000006 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 9 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "EdgeCaseCollection" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/Table_With_Underscores" } Entries { Type: ETypeTable Path: "/MyRoot/TableWithNumbers123" } } Cluster { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'EdgeCaseCollection' state: EPathStateNoChanges No operations in database - this is expected after operation completion Edge cases backup table path state restoration test completed successfully >> KqpCost::IndexLookupAndTake+useSink >> KqpParams::BadParameterType [GOOD] >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestorationAfterMultipleReboots [GOOD] >> TIncrementalRestoreWithRebootsTests::MultiTableIncrementalRestoreWithReboots [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan+SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 4056, MsgBus: 10293 2025-06-30T09:26:28.390848Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671379407515196:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:28.390974Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/cl3w/00344b/r3tmp/tmpjHleWP/pdisk_1.dat 2025-06-30T09:26:28.481404Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:28.481567Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671379407515010:2080] 1751275588388063 != 1751275588388066 TServer::EnableGrpc on GrpcPort 4056, node 1 2025-06-30T09:26:28.506952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:28.506967Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:28.506969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:28.507018Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10293 2025-06-30T09:26:28.550690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:28.550722Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:28.551395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10293 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:28.590151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:28.610589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:28.676416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.702314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.714780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.865866Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671379407516610:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:28.865900Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:28.910378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.923632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.937072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.947241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.960472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.974676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:28.988958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:29.004248Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671383702484556:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:29.004280Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671383702484561:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:29.004290Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:29.005195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:29.015003Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671383702484563:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:29.114380Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671383702484614:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:29.299810Z node 1 :KQP_GATEWAY DEBUG: kqp_metadata_loader.cpp:888: Load table metadata from cache by path, request Path: /Root/Test 2025-06-30T09:26:29.320795Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:37: Start KqpSnapshotManager at [1:7521671383702484853:2460] 2025-06-30T09:26:29.320813Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:58: KqpSnapshotManager: got snapshot request from [1:7521671383702484839:2460] 2025-06-30T09:26:29.323866Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:172: KqpSnapshotManager: snapshot 1751275589371:281474976715672 created 2025-06-30T09:26:29.323944Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:608: ActorId: [1:7521671383702484864:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hknccmtk7sqhrk78bj5j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, ... ropping batch for read #0 2025-06-30T09:26:29.328154Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715673, task: 1, CA Id [1:7521671383702484868:2467]. effective maxinflight 1 sorted 1 2025-06-30T09:26:29.328158Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715673, task: 1, CA Id [1:7521671383702484868:2467]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-30T09:26:29.328163Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715673, task: 1, CA Id [1:7521671383702484868:2467]. returned async data processed rows 3 left freeSpace 8388548 received rows 3 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-30T09:26:29.328263Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7521671383702484868:2467], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jz02hknccmtk7sqhrk78bj5j. SessionId : ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-30T09:26:29.328276Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7521671383702484868:2467], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jz02hknccmtk7sqhrk78bj5j. SessionId : ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:26:29.328276Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7521671383702484869:2468], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=. TraceId : 01jz02hknccmtk7sqhrk78bj5j. CustomerSuppliedId : . 
CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646923 2025-06-30T09:26:29.328287Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715673, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-30T09:26:29.328297Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715673, task: 2. Finish input channelId: 1, from: [1:7521671383702484868:2467] 2025-06-30T09:26:29.328321Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7521671383702484868:2467], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jz02hknccmtk7sqhrk78bj5j. SessionId : ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646927 2025-06-30T09:26:29.328322Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7521671383702484869:2468], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=. TraceId : 01jz02hknccmtk7sqhrk78bj5j. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:26:29.328335Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7521671383702484868:2467], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jz02hknccmtk7sqhrk78bj5j. SessionId : ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:26:29.328342Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 1. Tasks execution finished 2025-06-30T09:26:29.328344Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7521671383702484868:2467], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jz02hknccmtk7sqhrk78bj5j. SessionId : ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-30T09:26:29.328383Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 1. pass away 2025-06-30T09:26:29.328414Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7521671383702484869:2468], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=. TraceId : 01jz02hknccmtk7sqhrk78bj5j. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-30T09:26:29.328428Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715673;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-30T09:26:29.328473Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7521671383702484864:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hknccmtk7sqhrk78bj5j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7521671383702484868:2467], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 3506 Tasks { TaskId: 1 CpuTimeUs: 364 FinishTimeMs: 1751275589328 OutputRows: 1 OutputBytes: 19 Tables { TablePath: "/Root/Test" ReadRows: 1 ReadBytes: 20 AffectedPartitions: 1 } IngressRows: 3 ComputeCpuTimeUs: 53 BuildCpuTimeUs: 311 HostName: "ghrun-x4qzxsyz2q" NodeId: 1 StartTimeMs: 1751275589328 CreateTimeMs: 1751275589324 UpdateTimeMs: 1751275589328 } MaxMemoryUsage: 1048576 } 2025-06-30T09:26:29.328497Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715673. Ctx: { TraceId: 01jz02hknccmtk7sqhrk78bj5j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7521671383702484868:2467] 2025-06-30T09:26:29.328499Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-30T09:26:29.328522Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:7521671383702484864:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hknccmtk7sqhrk78bj5j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [1:7521671383702484869:2468], 2025-06-30T09:26:29.328574Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:357: ActorId: [1:7521671383702484864:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hknccmtk7sqhrk78bj5j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [1:7521671383702484839:2460], seqNo: 1, nRows: 1 2025-06-30T09:26:29.329268Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:423: TxId: 281474976715673, send ack to channelId: 2, seqNo: 1, enough: 0, freeSpace: 8388470, to: [1:7521671383702484871:2468] 2025-06-30T09:26:29.329292Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7521671383702484869:2468], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=. TraceId : 01jz02hknccmtk7sqhrk78bj5j. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-30T09:26:29.329302Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715673, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-30T09:26:29.329304Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 2. Tasks execution finished 2025-06-30T09:26:29.329306Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7521671383702484869:2468], TxId: 281474976715673, task: 2. 
Ctx: { SessionId : ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=. TraceId : 01jz02hknccmtk7sqhrk78bj5j. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-30T09:26:29.329338Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 2. pass away 2025-06-30T09:26:29.329369Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715673;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-30T09:26:29.329389Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7521671383702484864:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hknccmtk7sqhrk78bj5j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7521671383702484869:2468], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 740 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 256 FinishTimeMs: 1751275589329 InputRows: 1 InputBytes: 19 OutputRows: 1 OutputBytes: 19 ResultRows: 1 ResultBytes: 19 ComputeCpuTimeUs: 72 BuildCpuTimeUs: 184 HostName: "ghrun-x4qzxsyz2q" NodeId: 1 CreateTimeMs: 1751275589326 UpdateTimeMs: 1751275589329 } MaxMemoryUsage: 1048576 } 2025-06-30T09:26:29.329407Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715673. Ctx: { TraceId: 01jz02hknccmtk7sqhrk78bj5j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7521671383702484869:2468] 2025-06-30T09:26:29.329443Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2189: ActorId: [1:7521671383702484864:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hknccmtk7sqhrk78bj5j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-30T09:26:29.329450Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 2. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-30T09:26:29.329453Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [1:7521671383702484864:2460] TxId: 281474976715673. Ctx: { TraceId: 01jz02hknccmtk7sqhrk78bj5j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2JhNmY4M2YtZWIwOGMwYWEtOTQzMmRlYWQtZjRmNGM1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.004246s ReadRows: 1 ReadBytes: 20 ru: 2 rate limiter was not found force flag: 1 2025-06-30T09:26:29.329714Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275589371, txId: 281474976715672] shutting down |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::CreateExternalTableWithReboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpParams::BadParameterType [GOOD] Test command err: Trying to start YDB, gRPC: 28551, MsgBus: 4881 2025-06-30T09:25:33.554542Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671140590948250:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:33.554693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004224/r3tmp/tmpsrnx1B/pdisk_1.dat 2025-06-30T09:25:33.611314Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:33.611768Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671140590948028:2080] 1751275533551037 != 1751275533551040 TServer::EnableGrpc on GrpcPort 28551, node 1 2025-06-30T09:25:33.632870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:33.632887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:33.632890Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:33.632944Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:33.653401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:33.653445Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:33.654110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4881 TClient is connected to server localhost:4881 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:33.769166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:33.791549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:33.859797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:33.936303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.009767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.135023Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671144885916924:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.135071Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.191280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.208732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.219626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.236461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.250334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.265198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.278491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:34.351041Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671144885917580:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.351089Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.351242Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671144885917585:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.352600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:34.357446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:25:34.357555Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671144885917587:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:34.431814Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671144885917638:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:34.554116Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:34.684398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.555562Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521671140590948250:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:38.55752 ... PoolId : default. Database : /Root. }. Handle abort execution event from: [3:2699:4000], status: UNSPECIFIED, reason: {
: Error: Terminate execution } 2025-06-30T09:26:28.421417Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=3&id=NTQ1N2I3Y2MtYjgyNTU3ZjUtYjRkOWY1MzgtZjYyMTk3NmY=, ActorId: [3:2658:4000], ActorState: ExecuteState, TraceId: 01jz02hjed4qbgc5ngf3wpscxz, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 28195, MsgBus: 5674 2025-06-30T09:26:28.774708Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671376808738719:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:28.774974Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004224/r3tmp/tmpzhSLtW/pdisk_1.dat 2025-06-30T09:26:28.805502Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:28.808169Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671376808738695:2080] 1751275588774483 != 1751275588774486 TServer::EnableGrpc on GrpcPort 28195, node 4 2025-06-30T09:26:28.819397Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:28.819415Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:28.819418Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:28.819472Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5674 TClient is connected to server localhost:5674 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:28.888746Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:28.888780Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:28.889172Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:28.889697Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:28.905851Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.922555Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.949511Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:28.966347Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:29.181478Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671381103707585:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:29.181503Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:29.189946Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:29.198792Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:29.212257Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:29.225927Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:29.239733Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:29.254713Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:29.268469Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:29.285155Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671381103708236:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:29.285194Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671381103708241:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:29.285198Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:29.286382Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:29.295401Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671381103708243:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:29.374913Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671381103708294:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:29.573827Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=M2YyOTg5MGMtYjBlYjIxZC02NDAxM2Q0OS0zN2JkZGVmMQ==, ActorId: [4:7521671381103708557:2469], ActorState: ExecuteState, TraceId: 01jz02hkxgctmbgrss7xxbpt8q, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1417: ydb/core/kqp/query_data/kqp_query_data.cpp:271: Parameter $group type mismatch, expected: { Kind: Data Data { Scheme: 2 } }, actual: Type (Data), schemeType: Int32, schemeTypeId: 1 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TIncrementalRestoreWithRebootsTests::BackupTablePathStateRestorationAfterMultipleReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:26:25.458321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:25.458352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:25.458358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:25.458365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:25.458380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:25.458384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:25.458395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:25.458411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:25.458530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# 2025-06-30T09:26:25.458612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:25.474240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:25.474261Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:25.474349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:25.478368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:25.478801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:25.478851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:25.481207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:25.481274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:25.481429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:25.481528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:25.482609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:25.482658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:25.482951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:25.482963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:25.482979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:25.482986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:25.482991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:25.483024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:25.484507Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:25.505641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:25.505717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:25.505787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:25.505795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:25.505846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:25.505861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:25.506812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:25.506874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:25.506931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:25.506942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:25.506948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:25.506954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:25.507454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:25.507467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:25.507474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:25.507860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:25.507869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:25.507876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-30T09:26:25.507883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:25.508668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:25.509189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:25.509237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:25.509492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:25.509529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:25.509539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:25.509626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:25.509638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:25.509679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:25.509695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... 
mpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1005 CreateStep: 5000006 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "RebootStabilityCollection" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/RebootStabilityTable" } } Cluster { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'RebootStabilityCollection' state: EPathStateNoChanges No operations in database - this is expected after operation completion Testing backup table path state persistence after reboot cycle 3 Verifying backup table path states for regular restore operation in collection: RebootStabilityCollection 2025-06-30T09:26:29.835975Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/RebootStabilityCollection/backup_001_full/RebootStabilityTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:29.836010Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/RebootStabilityCollection/backup_001_full/RebootStabilityTable" took 46us result status StatusSuccess 2025-06-30T09:26:29.836104Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/RebootStabilityCollection/backup_001_full/RebootStabilityTable" PathDescription { Self { Name: "RebootStabilityTable" PathId: 7 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1006 CreateStep: 5000007 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "RebootStabilityTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: 
false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Full backup table '/MyRoot/.backups/collections/RebootStabilityCollection/backup_001_full/RebootStabilityTable' state: EPathStateNoChanges Verifying path states after reboot for regular restore operation... 
2025-06-30T09:26:29.836196Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/RebootStabilityTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:29.836215Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/RebootStabilityTable" took 21us result status StatusSuccess 2025-06-30T09:26:29.836285Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/RebootStabilityTable" PathDescription { Self { Name: "RebootStabilityTable" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1007 CreateStep: 5000008 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "RebootStabilityTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'RebootStabilityTable' state: EPathStateNoChanges 2025-06-30T09:26:29.836363Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/RebootStabilityCollection" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false 
BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:29.836382Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/RebootStabilityCollection" took 22us result status StatusSuccess 2025-06-30T09:26:29.836456Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/RebootStabilityCollection" PathDescription { Self { Name: "RebootStabilityCollection" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1005 CreateStep: 5000006 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "RebootStabilityCollection" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/RebootStabilityTable" } } Cluster { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'RebootStabilityCollection' state: EPathStateNoChanges No operations in database - this is expected after operation completion Backup table path state restoration after multiple reboots test completed successfully ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TIncrementalRestoreWithRebootsTests::MultiTableIncrementalRestoreWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-30T09:26:24.322352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact 
single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:24.322383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:24.322390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:24.322397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:24.322415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:24.322421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:24.322433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:24.322451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:24.322582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:24.322683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:24.340929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:24.340959Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:24.341111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:24.345342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:24.345631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:24.345666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:24.347657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:24.347727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:24.347881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:24.347982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:24.348858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:24.348897Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:24.349128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:24.349138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:24.349154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:24.349160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:24.349165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:24.349204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.350585Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:24.373080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:24.373172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.373247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:24.373256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:24.373307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:24.373329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:24.374214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:24.374274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:24.374331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.374342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:24.374349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:24.374356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:24.374982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.375006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:24.375014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:24.377314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.377335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:24.377343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:24.377353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:24.378309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:24.378942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:24.378988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:24.379216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:24.379265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:24.379275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:24.379353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:24.379364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:24.379403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:24.379419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... result: Status: StatusSuccess Path: "/MyRoot/Table1" PathDescription { Self { Name: "Table1" PathId: 10 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1009 CreateStep: 5000010 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table1" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 11 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 10 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'Table1' state: EPathStateNoChanges 2025-06-30T09:26:29.754629Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table2" Options { 
ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:29.754644Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table2" took 17us result status StatusSuccess 2025-06-30T09:26:29.754711Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table2" PathDescription { Self { Name: "Table2" PathId: 11 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1009 CreateStep: 5000010 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table2" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 11 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 11 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'Table2' state: EPathStateNoChanges 2025-06-30T09:26:29.754805Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:29.754821Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table3" took 18us result status 
StatusSuccess 2025-06-30T09:26:29.754888Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table3" PathDescription { Self { Name: "Table3" PathId: 12 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1009 CreateStep: 5000010 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table3" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 11 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 12 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'Table3' state: EPathStateNoChanges 2025-06-30T09:26:29.754963Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MultiTableCollection" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:29.754989Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MultiTableCollection" took 29us result status StatusSuccess 2025-06-30T09:26:29.755070Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MultiTableCollection" PathDescription { Self { Name: 
"MultiTableCollection" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1005 CreateStep: 5000006 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 11 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "MultiTableCollection" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/Table1" } Entries { Type: ETypeTable Path: "/MyRoot/Table2" } Entries { Type: ETypeTable Path: "/MyRoot/Table3" } } Cluster { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'MultiTableCollection' state: EPathStateNoChanges Multi-table restore with reboots test completed successfully >> KqpCost::ScanScriptingRangeFullScan+SourceRead [GOOD] |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> KqpCost::OltpWriteRow+isSink [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanScriptingRangeFullScan+SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 6702, MsgBus: 7305 2025-06-30T09:26:29.519397Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671383820394712:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:29.519449Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003432/r3tmp/tmpHowUS2/pdisk_1.dat 2025-06-30T09:26:29.583651Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:29.583765Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671383820394689:2080] 1751275589519150 != 1751275589519153 TServer::EnableGrpc on GrpcPort 6702, node 1 2025-06-30T09:26:29.594896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:29.594917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:29.594919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:29.594995Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7305 TClient is connected to server localhost:7305 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:29.662448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:29.662478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:29.663485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:29.664258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:26:29.667072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:29.678921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:29.750662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:29.817351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:29.840022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:29.995705Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671383820396295:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:29.995738Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.062227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.072469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.088081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.101758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.116574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.130180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.144442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.165327Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671388115364246:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.165351Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.165462Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671388115364251:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.166409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:30.170759Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671388115364253:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:30.262427Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671388115364305:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:30.454583Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7521671388115364560:2465] TxId: 281474976715673. Ctx: { TraceId: 01jz02hmrg08r249cdmywpbe39, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmY0ODUxYzMtNTQ4MzY4ZTktOWY0NzU3ZWYtMjZkYTc2Mzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-30T09:26:30.458522Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275590498, txId: 281474976715672] shutting down |84.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> KqpLimits::TooBigQuery+useSink [GOOD] >> KqpLimits::TooBigKey+useSink >> KqpCost::IndexLookupAndTake+useSink [GOOD] |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TIncrementalRestoreWithRebootsTests::DatabaseConsistencyAfterMultipleReboots [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAndTake+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 16478, MsgBus: 15711 2025-06-30T09:26:29.806086Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671381881860243:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:29.806168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00342f/r3tmp/tmpA6AJad/pdisk_1.dat 2025-06-30T09:26:29.858451Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:29.862834Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671381881860219:2080] 1751275589805889 != 1751275589805892 TServer::EnableGrpc on GrpcPort 16478, node 1 2025-06-30T09:26:29.878703Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-30T09:26:29.878719Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:29.878721Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:29.878792Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15711 2025-06-30T09:26:29.907288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:29.907321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:29.908383Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15711 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:29.946152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:29.948946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:26:29.959151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.029700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-30T09:26:30.096125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.116867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:30.230590Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671386176829130:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.230624Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.285616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.296010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.304639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.318148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.332146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.346856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.360940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.377205Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671386176829781:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.377246Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.377248Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671386176829786:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.378291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:30.387065Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671386176829788:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:30.453870Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671386176829839:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:30.653422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.807693Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; /Root/SecondaryKeys/Index/indexImplTable 2 16 /Root/SecondaryKeys 1 8 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OltpWriteRow+isSink [GOOD] Test command err: Trying to start YDB, gRPC: 3618, MsgBus: 13801 2025-06-30T09:26:29.577527Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671383441875117:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:29.577555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003435/r3tmp/tmphrSOBQ/pdisk_1.dat 2025-06-30T09:26:29.627271Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3618, node 1 2025-06-30T09:26:29.644441Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:29.644459Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:29.644462Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:29.644516Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13801 2025-06-30T09:26:29.680172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:29.680213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:29.681676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13801 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:29.713422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:29.722136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:29.793277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:29.836102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:29.856341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:30.083658Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671387736843993:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.083708Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.151268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.162208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.174205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.186065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.243451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.300138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.311447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:30.328658Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671387736844652:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.328693Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.328758Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671387736844657:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:30.329708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:30.337899Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671387736844659:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:30.433141Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671387736844710:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:30.579538Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:30.633812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) query_phases { duration_us: 2125 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 552 affected_shards: 1 } compilation { duration_us: 11329 cpu_time_us: 10529 } process_cpu_time_us: 189 total_duration_us: 14094 total_cpu_time_us: 11270 query_phases { duration_us: 2168 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 430 affected_shards: 1 } compilation { duration_us: 12439 cpu_time_us: 11623 } process_cpu_time_us: 182 total_duration_us: 15285 total_cpu_time_us: 12235 2025-06-30T09:26:30.722705Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:105;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=5; 2025-06-30T09:26:30.724972Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037922 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-30T09:26:30.725049Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037922 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-30T09:26:30.725136Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:7521671387736845139:2470], Table: `/Root/TestTable` ([72057594046644480:17:1]), SessionActorId: [1:7521671387736844983:2470]Got CONSTRAINT VIOLATION for table `/Root/TestTable`. ShardID=72075186224037922, Sink=[1:7521671387736845139:2470].{
: Error: Conflict with existing key., code: 2012 } 2025-06-30T09:26:30.725288Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7521671387736845132:2470], SessionActorId: [1:7521671387736844983:2470], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[1:7521671387736844983:2470]. isRollback=0 2025-06-30T09:26:30.725378Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1938: SessionId: ydb://session/3?node_id=1&id=OGYzMmM1NWYtOTg3MjkxOTktOWE1YjY2MmUtOGQzZGFjZmM=, ActorId: [1:7521671387736844983:2470], ActorState: ExecuteState, TraceId: 01jz02hn1m2rw3nnfyzrted43p, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [1:7521671387736845133:2470] from: [1:7521671387736845132:2470] 2025-06-30T09:26:30.725406Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [1:7521671387736845133:2470] TxId: 281474976715676. Ctx: { TraceId: 01jz02hn1m2rw3nnfyzrted43p, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGYzMmM1NWYtOTg3MjkxOTktOWE1YjY2MmUtOGQzZGFjZmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-30T09:26:30.725479Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=OGYzMmM1NWYtOTg3MjkxOTktOWE1YjY2MmUtOGQzZGFjZmM=, ActorId: [1:7521671387736844983:2470], ActorState: ExecuteState, TraceId: 01jz02hn1m2rw3nnfyzrted43p, Create QueryResponse for error on request, msg: query_phases { duration_us: 3806 cpu_time_us: 506 } compilation { duration_us: 11835 cpu_time_us: 10886 } process_cpu_time_us: 197 total_duration_us: 16308 total_cpu_time_us: 11589 query_phases { duration_us: 2375 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 571 affected_shards: 1 } compilation { duration_us: 11061 cpu_time_us: 10342 } process_cpu_time_us: 181 total_duration_us: 14106 total_cpu_time_us: 11094 query_phases { duration_us: 1189 cpu_time_us: 569 affected_shards: 1 } compilation { duration_us: 14531 cpu_time_us: 13717 } process_cpu_time_us: 152 total_duration_us: 16298 total_cpu_time_us: 14438 query_phases { duration_us: 2609 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 521 affected_shards: 1 } compilation { duration_us: 13060 cpu_time_us: 12271 } process_cpu_time_us: 172 total_duration_us: 16292 total_cpu_time_us: 12964 query_phases { duration_us: 4523 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 2903 affected_shards: 1 } compilation { duration_us: 11955 cpu_time_us: 10428 } process_cpu_time_us: 205 total_duration_us: 17406 total_cpu_time_us: 13536 query_phases { duration_us: 2411 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 604 affected_shards: 1 } compilation { duration_us: 10466 cpu_time_us: 9660 } process_cpu_time_us: 165 total_duration_us: 14492 total_cpu_time_us: 10429 |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::CreateDroppedExternalTableWithReboots |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::DropExternalTableWithReboots >> KqpNewEngine::StaleRO_IndexFollowers+EnableFollowers [GOOD] >> KqpNewEngine::SqlInFromCompact |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore_reboots/unittest >> TIncrementalRestoreWithRebootsTests::DatabaseConsistencyAfterMultipleReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: 
[1:113:2143] 2025-06-30T09:26:23.915601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:23.915626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:23.915630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:23.915635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:23.915648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:23.915651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:23.915659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:23.915671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:23.915766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:23.915835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:23.926507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:23.926531Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:23.926621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:23.930027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:23.930361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:23.930401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:23.932513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:23.932595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:23.932740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.932881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 
2025-06-30T09:26:23.934140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:23.934199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:23.934524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:23.934537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:23.934561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:23.934572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:23.934580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:23.934632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.936462Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:23.961324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:23.961418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.961509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:23.961519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:23.961577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:23.961613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:23.962441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.962494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:23.962545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.962553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:23.962557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:23.962562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:23.963107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.963128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:23.963135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:23.963674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.963693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:23.963701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.963711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:23.964536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:23.965020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:23.965066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:23.965303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:23.965330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at 
schemeshard: 72057594046678944 2025-06-30T09:26:23.965339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.965408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:23.965418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:23.965459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:23.965474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 7205759 ... 30T09:26:31.404225Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ConsistencyTable2" PathDescription { Self { Name: "ConsistencyTable2" PathId: 10 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1008 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "ConsistencyTable2" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 13 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 
MaxColumnTableColumns: 10000 } } } PathId: 10 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'ConsistencyTable2' state: EPathStateNoChanges 2025-06-30T09:26:31.404310Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/ConsistencyTestCollection" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:31.404339Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/ConsistencyTestCollection" took 31us result status StatusSuccess 2025-06-30T09:26:31.404423Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/ConsistencyTestCollection" PathDescription { Self { Name: "ConsistencyTestCollection" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1005 CreateStep: 5000006 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 13 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "ConsistencyTestCollection" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/ConsistencyTable1" } Entries { Type: ETypeTable Path: "/MyRoot/ConsistencyTable2" } } Cluster { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'ConsistencyTestCollection' state: EPathStateNoChanges Verifying path states after reboot for regular restore operation... 
2025-06-30T09:26:31.404516Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/AnotherTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:31.404538Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/AnotherTable" took 25us result status StatusSuccess 2025-06-30T09:26:31.404608Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/AnotherTable" PathDescription { Self { Name: "AnotherTable" PathId: 14 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1012 CreateStep: 5000013 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "AnotherTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 13 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 14 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Target table 'AnotherTable' state: EPathStateNoChanges 2025-06-30T09:26:31.404698Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/ConsistencyTestCollection2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false 
ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:31.404723Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/ConsistencyTestCollection2" took 27us result status StatusSuccess 2025-06-30T09:26:31.404784Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/ConsistencyTestCollection2" PathDescription { Self { Name: "ConsistencyTestCollection2" PathId: 11 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 1009 CreateStep: 5000010 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "backup_001_full" PathId: 12 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1010 CreateStep: 5000011 ParentPathId: 11 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 13 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "ConsistencyTestCollection2" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/AnotherTable" } } Cluster { } } } PathId: 11 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Backup collection 'ConsistencyTestCollection2' state: EPathStateNoChanges Database consistency verification completed successfully >> KqpLimits::TooBigKey+useSink [GOOD] >> KqpLimits::TooBigKey-useSink |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_deduplication_table[tables_format_v1] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v0-200] >> TExternalTableTestReboots::DropReplacedExternalTableWithReboots |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue_batch[tables_format_v1] [GOOD] >> 
test_queues_managing.py::TestQueuesManagingWithTenant::test_queues_count_over_limit[tables_format_v0] |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> KqpLimits::CancelAfterRwTx-useSink [GOOD] >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookupDepededRead |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TA] $(B)/ydb/core/kqp/ut/cost/test-results/unittest/{meta.json ... results_accumulator.log} >> TExternalTableTestReboots::CreateDroppedExternalTableAndDropWithReboots |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> KqpLimits::TooBigKey-useSink [GOOD] >> KqpLimits::TooBigColumn-useSink >> TxUsage::WriteToTopic_Demo_19_RestartNo_Table [GOOD] |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> KqpNewEngine::SqlInFromCompact [GOOD] >> KqpNewEngine::SqlInAsScalar >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> KqpLimits::TooBigColumn-useSink [GOOD] |84.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v1-std] [GOOD] >> KqpNewEngine::SqlInAsScalar [GOOD] >> KqpStats::SysViewClientLost [FAIL] >> KqpStats::SysViewCancelled >> TxUsage::WriteToTopic_Demo_27_Table [GOOD] >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpLimits::TooBigColumn-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 20637, MsgBus: 21053 2025-06-30T09:25:38.066006Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671162355746086:2246];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:38.066446Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00420d/r3tmp/tmp0vR06D/pdisk_1.dat 2025-06-30T09:25:38.154549Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:38.155516Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671162355745841:2080] 1751275538051143 != 1751275538051146 TServer::EnableGrpc on GrpcPort 20637, node 1 2025-06-30T09:25:38.186811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:38.186827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:38.186830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:38.186878Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:38.213267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:38.213314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:21053 2025-06-30T09:25:38.223151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21053 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:38.323972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:25:38.330097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:38.341955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.374495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:38.411425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.439863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:38.699991Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671162355747454:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.700035Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.758978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.778934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.797138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.810333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.837558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.905524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.934401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:38.971766Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671162355748109:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.971812Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.972115Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671162355748114:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:38.973281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:38.977185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-30T09:25:38.977296Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671162355748116:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:39.065555Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:39.069403Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671166650715464:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:39.323907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:43.065070Z node 1 :METADATA_PROV ... ion cookie mismatch for subscription [4:7521671399468182909:2080] 1751275593485081 != 1751275593485084 TServer::EnableGrpc on GrpcPort 22049, node 4 2025-06-30T09:26:33.526086Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:33.526105Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:33.526109Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:33.526164Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62604 TClient is connected to server localhost:62604 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:33.592455Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:33.592496Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:33.592995Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:33.593567Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:26:33.599343Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:33.603739Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:33.625791Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:33.672087Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:33.687747Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:33.919817Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671399468184505:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:33.919853Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:33.930407Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:33.941340Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:33.950817Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:33.959362Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:33.972247Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:34.028926Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:34.041916Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:34.058551Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671403763152456:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:34.058577Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671403763152461:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:34.058595Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:34.059385Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:34.069078Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671403763152463:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:34.143384Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671403763152514:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:34.487772Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:34.568298Z node 4 :TX_DATASHARD ERROR: check_data_tx_unit.cpp:186: Transaction write column value of 20971522 bytes is larger than the allowed threshold 2025-06-30T09:26:34.568369Z node 4 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715672 at tablet 72075186224037911 status: EXEC_ERROR errors: BAD_ARGUMENT (Transaction write column value of 20971522 bytes is larger than the allowed threshold) | 2025-06-30T09:26:34.568468Z node 4 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [4:7521671403763152753:2460] TxId: 281474976715672. Ctx: { TraceId: 01jz02hrm223kxhv9v68jx1vd0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NDg2MTUwYTQtYjhkNjU4YTItMWJlYzZkYjAtMjVkOGQ3YTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [BAD_ARGUMENT] Transaction write column value of 20971522 bytes is larger than the allowed threshold; 2025-06-30T09:26:34.568584Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=NDg2MTUwYTQtYjhkNjU4YTItMWJlYzZkYjAtMjVkOGQ3YTc=, ActorId: [4:7521671403763152738:2460], ActorState: ExecuteState, TraceId: 01jz02hrm223kxhv9v68jx1vd0, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [BAD_ARGUMENT] Transaction write column value of 20971522 bytes is larger than the allowed threshold ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::SqlInAsScalar [GOOD] Test command err: Trying to start YDB, gRPC: 1037, MsgBus: 62759 2025-06-30T09:25:50.197085Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671214764659023:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:50.198427Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0019fb/r3tmp/tmp1HgUGv/pdisk_1.dat 2025-06-30T09:25:50.288945Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:50.301919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:50.301951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 1037, node 1 2025-06-30T09:25:50.312666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:50.343039Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:50.343052Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:50.343055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:50.343104Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62759 TClient is connected to server localhost:62759 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:50.458352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:25:50.462310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:50.786269Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671214764659615:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.786300Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.858895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:50.894895Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671214764659715:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.894921Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.894988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671214764659720:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:50.897970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:50.901064Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671214764659722:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:50.956479Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671214764659773:2386] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 8840, MsgBus: 25551 2025-06-30T09:25:51.470895Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671221116666002:2093];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:51.471103Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0019fb/r3tmp/tmpLL7Yrc/pdisk_1.dat 2025-06-30T09:25:51.526528Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:51.559514Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:51.559546Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8840, node 2 2025-06-30T09:25:51.567118Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:51.611002Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:51.611018Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:51.611021Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:51.611076Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25551 TClient is connected to server localhost:25551 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:25:51.767463Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:51.771273Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:52.180804Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671225411633827:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.180848Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.183330Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:52.243785Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521671225411634177:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.243842Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:52.243970Z node 2 :K ... esource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:33.227810Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:33.283278Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:33.304706Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 22417, MsgBus: 12396 2025-06-30T09:26:33.766337Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7521671399889731994:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:33.766358Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0019fb/r3tmp/tmpmRXCwG/pdisk_1.dat 2025-06-30T09:26:33.789996Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22417, node 7 2025-06-30T09:26:33.801964Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:33.801985Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:33.801988Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:33.802059Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12396 TClient is connected to server localhost:12396 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:33.876717Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:33.876758Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:33.877540Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:33.877704Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:26:33.878869Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:33.887676Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:33.905516Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:33.979527Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:33.994057Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:34.236278Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671404184700849:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:34.236320Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:34.248178Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:34.258360Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:34.268187Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:34.280426Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:34.294354Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:34.311048Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:34.322719Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:34.338611Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671404184701502:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:34.338641Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:34.338668Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671404184701507:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:34.339847Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:34.349427Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671404184701509:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:34.448784Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671404184701560:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> TBackupTests::BackupUuidColumn[Raw] |84.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] |84.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init >> TBackupTests::ShouldSucceedOnLargeData[Raw] >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] >> TxUsage::WriteToTopic_Demo_27_Query |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_ya_count_queues[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |84.9%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/cost/test-results/unittest/{meta.json ... results_accumulator.log} |84.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] [GOOD] >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] [GOOD] >> TBackupTests::BackupUuidColumn[Raw] [GOOD] >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] [GOOD] |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] [GOOD] |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_create_queue_generates_event[tables_format_v1] [SKIPPED] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:35.451779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:35.451810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.451815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:35.451820Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:35.451824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:35.451827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:35.451835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.451856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:35.451966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:35.452045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:35.465710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:35.465739Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:35.468881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:35.468943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:35.468982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:35.470642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:35.470710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:35.470875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.470958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:35.471641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.471684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:35.471932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.471944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.471969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:35.471978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.471985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:35.472004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.473388Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:35.494787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:35.494865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.494930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:35.494939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:35.494998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:35.495015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:35.495850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.495905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:35.495971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.495992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:35.495998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:35.496004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:35.496701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-30T09:26:35.496718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:35.496729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:35.497150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.497162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.497168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.497176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:35.497988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:35.498655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:35.498711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:35.498966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.499002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:35.499010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.499091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:35.499101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.499140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:35.499158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 
1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:35.499747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.499756Z node 1 :FLAT_TX_SCHEMESHARD ... AT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true S3_MOCK::HttpServeWrite: /data_01.csv.zst / / 20 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:1595 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 737D0518-AF7C-4884-86E1-36C49E431998 amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-30T09:26:35.706730Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:480:2437], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } 2025-06-30T09:26:35.707295Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:486:2441], result# PutObjectResult { ETag: f0d3871f5c9cc0f5c2e4afaffb7eeef2 } 2025-06-30T09:26:35.707312Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:486:2441], success# 1, error# , multipart# 0, uploadId# (empty maybe) FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-30T09:26:35.707648Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:485:2439], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:1595 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 87DE5755-18A8-4F84-94F8-4F23BF3D1A10 amz-sdk-request: attempt=1 content-length: 638 content-md5: Myp3UygaBNGp6+7AMgyRnQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 638 2025-06-30T09:26:35.709249Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:480:2437], result# PutObjectResult { ETag: 332a7753281a04d1a9ebeec0320c919d } FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:26:35.709539Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:479:2436] 2025-06-30T09:26:35.709634Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:480:2437], sender# [1:479:2436], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-06-30T09:26:35.710248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:1595 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 
401DFA98-0C04-41D4-BABC-117289BA60EC amz-sdk-request: attempt=1 content-length: 20 content-md5: 2qFn9G0TW8wfvJ9C+A5Jbw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 20 2025-06-30T09:26:35.710533Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:480:2437], result# PutObjectResult { ETag: daa167f46d135bcc1fbc9f42f80e496f } 2025-06-30T09:26:35.710546Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:480:2437], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-30T09:26:35.710592Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:479:2436], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-30T09:26:35.714267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 322 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.714292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:26:35.714323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 322 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.714342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 322 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.714360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.714400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.714525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 330 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.714533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation 
FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-30T09:26:35.714550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 330 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.714563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 330 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.714571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.714576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.714582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:26:35.714588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-30T09:26:35.714595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240 2025-06-30T09:26:35.714611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.719383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.719596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.719783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.719806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:26:35.719844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:35.719853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.719862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 
2025-06-30T09:26:35.719868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.719876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:26:35.719905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:375:2341] message: TxId: 102 2025-06-30T09:26:35.719918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.719928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:26:35.719937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:26:35.719989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:26:35.720762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:26:35.720780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:458:2417] TestWaitNotification: OK eventTxId 102 |84.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/pgwire/pgwire |84.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/pgwire/pgwire |84.9%| [LD] {RESULT} $(B)/ydb/apps/pgwire/pgwire ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:35.453611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:35.453633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.453638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:35.453642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:35.453646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:35.453649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:35.453656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, 
InflightLimit# 10 2025-06-30T09:26:35.453665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:35.453779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:35.453863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:35.465999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:35.466020Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:35.468708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:35.468769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:35.468802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:35.470421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:35.470495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:35.470594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.470676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:35.471530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.471595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:35.471884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.471898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.471928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:35.471935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.471940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:35.471955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.473277Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 
2025-06-30T09:26:35.492396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:35.492465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.492517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:35.492524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:35.492569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:35.492579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:35.493279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.493310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:35.493360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.493374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:35.493378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:35.493382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:35.493784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.493799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:35.493808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:35.494138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.494145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.494149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.494156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:35.494679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:35.495151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:35.495193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:35.495397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.495423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:35.495431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.495500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:35.495508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.495539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:35.495551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:35.496173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.496188Z node 1 :FLAT_TX_SCHEMESHARD ... 
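The schemeshard announces every per-part state change with a uniform "Change state for txid <op> <from> -> <to>" line (2 -> 3 -> 128 -> 240 for op 1:0 above; the backup op 102:0 earlier moved 129 -> 240 before TDone ran). A small sketch for reconstructing those chains from a captured log; the helper name is hypothetical and only the quoted wording is taken from the output:

```python
import re
from collections import defaultdict

# Matches lines such as:
#   ... schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240
TRANSITION = re.compile(r"Change state for txid (\d+:\d+) (\d+) -> (\d+)")

def state_chains(log_text: str) -> dict:
    """Group the observed (from, to) state transitions by operation id."""
    chains = defaultdict(list)
    for op_id, src, dst in TRANSITION.findall(log_text):
        chains[op_id].append((int(src), int(dst)))
    return dict(chains)

# On this capture, op 1:0 yields [(2, 3), (3, 128), (128, 240)], and the
# backup op 102:0 ends with (129, 240), after which TDone reports progress 1/1.
```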
= content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_01.csv / / 11 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:30393 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 89857624-31E8-42E2-833F-8A86345272F9 amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-30T09:26:35.686102Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:480:2437], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } 2025-06-30T09:26:35.686549Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:486:2441], result# PutObjectResult { ETag: 8ec321cb31fe732aef669066d1d41519 } 2025-06-30T09:26:35.686559Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:486:2441], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-30T09:26:35.686642Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:485:2439], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:30393 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: F1119DDD-5177-4305-88A2-7FF93B2AC13A amz-sdk-request: attempt=1 content-length: 638 content-md5: Myp3UygaBNGp6+7AMgyRnQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 638 FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:26:35.687893Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:480:2437], result# PutObjectResult { ETag: 332a7753281a04d1a9ebeec0320c919d } 2025-06-30T09:26:35.688171Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:479:2436] 2025-06-30T09:26:35.688204Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:480:2437], sender# [1:479:2436], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:30393 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: CCB82F40-E601-48AE-9348-C9A8A4E8D656 amz-sdk-request: attempt=1 2025-06-30T09:26:35.688833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-30T09:26:35.689089Z 
node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:480:2437], result# PutObjectResult { ETag: 6e3e0a41fdab8add833862f1bd2954c3 } 2025-06-30T09:26:35.689099Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:480:2437], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-30T09:26:35.689135Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:479:2436], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-30T09:26:35.692205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 322 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.692230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:26:35.692262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 322 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.692281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 322 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.692301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.692345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.692464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 330 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.692470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-30T09:26:35.692481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 330 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 
TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.692492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 330 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.692497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.692500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.692504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:26:35.692508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-30T09:26:35.692513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240 2025-06-30T09:26:35.692524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.693084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.693175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.693266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.693275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:26:35.693289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:35.693293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.693297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:35.693299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.693303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:26:35.693317Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:375:2341] message: TxId: 102 2025-06-30T09:26:35.693325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.693330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:26:35.693334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:26:35.693365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:26:35.694022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:26:35.694043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:458:2417] TestWaitNotification: OK eventTxId 102 >> TBackupTests::ShouldSucceedOnLargeData[Zstd] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::BackupUuidColumn[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:35.589720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:35.589744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.589749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:35.589753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:35.589758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:35.589761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:35.589767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.589778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:35.589890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 
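Between build-progress markers, each unittest block in this stream opens with "------- [TM] ..." followed by a ">> <TestName> [GOOD]" verdict and its "Test command err:" capture. A sketch for pulling the verdicts out of such a capture; only [GOOD] occurs in this excerpt, so the other status tokens are assumptions about what ya may print, not values observed here:

```python
import re

# Verdict markers as they appear in this stream, e.g.:
#   >> TBackupTests::BackupUuidColumn[Raw] [GOOD]
# Only GOOD occurs in this excerpt; the other tokens are assumed.
VERDICT = re.compile(r">> (\S+) \[(GOOD|FAIL|TIMEOUT|CRASHED)\]")

def verdicts(log_text: str):
    """Yield (test_name, status) for every completed test in the capture."""
    yield from VERDICT.findall(log_text)

# A bare '>> TBackupTests::ShouldSucceedOnLargeData[Zstd]' (still queued,
# no bracketed status yet) is correctly skipped by the pattern.
```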
2025-06-30T09:26:35.589948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:35.602889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:35.602917Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:35.606113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:35.606166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:35.606208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:35.608275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:35.608358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:35.608485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.608570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:35.609181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.609223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:35.609439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.609447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.609469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:35.609476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.609480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:35.609493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.610727Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:35.630959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:35.631030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.631092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:35.631101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:35.631156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:35.631172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:35.631926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.631978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:35.632036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.632056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:35.632062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:35.632068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:35.632570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.632584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:35.632593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:35.632947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.632958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.632964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.632972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:35.633811Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:35.634262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:35.634304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:35.634497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.634524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:35.634533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.634609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:35.634617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.634651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:35.634664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:35.635204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.635215Z node 1 :FLAT_TX_SCHEMESHARD ... 
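Note that the tests above each upload a 94-byte /metadata.json with the identical header content-md5: ZpDejBbuBPHjGq8ZC8z8QA==, i.e. the payload is byte-for-byte the same across runs. That header value is the base64 of the raw (binary) MD5 digest of the body, which a sketch like this reproduces; the actual metadata payload is not printed in the log, so only the encoding is illustrated:

```python
import base64
import hashlib

def content_md5(body: bytes) -> str:
    """The value aws-sdk-cpp sends as Content-MD5: base64 of the binary MD5 digest."""
    return base64.b64encode(hashlib.md5(body).digest()).decode("ascii")

# Hypothetical payload -- the real 94-byte metadata.json is not shown in the log.
print(content_md5(b"example payload"))
```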
schemeshard: 72057594046678944 2025-06-30T09:26:35.741913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-30T09:26:35.741932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 128 -> 129 2025-06-30T09:26:35.741957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:26:35.743872Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:421:2390], attempt# 0 2025-06-30T09:26:35.747147Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:421:2390], sender# [1:420:2387] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-30T09:26:35.748580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.748590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:26:35.748653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.748658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-30T09:26:35.748669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.748676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:10255 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: CD2A465C-9458-4770-A9D9-CB5454689E3C amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-30T09:26:35.748926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:26:35.748937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:26:35.748943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:26:35.748948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, 
txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-30T09:26:35.748953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:26:35.748967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:26:35.749256Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:10255 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 8BC72674-9B4C-44A9-B37C-75DC09A7682B amz-sdk-request: attempt=1 content-length: 357 content-md5: IxJB3qM/y2xlsv8qcwTF7g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-30T09:26:35.750721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:26:35.750892Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: 231241dea33fcb6c65b2ff2a7304c5ee } 2025-06-30T09:26:35.750911Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:420:2387] 2025-06-30T09:26:35.750936Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:421:2390], sender# [1:420:2387], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:10255 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 1CA370B9-5385-4D6F-B218-DEF4CF5AD7B8 amz-sdk-request: attempt=1 content-length: 39 content-md5: GLX1nc5/cKhlAfxBHlykQA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 39 2025-06-30T09:26:35.751851Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: 18b5f59dce7f70a86501fc411e5ca440 } 2025-06-30T09:26:35.751868Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:421:2390], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-30T09:26:35.751916Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:420:2387], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-30T09:26:35.764904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" 
BytesProcessed: 20 RowsProcessed: 1 } 2025-06-30T09:26:35.764954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:26:35.764987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-30T09:26:35.765003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-30T09:26:35.765020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.765025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.765030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:26:35.765039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240 2025-06-30T09:26:35.765094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.765852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.765976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.765988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:26:35.766008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:35.766013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.766019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:35.766023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.766028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:26:35.766048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:342:2319] message: TxId: 102 2025-06-30T09:26:35.766060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.766066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:26:35.766072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:26:35.766109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:26:35.766725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:26:35.766759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:404:2374] TestWaitNotification: OK eventTxId 102 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:35.611642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:35.611668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.611675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:35.611682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:35.611689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:35.611695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:35.611705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.611718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:35.611844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: 
HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:35.611926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:35.626438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:35.626460Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:35.629312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:35.629388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:35.629429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:35.631428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:35.631509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:35.631651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.631737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:35.632614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.632681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:35.633003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.633018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.633045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:35.633054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.633061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:35.633080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.634693Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:35.651295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:35.651359Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.651414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:35.651420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:35.651465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:35.651477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:35.652218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.652273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:35.652343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.652361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:35.652367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:35.652374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:35.652936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.652952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:35.652962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:35.653414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.653427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.653434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.653442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:35.654130Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:35.654601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:35.654639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:35.654853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.654882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:35.654891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.654961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:35.654969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.655004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:35.655016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:35.655503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.655513Z node 1 :FLAT_TX_SCHEMESHARD ... 
hard: 72057594046678944 2025-06-30T09:26:35.780175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-30T09:26:35.780198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 128 -> 129 2025-06-30T09:26:35.780230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:26:35.783100Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:421:2390], attempt# 0 2025-06-30T09:26:35.787650Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:421:2390], sender# [1:420:2387] 2025-06-30T09:26:35.788569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.788587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:26:35.788646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.788653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-30T09:26:35.788744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.788754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:30722 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 487666E7-3FF6-4047-B5FE-31C2F1BC966B amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD FAKE_COORDINATOR: Erasing txId 102 S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-30T09:26:35.788924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:26:35.788941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:26:35.788950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:26:35.788956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at
schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-30T09:26:35.788963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:26:35.788981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-30T09:26:35.789059Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:30722 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: E7E5ABA5-EFD9-478E-A004-012C08F032E9 amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-30T09:26:35.790401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-30T09:26:35.790432Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-30T09:26:35.790445Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:420:2387] 2025-06-30T09:26:35.790489Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:421:2390], sender# [1:420:2387], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:30722 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: BF204AC8-B590-4555-AC13-2DD70129275D amz-sdk-request: attempt=1 content-length: 20 content-md5: 2qFn9G0TW8wfvJ9C+A5Jbw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 20 2025-06-30T09:26:35.791243Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: daa167f46d135bcc1fbc9f42f80e496f } 2025-06-30T09:26:35.791254Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:421:2390], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-30T09:26:35.791288Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:420:2387], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-30T09:26:35.793904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" 
BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.793934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:26:35.793967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.793985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.794002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.794009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.794015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:26:35.794025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240 2025-06-30T09:26:35.794076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.794757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.794833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.794843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:26:35.794863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:35.794884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.794891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:35.794895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.794902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:26:35.794923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:342:2319] message: TxId: 102 2025-06-30T09:26:35.794939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.794946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:26:35.794951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:26:35.794988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:26:35.795655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:26:35.795672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:404:2374] TestWaitNotification: OK eventTxId 102 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:35.628792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:35.628812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.628816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:35.628820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:35.628825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:35.628828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:35.628834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.628845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:35.628933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: 
HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:35.628985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:35.639157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:35.639177Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:35.641714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:35.641757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:35.641783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:35.643214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:35.643292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:35.643410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.643501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:35.644210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.644256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:35.644466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.644477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.644502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:35.644509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.644514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:35.644528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.645685Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:35.660008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:35.660054Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.660092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:35.660098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:35.660141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:35.660155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:35.660742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.660778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:35.660829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.660846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:35.660864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:35.660869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:35.661467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.661480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:35.661489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:35.661912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.661922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.661927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.661932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:35.662395Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:35.662785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:35.662810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:35.662970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.662990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:35.662995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.663043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:35.663049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.663070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:35.663079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:35.663477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.663486Z node 1 :FLAT_TX_SCHEMESHARD ... 
at schemeshard: 72057594046678944 2025-06-30T09:26:35.782010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-30T09:26:35.782026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 128 -> 129 2025-06-30T09:26:35.782046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:26:35.783798Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:421:2390], attempt# 0 2025-06-30T09:26:35.787170Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:421:2390], sender# [1:420:2387] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-30T09:26:35.788360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.788376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-30T09:26:35.788465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.788474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-30T09:26:35.788494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.788504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:35.788820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:26:35.788835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:26:35.788841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:26:35.788846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-30T09:26:35.788853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:26:35.788875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 REQUEST: PUT 
/metadata.json HTTP/1.1 HEADERS: Host: localhost:9031 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B638ED32-6777-45EA-9009-89ECA4A7BBC9 amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-30T09:26:35.789268Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } 2025-06-30T09:26:35.790041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:9031 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 4608206E-86FD-4301-9AFC-AA39061BA41A amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-30T09:26:35.790555Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-30T09:26:35.790599Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:420:2387] 2025-06-30T09:26:35.790628Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:421:2390], sender# [1:420:2387], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:9031 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 2BBFFC88-1730-4303-A564-8C66752721B0 amz-sdk-request: attempt=1 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-30T09:26:35.791356Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:421:2390], result# PutObjectResult { ETag: 6e3e0a41fdab8add833862f1bd2954c3 } 2025-06-30T09:26:35.791374Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:421:2390], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-30T09:26:35.791427Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:420:2387], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-30T09:26:35.805061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" 
BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.805100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:26:35.805144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.805166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-30T09:26:35.805187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.805193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.805200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:26:35.805211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240 2025-06-30T09:26:35.805280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.806058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.806183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.806196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:26:35.806215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:35.806222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.806228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:35.806232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.806238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:26:35.806261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:342:2319] message: TxId: 102 2025-06-30T09:26:35.806269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:35.806276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:26:35.806282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:26:35.806320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:26:35.809923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:26:35.809957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:404:2374] TestWaitNotification: OK eventTxId 102 |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonValue >> test_queues_managing.py::TestQueuesManagingWithTenant::test_queues_count_over_limit[tables_format_v0] [GOOD] >> TestProgram::YqlKernelStartsWithScalar |84.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |84.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |84.9%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelStartsWithScalar [GOOD] >> TSubscriberSyncQuorumTest::OneRingGroup >> TestProgram::JsonValue [GOOD] >> TestProgram::YqlKernelEndsWith [GOOD] >> TSubscriberSyncQuorumTest::OneRingGroup [GOOD] >> TSubscriberSyncQuorumTest::OneWriteOnlyRingGroup >> KqpLimits::OutOfSpaceYQLUpsertFail-useSink [GOOD] >> KqpLimits::ManyPartitionsSortingLimit |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonValue [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\"\000\t\211\004?\020\235?\002\001\235?\004\000\"\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\"\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D 
VisitAll\000\t\211\020?H\211\006?H\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?p6Json2.SqlValueConvertToUtf8\202\003?r\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?d\t\211\014?b\311\002?b\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\214\005\205\004\203\010\203\005@\032\036\003?\222\002\003?\224\000\003\001\003?\216\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\244\203\005@\200\203\005@\202\022\000\003?\260\026Json2.Parse\202\003?\262\000\002\017\003?\246\000\003?\250\000\003?\252\000\003?\254\000?:\036\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\312\203\005@\200\203\005@\202\022\000\003?\326\"Json2.CompilePath\202\003?\330\000\002\017\003?\314\000\003?\316\000\003?\320\000\003?\322\000?2\036\010\000?l\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\370\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\"\000\t\211\004?\020\235?\002\001\235?\004\000\"\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\"\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D VisitAll\000\t\211\020?H\211\006?H\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?p6Json2.SqlValueConvertToUtf8\202\003?r\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?d\t\211\014?b\311\002?b\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\214\005\205\004\203\010\203\005@\032\036\003?\222\002\003?\224\000\003\001\003?\216\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\244\203\005@\200\203\005@\202\022\000\003?\260\026Json2.Parse\202\003?\262\000\002\017\003?\246\000\003?\250\000\003?\252\000\003?\254\000?:\036\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\312\203\005@\200\203\005@\202\022\000\003?\326\"Json2.CompilePath\202\003?\330\000\002\017\003?\314\000\003?\316\000\003?\320\000\003?\322\000?2\036\010\000?l\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\370\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, 
label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Utf8 FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203\014?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r$Json2.SqlValueBool\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203\014?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r$Json2.SqlValueBool\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\" ... 
} FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203B\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?6 VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000\t\211\004?6\203\005@?F\030Invoke\000\003?\374\016Convert?\372\001\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Float FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Double FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelStartsWithScalar [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Bytes: "Lorem" } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\024StartsWith?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Bytes: "Lorem" } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\024StartsWith?\034? 
\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"Lorem\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"7,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:7,15"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"7\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"7,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"string","id":7}]},"o":"7","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"string","id":7}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"Lorem"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> TSubscriberSyncQuorumTest::OneWriteOnlyRingGroup [GOOD] >> TestProgram::Like ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEndsWith [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? 
\020EndsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \020EndsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"7,9\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:7,9"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"9\",\"p\":{\"address\":{\"name\":\"substring\",\"id\":9}},\"o\":\"9\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"7,9\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"7,9","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"9","p":{"address":{"name":"substring","id":9}},"o":"9","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"7,9","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> TestProgram::JsonValueBinary >> TestProgram::JsonValueBinary 
[GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query [GOOD] |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::Like [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberSyncQuorumTest::OneWriteOnlyRingGroup [GOOD] Test command err: ... waiting for initial path lookups 2025-06-30T09:26:37.165928Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:19:2066][TestPath] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:26:37.166552Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:2:2049] 2025-06-30T09:26:37.166594Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:5:2052] 2025-06-30T09:26:37.166606Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:8:2055] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... 
unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR Poisoning replica: [1:2199047594611:0] 2025-06-30T09:26:37.166703Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][1:19:2066][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:18:2065], cookie# 12345 2025-06-30T09:26:37.166729Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:20:2066] 2025-06-30T09:26:37.166789Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:21:2066] 2025-06-30T09:26:37.166807Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:19:2066][TestPath] Set up state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:26:37.166820Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:22:2066] 2025-06-30T09:26:37.166831Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:19:2066][TestPath] Ignore empty state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:26:37.166852Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:20:2066], cookie# 12345 2025-06-30T09:26:37.166865Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:21:2066], cookie# 12345 2025-06-30T09:26:37.166875Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:22:2066], cookie# 12345 2025-06-30T09:26:37.166889Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:2:2049], cookie# 12345 2025-06-30T09:26:37.166900Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:5:2052], cookie# 12345 2025-06-30T09:26:37.166918Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:20:2066], cookie# 12345 2025-06-30T09:26:37.166928Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:19:2066][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:26:37.166936Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:21:2066], cookie# 12345 2025-06-30T09:26:37.166942Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:19:2066][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, 
half# 1, successes# 2, failures# 0 2025-06-30T09:26:37.166949Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:22:2066], cookie# 12345 2025-06-30T09:26:37.166957Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][1:19:2066][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:26:37.166968Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:22:2066] 2025-06-30T09:26:37.166976Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:19:2066][TestPath] Ignore empty state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Poisoning replica: [1:24339059:0] 2025-06-30T09:26:37.167004Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][1:19:2066][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:18:2065], cookie# 12346 2025-06-30T09:26:37.167027Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:20:2066], cookie# 12346 2025-06-30T09:26:37.167037Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:21:2066], cookie# 12346 2025-06-30T09:26:37.167046Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:22:2066], cookie# 12346 2025-06-30T09:26:37.167052Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:19:2066][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-30T09:26:37.167061Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:5:2052], cookie# 12346 2025-06-30T09:26:37.167071Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:20:2066], cookie# 12346 2025-06-30T09:26:37.167077Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][1:19:2066][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 2 2025-06-30T09:26:37.167087Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:20:2066] 2025-06-30T09:26:37.167094Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:19:2066][TestPath] Ignore empty state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:26:37.167102Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse 
{ Version: 0 Partial: 0 }: sender# [1:21:2066], cookie# 12346 2025-06-30T09:26:37.167109Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1002: [main][1:19:2066][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 1, failures# 2, partial# 1 2025-06-30T09:26:37.167115Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][1:19:2066][TestPath] Sync is incomplete in one of the ring groups: cookie# 12346 ... waiting for initial path lookups 2025-06-30T09:26:37.369301Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][2:28:2075][TestPath] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:26:37.369413Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:2:2049] 2025-06-30T09:26:37.369423Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:5:2052] 2025-06-30T09:26:37.369427Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:8:2055] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... 
unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR Poisoning replica: [2:2199047594611:0] Poisoning replica: [2:3298559222387:0] Poisoning replica: [2:4398070850163:0] Poisoning replica: [2:5497582477939:0] 2025-06-30T09:26:37.369486Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][2:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [2:27:2074], cookie# 12345 2025-06-30T09:26:37.369493Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:29:2075] 2025-06-30T09:26:37.369514Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:30:2075] 2025-06-30T09:26:37.369523Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][2:28:2075][TestPath] Set up state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:26:37.369529Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:31:2075] 2025-06-30T09:26:37.369535Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][2:28:2075][TestPath] Ignore empty state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:26:37.369547Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [2:29:2075], cookie# 12345 2025-06-30T09:26:37.369553Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [2:30:2075], cookie# 12345 2025-06-30T09:26:37.369558Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [2:31:2075], cookie# 12345 2025-06-30T09:26:37.369568Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:2:2049], cookie# 12345 2025-06-30T09:26:37.369572Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:5:2052], cookie# 12345 2025-06-30T09:26:37.369596Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:29:2075], cookie# 12345 2025-06-30T09:26:37.369602Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][2:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-30T09:26:37.369606Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:30:2075], cookie# 12345 2025-06-30T09:26:37.369609Z node 2 :SCHEME_BOARD_SUBSCRIBER 
DEBUG: subscriber.cpp:987: [main][2:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-30T09:26:37.369612Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [2:31:2075], cookie# 12345 2025-06-30T09:26:37.369616Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1000: [main][2:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-30T09:26:37.369633Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:31:2075] 2025-06-30T09:26:37.369637Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][2:28:2075][TestPath] Ignore empty state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Poisoning replica: [2:1099535966835:0] 2025-06-30T09:26:37.369651Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:903: [main][2:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [2:27:2074], cookie# 12346 2025-06-30T09:26:37.369662Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [2:29:2075], cookie# 12346 2025-06-30T09:26:37.369667Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [2:30:2075], cookie# 12346 2025-06-30T09:26:37.369671Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [2:31:2075], cookie# 12346 2025-06-30T09:26:37.369674Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][2:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-30T09:26:37.369678Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:2:2049], cookie# 12346 2025-06-30T09:26:37.369683Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [2:29:2075], cookie# 12346 2025-06-30T09:26:37.369687Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:987: [main][2:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 1, failures# 1 2025-06-30T09:26:37.369690Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:929: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [2:30:2075], cookie# 12346 2025-06-30T09:26:37.369694Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1002: [main][2:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 1, failures# 2, partial# 1 2025-06-30T09:26:37.369696Z node 2 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1007: [main][2:28:2075][TestPath] Sync is incomplete in 
one of the ring groups: cookie# 12346 2025-06-30T09:26:37.369700Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:30:2075] 2025-06-30T09:26:37.369704Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][2:28:2075][TestPath] Ignore empty state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> KqpLimits::ManyPartitionsSortingLimit [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonValueBinary [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\032\000\t\211\004?\020\235?\002\001\235?\004\000\032\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\032\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D VisitAll\000\t\211\020?H\211\006?H\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?nNJson2.JsonDocumentSqlValueConvertToUtf8\202\003?p\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?b?:\t\211\014?d\211\002?d\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\206\203\005@\200\203\005@\202\022\000\003?\222\"Json2.CompilePath\202\003?\224\000\002\017\003?\210\000\003?\212\000\003?\214\000\003?\216\000?2\036\010\000?j\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\264\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\032\000\t\211\004?\020\235?\002\001\235?\004\000\032\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\032\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D 
VisitAll\000\t\211\020?H\211\006?H\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?nNJson2.JsonDocumentSqlValueConvertToUtf8\202\003?p\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?b?:\t\211\014?d\211\002?d\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\206\203\005@\200\203\005@\202\022\000\003?\222\"Json2.CompilePath\202\003?\224\000\002\017\003?\210\000\003?\212\000\003?\214\000\003?\216\000?2\036\010\000?j\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\264\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 7B226B6579223A31307D, 7B226B6579223A302E317D, 7B226B6579223A66616C73657D, 7B22616E6F74686572223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 
0102000021000000140000008403000001000000800300006B6579000000000000002440, 0102000021000000140000008403000001000000800300006B6579009A9999999999B93F, 0102000021000000140000000000000001000000800300006B657900, 01020000210000001400000003030000020000008004000040050000616E6F746865720076616C756500, 010100000000000000000000 ] Check output for Utf8 FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203\014?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t" ... 
{ Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203B\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?6 VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p@Json2.JsonDocumentSqlValueNumber\202\003?r\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?d?<\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\210\203\005@\200\203\005@\202\022\000\003?\224\"Json2.CompilePath\202\003?\226\000\002\017\003?\212\000\003?\214\000\003?\216\000\003?\220\000?4\036\010\000?l\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000\t\211\004?6\203\005@?F\030Invoke\000\003?\270\016Convert?\266\001\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 7B226B6579223A31307D, 7B226B6579223A302E317D, 7B226B6579223A66616C73657D, 7B22616E6F74686572223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 0102000021000000140000008403000001000000800300006B6579000000000000002440, 0102000021000000140000008403000001000000800300006B6579009A9999999999B93F, 0102000021000000140000000000000001000000800300006B657900, 01020000210000001400000003030000020000008004000040050000616E6F746865720076616C756500, 010100000000000000000000 ] Check output for Float FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p@Json2.JsonDocumentSqlValueNumber\202\003?r\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?d?<\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\210\203\005@\200\203\005@\202\022\000\003?\224\"Json2.CompilePath\202\003?\226\000\002\017\003?\212\000\003?\214\000\003?\216\000\003?\220\000?4\036\010\000?l\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\266\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p@Json2.JsonDocumentSqlValueNumber\202\003?r\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?d?<\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\210\203\005@\200\203\005@\202\022\000\003?\224\"Json2.CompilePath\202\003?\226\000\002\017\003?\212\000\003?\214\000\003?\216\000\003?\220\000?4\036\010\000?l\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\266\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 7B226B6579223A31307D, 7B226B6579223A302E317D, 7B226B6579223A66616C73657D, 7B22616E6F74686572223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 0102000021000000140000008403000001000000800300006B6579000000000000002440, 0102000021000000140000008403000001000000800300006B6579009A9999999999B93F, 0102000021000000140000000000000001000000800300006B657900, 01020000210000001400000003030000020000008004000040050000616E6F746865720076616C756500, 010100000000000000000000 ] Check output for Double FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_acl.py::TestSqsACLWithPath::test_modify_permissions[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::Like [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Bytes: "001" } } } Command { Assign { Column { Id: 16 } Constant { Bytes: "uid" } } } Command { Assign { Column { Id: 17 } Function { Id: 33 Arguments { Id: 7 } Arguments { Id: 16 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Assign { Column { Id: 18 } Function { Id: 34 Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 1 } } } Command { Assign { Column { Id: 19 } Function { Id: 18 Arguments { Id: 17 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 20 } Function { Id: 18 Arguments { Id: 18 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 21 } Function { Id: 11 Arguments { Id: 19 } Arguments { Id: 20 } FunctionType: SIMPLE_ARROW } } } Command { Projection { Columns { Id: 21 } } } Kernels: "O\006\006Arg\022BlockFunc\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\004\203\014?\006\001\235?\004\001\235?\010\001\n\000\t\211\004?\016\235?\000\001\235?\002\000\n\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\004?\020\235?\006\001?$\n\000\t\211\006?$\203\005@?\024?\026\006\000\003?(\024StartsWith?\034? \001\t\211\006?$\203\005@?\024?\026\006\000\003?0\020EndsWith?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Bytes: "001" } } } Command { Assign { Column { Id: 16 } Constant { Bytes: "uid" } } } Command { Assign { Column { Id: 17 } Function { Id: 33 Arguments { Id: 7 } Arguments { Id: 16 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Assign { Column { Id: 18 } Function { Id: 34 Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 1 } } } Command { Assign { Column { Id: 19 } Function { Id: 18 Arguments { Id: 17 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 20 } Function { Id: 18 Arguments { Id: 18 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 21 } Function { Id: 11 Arguments { Id: 19 } Arguments { Id: 20 } FunctionType: SIMPLE_ARROW } } } Command { Projection { Columns { Id: 21 } } } Kernels: "O\006\006Arg\022BlockFunc\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\004\203\014?\006\001\235?\004\001\235?\010\001\n\000\t\211\004?\016\235?\000\001\235?\002\000\n\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\004?\020\235?\006\001?$\n\000\t\211\006?$\203\005@?\024?\026\006\000\003?(\024StartsWith?\034? \001\t\211\006?$\203\005@?\024?\026\006\000\003?0\020EndsWith?\034? 
\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N6(0):{\"p\":{\"v\":\"001\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N0(0):{\"p\":{\"v\":\"uid\"},\"o\":\"16\",\"t\":\"Const\"}\n"]; N2[shape=box, label="N4(15):{\"i\":\"7,16\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"17\",\"t\":\"Calculation\"}\nREMOVE:16"]; N1 -> N2[label="1"]; N4 -> N2[label="2"]; N3[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"7\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N10 -> N3[label="1"]; N4[shape=box, label="N3(7):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N3 -> N4[label="1"]; N5[shape=box, label="N7(15):{\"i\":\"7,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"18\",\"t\":\"Calculation\"}\nREMOVE:7,15"]; N0 -> N5[label="1"]; N4 -> N5[label="2"]; N6[shape=box, label="N5(23):{\"i\":\"17\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"19\",\"t\":\"Calculation\"}\nREMOVE:17"]; N2 -> N6[label="1"]; N7[shape=box, label="N8(23):{\"i\":\"18\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"20\",\"t\":\"Calculation\"}\nREMOVE:18"]; N5 -> N7[label="1"]; N8[shape=box, label="N9(54):{\"i\":\"19,20\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"21\",\"t\":\"Calculation\"}\nREMOVE:19,20"]; N6 -> N8[label="1"]; N7 -> N8[label="2"]; N9[shape=box, label="N10(54):{\"i\":\"21\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N8 -> N9[label="1"]; N10[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N1->N10->N3->N4->N2->N6->N0->N5->N7->N8->N9[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[]},{"owner_id":2,"inputs":[{"from":1},{"from":4}]},{"owner_id":3,"inputs":[{"from":10}]},{"owner_id":4,"inputs":[{"from":3}]},{"owner_id":5,"inputs":[{"from":0},{"from":4}]},{"owner_id":6,"inputs":[{"from":2}]},{"owner_id":7,"inputs":[{"from":5}]},{"owner_id":8,"inputs":[{"from":6},{"from":7}]},{"owner_id":9,"inputs":[{"from":8}]},{"owner_id":10,"inputs":[]}],"nodes":{"1":{"p":{"p":{"v":"uid"},"o":"16","t":"Const"},"w":0,"id":1},"3":{"p":{"i":"0","p":{"data":[{"name":"string","id":7}]},"o":"7","t":"FetchOriginalData"},"w":2,"id":3},"8":{"p":{"i":"19,20","p":{"kernel":{"class_name":"SIMPLE"}},"o":"21","t":"Calculation"},"w":54,"id":8},"2":{"p":{"i":"7,16","p":{"kernel":{"class_name":"SIMPLE"}},"o":"17","t":"Calculation"},"w":15,"id":2},"0":{"p":{"p":{"v":"001"},"o":"15","t":"Const"},"w":0,"id":0},"5":{"p":{"i":"7,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"18","t":"Calculation"},"w":15,"id":5},"9":{"p":{"i":"21","t":"Projection"},"w":54,"id":9},"7":{"p":{"i":"18","p":{"kernel":{"class_name":"SIMPLE"}},"o":"20","t":"Calculation"},"w":23,"id":7},"4":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":7,"id":4},"10":{"p":{"p":{"data":[{"name":"string","id":7}]},"o":"0","t":"ReserveMemory"},"w":0,"id":10},"6":{"p":{"i":"17","p":{"kernel":{"class_name":"SIMPLE"}},"o":"19","t":"Calculation"},"w":23,"id":6}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow11BooleanTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow11BooleanTypeE; >> TestProgram::JsonExistsBinary [GOOD] |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |84.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |84.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |84.9%| [LD] {RESULT} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut >> KqpNewEngine::StaleRO_IndexFollowers-EnableFollowers [FAIL] >> KqpNewEngine::UnionAllPure >> TestProgram::YqlKernel [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonExistsBinary [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\203\021H\214\n\210\203\001H\214\002?6\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?J\203\005@\200\203\005@\202\022\000\003?d6Json2.JsonDocumentSqlExists\202\003?f\000\002\017\003?L\000\003?N\000\003?P\000\003?R\000\027?T?<\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?|\203\005@\200\203\005@\202\022\000\003?\210\"Json2.CompilePath\202\003?\212\000\002\017\003?~\000\003?\200\000\003?\202\000\003?\204\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\203\021H\214\n\210\203\001H\214\002?6\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?J\203\005@\200\203\005@\202\022\000\003?d6Json2.JsonDocumentSqlExists\202\003?f\000\002\017\003?L\000\003?N\000\003?P\000\003?R\000\027?T?<\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?|\203\005@\200\203\005@\202\022\000\003?\210\"Json2.CompilePath\202\003?\212\000\002\017\003?~\000\003?\200\000\003?\202\000\003?\204\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 010100000000000000000000 ] FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::CountUIDByVAT >> 
TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false >> TSchemeShardAuditSettings::CreateExtSubdomain >> TSchemeShardAuditSettings::AlterSubdomain >> TestProgram::CountUIDByVAT [GOOD] >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true >> TSchemeShardAuditSettings::CreateSubdomain |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_auditsettings/unittest |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TestProgram::YqlKernelEndsWithScalar [GOOD] |84.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernel [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 3 } Arguments { Id: 4 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\002\213\002?\000\001\235?\002\001\235?\004\001\002\000\t\211\002?\n\235?\000\001\002\000\t\251\000?\020\014Arg\000\000\t\211\002?\014?\020\002\000\t\211\006?\020\203\005@?\020?\020$BlockFunc\000\003?\034\006Add?\026?\026\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 3 } Arguments { Id: 4 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\002\213\002?\000\001\235?\002\001\235?\004\001\002\000\t\211\002?\n\235?\000\001\002\000\t\251\000?\020\014Arg\000\000\t\211\002?\014?\020\002\000\t\211\006?\020\203\005@?\020?\020$BlockFunc\000\003?\034\006Add?\026?\026\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"sum\",\"id\":3},{\"name\":\"vat\",\"id\":4}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"3,4\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:3,4"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"3\",\"p\":{\"address\":{\"name\":\"sum\",\"id\":3}},\"o\":\"3\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"4\",\"p\":{\"address\":{\"name\":\"vat\",\"id\":4}},\"o\":\"4\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"sum\",\"id\":3},{\"name\":\"vat\",\"id\":4}]},\"o\":\"3,4\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; 
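The TestProgram::YqlKernel graph just above is the simplest of these programs: ReserveMemory and FetchOriginalData stage the sum (id 3) and vat (id 4) Int32 columns, AssembleOriginalData materializes them, and a single Calculation applies the Add block kernel from the serialized blob before the Projection of column 15. Roughly, in pyarrow terms (the column values are assumed for illustration):

    import pyarrow as pa
    import pyarrow.compute as pc

    sum_col = pa.array([100, 200, 300], type=pa.int32())  # column id 3
    vat_col = pa.array([20, 40, 60], type=pa.int32())     # column id 4

    result = pc.add(sum_col, vat_col)  # the "Add" BlockFunc, output column 15
    print(result)  # [120, 240, 360]
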
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"3","p":{"address":{"name":"sum","id":3}},"o":"3","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"sum","id":3},{"name":"vat","id":4}]},"o":"3,4","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"sum","id":3},{"name":"vat","id":4}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"4","p":{"address":{"name":"vat","id":4}},"o":"4","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"3,4","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_auditsettings/unittest |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_auditsettings/unittest |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_auditsettings/unittest |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::CountUIDByVAT [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } KeyColumns { Id: 4 } } } Command { Projection { Columns { Id: 10001 } Columns { Id: 4 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } KeyColumns { Id: 4 } } } Command { Projection { Columns { Id: 10001 } Columns { Id: 4 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2},{\"name\":\"vat\",\"id\":4}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"2,4\",\"o\":\"10001\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, 
label="N2(9):{\"i\":\"4\",\"p\":{\"address\":{\"name\":\"vat\",\"id\":4}},\"o\":\"4\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"10001,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N4 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2},{\"name\":\"vat\",\"id\":4}]},\"o\":\"2,4\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N4->N2->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":4},{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2},{"name":"vat","id":4}]},"o":"2,4","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"uid","id":2},{"name":"vat","id":4}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"10001,4","t":"Projection"},"w":27,"id":5},"4":{"p":{"i":"4","p":{"address":{"name":"vat","id":4}},"o":"4","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"2,4","o":"10001","t":"Aggregation"},"w":18,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; >> TestScript::StepMerging [GOOD] >> KqpStats::SysViewCancelled [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEndsWithScalar [GOOD] Test command err: 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Bytes: "amet." } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\020EndsWith?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Bytes: "amet." } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\020EndsWith?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"amet.\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"7,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:7,15"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"7\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"7,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"string","id":7}]},"o":"7","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"string","id":7}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"amet."},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; 
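In the EndsWithScalar variant above, the kernel takes a scalar constant rather than a second column: node 15 is the Const carrying "amet.", and the Calculation compares the string column (id 7) against it, yielding the UInt8 flags checked in the NOTICE lines below. The equivalent scalar call in pyarrow (the sample strings are assumed; the "amet." constant comes straight from the dump):

    import pyarrow as pa
    import pyarrow.compute as pc

    strings = pa.array(["lorem ipsum dolor sit amet.", "consectetur adipiscing elit"])
    print(pc.ends_with(strings, pattern="amet."))  # [true, false]
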
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |85.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |85.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |85.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestScript::StepMerging [GOOD] >> TestProgram::YqlKernelEquals [GOOD] >> TestProgram::CountWithNulls |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> KqpNewEngine::UnionAllPure [GOOD] >> KqpNewEngine::StreamLookupForDataQuery+StreamLookupJoin >> TestProgram::CountWithNulls [GOOD] >> TestProgram::YqlKernelContains ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEquals [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 10 } Arguments { Id: 11 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\020\203B\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\001\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\014Equals?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 10 } Arguments { Id: 11 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\020\203B\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\001\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\014Equals?\034? 
\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"10,11\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:10,11"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"10\",\"p\":{\"address\":{\"name\":\"i16\",\"id\":10}},\"o\":\"10\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"11\",\"p\":{\"address\":{\"name\":\"float\",\"id\":11}},\"o\":\"11\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"10,11\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"10","p":{"address":{"name":"i16","id":10}},"o":"10","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"i16","id":10},{"name":"float","id":11}]},"o":"10,11","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"i16","id":10},{"name":"float","id":11}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"11","p":{"address":{"name":"float","id":11}},"o":"11","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"10,11","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, 
label="N4(26):{\"i\":\"10,11\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:10,11"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"10\",\"p\":{\"address\":{\"name\":\"i16\",\"id\":10}},\"o\":\"10\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"11\",\"p\":{\"address\":{\"name\":\"float\",\"id\":11}},\"o\":\"11\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"10,11\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; } FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> TestProgram::JsonExists [GOOD] >> TestProgram::YqlKernelContains [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpLimits::ManyPartitionsSortingLimit [GOOD] Test command err: Trying to start YDB, gRPC: 27389, MsgBus: 29262 2025-06-30T09:25:11.946565Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671047234935980:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:11.946588Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00424f/r3tmp/tmppqpTT7/pdisk_1.dat 2025-06-30T09:25:12.223970Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 27389, node 1 TClient is connected to server localhost:29262 2025-06-30T09:25:12.253962Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:12.253977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:12.253980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:12.253984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:12.254054Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:12.254180Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671047234935958:2080] 1751275511946386 != 1751275511946389 TClient is connected to server localhost:29262 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:25:12.297793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:12.297855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:12.303494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:12.312857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:12.336524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:12.429116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:12.552544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:12.592053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:12.661043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671051529905665:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:12.661073Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:12.722440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:12.737593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:12.752657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:12.779149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:12.794172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:12.861408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:12.900569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:12.950227Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:12.956365Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671051529906819:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:12.956395Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:12.956516Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671051529906824:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:12.957966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:12.972312Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671051529906826:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:13.034039Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671055824874212:4706] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:13.227831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:16.946884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521671047234935980:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:16.947014Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784 ... 997Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:19.070896Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:19.076021Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7521671339111479216:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:26:19.135839Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7521671339111479283:2647] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:19.617199Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:23.615407Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7521671334816510951:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:23.615463Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:26:33.640856Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs 2025-06-30T09:26:33.640882Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:37.076217Z node 3 :HIVE ERROR: tx__update_tablet_groups.cpp:135: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{55289043151680}: tablet 72075186224037888 could not find a group for channel 1 pool /Root:test 2025-06-30T09:26:37.076245Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:272: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{55289043151680}: tablet 72075186224037888 wasn't changed 2025-06-30T09:26:37.076248Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{55289043151680}: tablet 72075186224037888 skipped channel 1 2025-06-30T09:26:37.077113Z node 3 :HIVE ERROR: tx__update_tablet_groups.cpp:135: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{55289040055584}: tablet 72075186224037888 could not find a group for channel 0 pool /Root:test 2025-06-30T09:26:37.077132Z node 3 :HIVE ERROR: tx__update_tablet_groups.cpp:135: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{55289040055584}: tablet 72075186224037888 could not find a group for channel 1 pool /Root:test 2025-06-30T09:26:37.077135Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:272: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{55289040055584}: tablet 72075186224037888 wasn't changed 2025-06-30T09:26:37.077138Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{55289040055584}: tablet 72075186224037888 skipped channel 0 2025-06-30T09:26:37.077143Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{55289040055584}: tablet 72075186224037888 skipped channel 1 2025-06-30T09:26:37.278813Z node 3 :TX_DATASHARD ERROR: check_data_tx_unit.cpp:83: Cannot perform transaction: out of disk space at tablet 72075186224037888 txId 281474976715768 2025-06-30T09:26:37.278875Z node 3 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. 
txid 281474976715768 at tablet 72075186224037888 errors: OUT_OF_SPACE (Cannot perform transaction: out of disk space at tablet 72075186224037888 txId 281474976715768) | 2025-06-30T09:26:37.278912Z node 3 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715768 at tablet 72075186224037888 status: ERROR errors: OUT_OF_SPACE (Cannot perform transaction: out of disk space at tablet 72075186224037888 txId 281474976715768) | 2025-06-30T09:26:37.279002Z node 3 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [3:7521671416420900104:2297] TxId: 281474976715768. Ctx: { TraceId: 01jz02hvcvcszzwaxh7r2z3q83, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjVjNGZhOWItMzBiZmMyMGUtM2Y0ZjM1ZDYtYTVmZTg2ODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ERROR: [OUT_OF_SPACE] Cannot perform transaction: out of disk space at tablet 72075186224037888 txId 281474976715768; 2025-06-30T09:26:37.279143Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=3&id=YjVjNGZhOWItMzBiZmMyMGUtM2Y0ZjM1ZDYtYTVmZTg2ODY=, ActorId: [3:7521671339111479186:2297], ActorState: ExecuteState, TraceId: 01jz02hvcvcszzwaxh7r2z3q83, Create QueryResponse for error on request, msg: Got out of space. Successfully inserted 30 x 0 lines, each of size 1048576bytes Trying to start YDB, gRPC: 16820, MsgBus: 21170 2025-06-30T09:26:37.448259Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671419529301540:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:37.448293Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00424f/r3tmp/tmpeLjEGk/pdisk_1.dat 2025-06-30T09:26:37.464388Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:37.464647Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7521671419529301516:2080] 1751275597448157 != 1751275597448160 TServer::EnableGrpc on GrpcPort 16820, node 4 2025-06-30T09:26:37.474531Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:37.474549Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:37.474551Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:37.474604Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21170 TClient is connected to server localhost:21170 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:37.553839Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:37.553884Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:37.554425Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:37.554902Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-30T09:26:37.559057Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:26:37.574813Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:37.908645Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671419529306356:2598], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:37.908666Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671419529306345:2595], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:37.908690Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:37.909537Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:37.911763Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671419529306359:2599], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:26:37.980611Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671419529306412:5095] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::CountWithNulls [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } } } Command { Projection { Columns { Id: 10001 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } } } Command { Projection { Columns { Id: 10001 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N3(15):{\"i\":\"2\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"10001\",\"t\":\"Calculation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N1[shape=box, label="N1(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N4 -> N1[label="1"]; N2[shape=box, label="N2(7):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N1 -> N2[label="1"]; N3[shape=box, label="N4(15):{\"i\":\"10001\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N3[label="1"]; N4[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N4->N1->N2->N0->N3[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":4}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[{"from":0}]},{"owner_id":4,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2}]},"o":"2","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"i":"10001","t":"Projection"},"w":15,"id":3},"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":7,"id":2},"4":{"p":{"p":{"data":[{"name":"uid","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":4},"0":{"p":{"i":"2","p":{"kernel":{"class_name":"SIMPLE"}},"o":"10001","t":"Calculation"},"w":15,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.0%| [LD] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonExists [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\214\002\214\n\210\203\001H?>?6\016\000\203\004\203\005@\203\004\203\004\207\214\002\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?H\203\005@\200\203\005@\202\022\000\003?d\036Json2.SqlExists\202\003?f\000\002\017\003?J\000\003?L\000\003?N\000\003?P\000\027?T\t\211\014?R\311\002?R\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\200\005\205\004\203\010\203\005@\032\036\003?\206\002\003?\210\000\003\001\003?\202\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\230\203\005@\200\203\005@\202\022\000\003?\244\026Json2.Parse\202\003?\246\000\002\017\003?\232\000\003?\234\000\003?\236\000\003?\240\000?<\036\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\276\203\005@\200\203\005@\202\022\000\003?\312\"Json2.CompilePath\202\003?\314\000\002\017\003?\300\000\003?\302\000\003?\304\000\003?\306\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\214\002\214\n\210\203\001H?>?6\016\000\203\004\203\005@\203\004\203\004\207\214\002\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?H\203\005@\200\203\005@\202\022\000\003?d\036Json2.SqlExists\202\003?f\000\002\017\003?J\000\003?L\000\003?N\000\003?P\000\027?T\t\211\014?R\311\002?R\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\200\005\205\004\203\010\203\005@\032\036\003?\206\002\003?\210\000\003\001\003?\202\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\230\203\005@\200\203\005@\202\022\000\003?\244\026Json2.Parse\202\003?\246\000\002\017\003?\232\000\003?\234\000\003?\236\000\003?\240\000?<\036\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\276\203\005@\200\203\005@\202\022\000\003?\312\"Json2.CompilePath\202\003?\314\000\002\017\003?\300\000\003?\302\000\003?\304\000\003?\306\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "[]" ] FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; 
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelContains [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\005@\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \034StringContains?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\005@\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \034StringContains?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"7,9\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:7,9"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"9\",\"p\":{\"address\":{\"name\":\"substring\",\"id\":9}},\"o\":\"9\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"7,9\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"7,9","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"9","p":{"address":{"name":"substring","id":9}},"o":"9","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"7,9","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |85.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |85.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration >> TSchemeShardAuditSettings::CreateSubdomain [GOOD] >> TSchemeShardAuditSettings::CreateExtSubdomain [GOOD] >> TestProgram::SimpleFunction [GOOD] |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_fifo_queue_wo_postfix[tables_format_v0] >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query [GOOD] >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false [GOOD] >> TSchemeShardAuditSettings::AlterSubdomain [GOOD] >> KqpNewEngine::StreamLookupForDataQuery+StreamLookupJoin [GOOD] >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::CreateSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: 
[1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:40.452046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:40.452083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:40.452088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:40.452093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:40.452115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:40.452118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:40.452127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:40.456660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:40.456811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:40.467397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:40.526467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:40.526494Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:40.548908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:40.548976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:40.562199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:40.565251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:40.573306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:40.573490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.573683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:40.596000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.596081Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:40.619494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.619540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.629002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:40.629038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:40.629073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:40.629124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.643197Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:40.703785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:40.713900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.766354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:40.766401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:40.766505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:40.766544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:40.772224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.779122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:40.779352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.779371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:40.779378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:40.779386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:40.780453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:40.781055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.781070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.781078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.781097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:40.781891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:40.782515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:40.782558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:40.791404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.791456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:40.791465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.791567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:40.791578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.800582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:40.800654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:40.801619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.801632Z node 1 :FLAT_TX_SCHEMESHARD ... eration_side_effects.cpp:654: Send tablet strongly msg operationId: 112:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:112 msg type: 269090816 2025-06-30T09:26:40.965811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 112, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 112 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 112 at step: 5000013 2025-06-30T09:26:40.966277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000013, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.966308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 112 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000013 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:40.966317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_unsafe.cpp:47: TDropForceUnsafe TPropose, operationId: 112:0 HandleReply TEvOperationPlan, step: 5000013, at schemeshard: 72057594046678944 2025-06-30T09:26:40.966326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5309: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 7] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 112 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:40.966334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5325: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-30T09:26:40.966372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 112:0 128 -> 130 2025-06-30T09:26:40.966404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:40.966415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-30T09:26:40.966483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-30T09:26:40.966586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 FAKE_COORDINATOR: Erasing txId 112 2025-06-30T09:26:40.968476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.968489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:40.974929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-30T09:26:40.975004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.975013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 1 2025-06-30T09:26:40.975024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 7 2025-06-30T09:26:40.975143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.975154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:418: [72057594046678944] TDeleteParts opId# 112:0 ProgressState 2025-06-30T09:26:40.975170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#112:0 progress is 1/1 2025-06-30T09:26:40.975175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-30T09:26:40.975182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#112:0 progress is 1/1 2025-06-30T09:26:40.975186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-30T09:26:40.975192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 112, ready parts: 1/1, is published: false 2025-06-30T09:26:40.975198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-30T09:26:40.975203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 112:0 2025-06-30T09:26:40.975211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 112:0 2025-06-30T09:26:40.975232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-30T09:26:40.975238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 112, publications: 2, subscribers: 0 
2025-06-30T09:26:40.975243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 112, [OwnerId: 72057594046678944, LocalPathId: 1], 27 2025-06-30T09:26:40.975247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 112, [OwnerId: 72057594046678944, LocalPathId: 7], 18446744073709551615 2025-06-30T09:26:40.975452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:26:40.975469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:26:40.975475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 112 2025-06-30T09:26:40.975480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 27 2025-06-30T09:26:40.975486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:26:40.975752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:26:40.975769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:26:40.975775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 112 2025-06-30T09:26:40.975780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 18446744073709551615 2025-06-30T09:26:40.975786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-30T09:26:40.975804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 112, subscribers: 0 2025-06-30T09:26:40.987010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:40.987042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-30T09:26:40.987080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for 
pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 2025-06-30T09:26:40.987201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:40.987208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-30T09:26:40.987223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:40.989516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-30T09:26:40.990215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-30T09:26:40.990250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:26:40.990269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 112, wait until txId: 112 TestWaitNotification wait txId: 112 2025-06-30T09:26:40.990379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2025-06-30T09:26:40.990389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2025-06-30T09:26:40.990497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2025-06-30T09:26:40.990523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2025-06-30T09:26:40.990530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:666:2655] TestWaitNotification: OK eventTxId 112 |85.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::CreateExtSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:40.452050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:40.452087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-30T09:26:40.452093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:40.452100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:40.452112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:40.452117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:40.452128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:40.456662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:40.456811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:40.467395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:40.527167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:40.527196Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:40.548101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:40.548178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:40.562212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:40.564768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:40.573256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:40.573424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.573561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:40.595891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.595970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:40.619552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.619589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.628975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-06-30T09:26:40.629012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:40.629033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:40.629085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.642616Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:40.702655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:40.713883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.766342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:40.766392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:40.766505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:40.766532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:40.772052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.779079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:40.779320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.779366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:40.779374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:40.779381Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:40.780609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:40.781233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.781246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.781252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.781269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:40.782048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:40.782610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:40.782645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:40.791424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.791473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:40.791484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.791577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:40.791588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.800615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:40.800669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:40.801611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.801626Z node 1 :FLAT_TX_SCHEMESHARD ... 46316545 FAKE_COORDINATOR: Add transaction: 112 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 112 at step: 5000013 2025-06-30T09:26:40.975427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000013, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.975447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 112 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000013 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:40.975453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:157: TDropExtSubdomain TPropose, operationId: 112:0 HandleReply TEvOperationPlan, step: 5000013, at schemeshard: 72057594046678944 2025-06-30T09:26:40.975466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5309: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 7] name: USER_0 type: EPathTypeExtSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 112 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:40.975469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5325: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-30T09:26:40.975475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 112:0 128 -> 134 2025-06-30T09:26:40.975628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-30T09:26:40.976055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-30T09:26:40.976158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.976165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:104: TDropExtSubdomain TDeleteExternalShards, operationId: 112:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:40.976194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 112:0 134 -> 135 2025-06-30T09:26:40.976213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:40.976221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 FAKE_COORDINATOR: Erasing txId 112 2025-06-30T09:26:40.976545Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.976554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:40.976574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-30T09:26:40.976591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.976595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 1 2025-06-30T09:26:40.976598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 7 2025-06-30T09:26:40.976641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.976646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:400: [72057594046678944] TDeleteParts opId# 112:0 ProgressState 2025-06-30T09:26:40.976650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 112:0 135 -> 240 2025-06-30T09:26:40.976769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:26:40.976778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:26:40.976781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 112 2025-06-30T09:26:40.976785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 27 2025-06-30T09:26:40.976788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:26:40.976831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:26:40.976841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-30T09:26:40.976843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: 
Operation in-flight, at schemeshard: 72057594046678944, txId: 112 2025-06-30T09:26:40.976846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 18446744073709551615 2025-06-30T09:26:40.976849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-30T09:26:40.976856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 112, ready parts: 0/1, is published: true 2025-06-30T09:26:40.977354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.977365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 112:0 ProgressState 2025-06-30T09:26:40.977376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#112:0 progress is 1/1 2025-06-30T09:26:40.977380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-30T09:26:40.977383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#112:0 progress is 1/1 2025-06-30T09:26:40.977386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-30T09:26:40.977389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 112, ready parts: 1/1, is published: true 2025-06-30T09:26:40.977393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-30T09:26:40.977397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 112:0 2025-06-30T09:26:40.977400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 112:0 2025-06-30T09:26:40.977409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-30T09:26:40.977471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:40.977476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-30T09:26:40.977487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 2025-06-30T09:26:40.977686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:40.977696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-30T09:26:40.977713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:40.977785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-30T09:26:40.977825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-30T09:26:40.978202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:26:40.978218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 112, wait until txId: 112 TestWaitNotification wait txId: 112 2025-06-30T09:26:40.978287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2025-06-30T09:26:40.978292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2025-06-30T09:26:40.978362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2025-06-30T09:26:40.978375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2025-06-30T09:26:40.978378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:660:2649] TestWaitNotification: OK eventTxId 112 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::SimpleFunction [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Id: 8 Arguments { Id: 2 } } } } Command { Projection { Columns { Id: 15 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Id: 8 Arguments { Id: 2 } } } } Command { Projection { Columns { Id: 15 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N3(15):{\"i\":\"2\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N1[shape=box, label="N1(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N4 -> N1[label="1"]; N2[shape=box, label="N2(7):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N1 -> N2[label="1"]; N3[shape=box, label="N4(15):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N3[label="1"]; N4[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N4->N1->N2->N0->N3[color=red]; }; 
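[editor's aside — not part of the test output] The `graph_constructed` line above dumps the columnshard program's execution plan as Graphviz DOT: ReserveMemory (N4) feeds FetchOriginalData (N1), then AssembleOriginalData (N2), Calculation (N0), and Projection (N3), with the red chain marking the execution path. A minimal sketch for rendering such a dump, assuming the text from `digraph program {` through the closing brace has been saved to a file (the trailing `;` after `}` may need trimming for strict DOT parsers) and Graphviz's `dot` binary is on PATH:

```python
import subprocess

def render_program_graph(dot_path: str = "program.dot",
                         svg_path: str = "program.svg") -> None:
    # `dot -Tsvg` is the standard Graphviz invocation for SVG output;
    # the resulting image makes the N4->N1->N2->N0->N3 chain easy to inspect.
    subprocess.run(["dot", "-Tsvg", dot_path, "-o", svg_path], check=True)

if __name__ == "__main__":
    render_program_graph()
```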
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":4}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[{"from":0}]},{"owner_id":4,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2}]},"o":"2","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"i":"15","t":"Projection"},"w":15,"id":3},"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":7,"id":2},"4":{"p":{"p":{"data":[{"name":"uid","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":4},"0":{"p":{"i":"2","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":15,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::NumRowsWithNulls |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:40.452046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:40.452089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:40.452096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:40.452102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:40.452111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:40.452115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:40.452125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:40.456636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:40.456802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:40.467408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:40.527843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:40.527874Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:40.548347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:40.548448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:40.562231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:40.565156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:40.573303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:40.573500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.573683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:40.596023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.596095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:40.619494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.619541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.628968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:40.629012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:40.629038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:40.629096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.643024Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] 
recipient: [1:15:2062] 2025-06-30T09:26:40.704262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:40.713921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.766376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:40.766411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:40.766507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:40.766533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:40.772053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.779079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:40.779294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.779331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:40.779339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:40.779348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:40.780319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:40.780886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780901Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.780920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:40.781603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:40.782140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:40.782183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:40.791403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.791458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:40.791469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.791578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:40.791590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.800589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:40.800654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:40.801612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.801632Z node 1 :FLAT_TX_SCHEMESHARD ... 
s.cpp:654: Send tablet strongly msg operationId: 175:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:175 msg type: 269090816 2025-06-30T09:26:41.424434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 175, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 175 at step: 5000076 FAKE_COORDINATOR: advance: minStep5000076 State->FrontStep: 5000075 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 175 at step: 5000076 2025-06-30T09:26:41.424650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000076, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:41.424679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 175 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000076 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:41.424689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_unsafe.cpp:47: TDropForceUnsafe TPropose, operationId: 175:0 HandleReply TEvOperationPlan, step: 5000076, at schemeshard: 72057594046678944 2025-06-30T09:26:41.424698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5309: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 26] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 175 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:41.424707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5325: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 26] 2025-06-30T09:26:41.424743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 175:0 128 -> 130 2025-06-30T09:26:41.424790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:41.424801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 2025-06-30T09:26:41.425108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-30T09:26:41.425231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 FAKE_COORDINATOR: Erasing txId 175 2025-06-30T09:26:41.425708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:41.425719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:41.425755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 26] 2025-06-30T09:26:41.425787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-30T09:26:41.425793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 1 2025-06-30T09:26:41.425800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 26 2025-06-30T09:26:41.425886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-30T09:26:41.425896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:418: [72057594046678944] TDeleteParts opId# 175:0 ProgressState 2025-06-30T09:26:41.425908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-30T09:26:41.425914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-30T09:26:41.425920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-30T09:26:41.425924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-30T09:26:41.425930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: false 2025-06-30T09:26:41.425935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-30T09:26:41.425941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 175:0 2025-06-30T09:26:41.425949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 175:0 2025-06-30T09:26:41.425965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 3 2025-06-30T09:26:41.425973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 175, publications: 2, subscribers: 0 2025-06-30T09:26:41.425977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 175, [OwnerId: 72057594046678944, LocalPathId: 1], 103 2025-06-30T09:26:41.425982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 175, [OwnerId: 72057594046678944, LocalPathId: 26], 18446744073709551615 2025-06-30T09:26:41.426129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:41.426145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:41.426151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 175 
2025-06-30T09:26:41.426157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 103 2025-06-30T09:26:41.426162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:26:41.426364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:41.426381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:41.426387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 175 2025-06-30T09:26:41.426392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 26], version: 18446744073709551615 2025-06-30T09:26:41.426398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 2025-06-30T09:26:41.426417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 175, subscribers: 0 2025-06-30T09:26:41.426649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:41.426661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-30T09:26:41.426694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 1 2025-06-30T09:26:41.426788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:41.426797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-30T09:26:41.426811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:41.427322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-30T09:26:41.427880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-30T09:26:41.427920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:26:41.427938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 175, wait until txId: 175 TestWaitNotification wait txId: 175 2025-06-30T09:26:41.428301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2025-06-30T09:26:41.428313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2025-06-30T09:26:41.428589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2025-06-30T09:26:41.428619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2025-06-30T09:26:41.428626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:2481:4470] TestWaitNotification: OK eventTxId 175 |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::NumRowsWithNulls [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:40.452052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:40.452118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:40.452126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:40.452132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:40.452144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:40.452148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:40.452159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:40.456674Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:40.456856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:40.467408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:40.527735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:40.527765Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:40.549631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:40.549714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:40.562243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:40.565398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:40.573311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:40.573490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.573684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:40.595987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.596079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:40.619494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.619541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.628970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:40.629012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:40.629037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:40.629110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.643189Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:40.702653Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:40.713890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.766353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:40.766402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:40.766525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:40.766557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:40.772052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.779077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:40.779259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.779276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:40.779287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:40.779295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:40.780409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:40.781065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.781081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-30T09:26:40.781087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.781103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:40.781885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:40.782497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:40.782537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:40.791403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.791456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:40.791466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.791567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:40.791580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.800589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:40.800656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:40.801622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.801636Z node 1 :FLAT_TX_SCHEMESHARD ... 
RDINATOR: Add transaction: 175 at step: 5000076 FAKE_COORDINATOR: advance: minStep5000076 State->FrontStep: 5000075 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 175 at step: 5000076 2025-06-30T09:26:41.405240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000076, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:41.405261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 175 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000076 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:41.405269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:157: TDropExtSubdomain TPropose, operationId: 175:0 HandleReply TEvOperationPlan, step: 5000076, at schemeshard: 72057594046678944 2025-06-30T09:26:41.405288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5309: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 26] name: USER_0 type: EPathTypeExtSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 175 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:41.405292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5325: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 26] 2025-06-30T09:26:41.405299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 175:0 128 -> 134 2025-06-30T09:26:41.405464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-30T09:26:41.405785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-30T09:26:41.406106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-30T09:26:41.406117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:104: TDropExtSubdomain TDeleteExternalShards, operationId: 175:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:41.406152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 175:0 134 -> 135 2025-06-30T09:26:41.406179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:41.406190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 FAKE_COORDINATOR: Erasing txId 175 2025-06-30T09:26:41.406603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:41.406614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:41.406647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 
72057594046678944, LocalPathId: 26] 2025-06-30T09:26:41.406675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:41.406681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 1 2025-06-30T09:26:41.406687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 26 2025-06-30T09:26:41.406765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-30T09:26:41.406773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:400: [72057594046678944] TDeleteParts opId# 175:0 ProgressState 2025-06-30T09:26:41.406779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 175:0 135 -> 240 2025-06-30T09:26:41.407046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:41.407066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:41.407071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-30T09:26:41.407077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 103 2025-06-30T09:26:41.407082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:26:41.407250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:41.407261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:41.407265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-30T09:26:41.407272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 26], version: 18446744073709551615 2025-06-30T09:26:41.407277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 3 2025-06-30T09:26:41.407288Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 175, ready parts: 0/1, is published: true 2025-06-30T09:26:41.407840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-30T09:26:41.407852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 175:0 ProgressState 2025-06-30T09:26:41.407865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-30T09:26:41.407870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-30T09:26:41.407876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-30T09:26:41.407880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-30T09:26:41.407884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: true 2025-06-30T09:26:41.407891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-30T09:26:41.407896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 175:0 2025-06-30T09:26:41.407901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 175:0 2025-06-30T09:26:41.407915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 2025-06-30T09:26:41.408267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:41.408278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-30T09:26:41.408297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 1 2025-06-30T09:26:41.408341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:41.408347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-30T09:26:41.408360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:41.408453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-30T09:26:41.408487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-30T09:26:41.408994Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:26:41.409014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 175, wait until txId: 175 TestWaitNotification wait txId: 175 2025-06-30T09:26:41.409295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2025-06-30T09:26:41.409305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2025-06-30T09:26:41.409497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2025-06-30T09:26:41.409523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2025-06-30T09:26:41.409529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:2605:4594] TestWaitNotification: OK eventTxId 175 |85.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |85.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |85.1%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tools/query_replay/ydb_query_replay |85.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay/ydb_query_replay |85.1%| [LD] {RESULT} $(B)/ydb/tools/query_replay/ydb_query_replay |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::NumRowsWithNulls [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 10001 } Function { Id: 7 Arguments { Id: 2 } } } } Command { Filter { Predicate { Id: 10001 } } } Command { GroupBy { Aggregates { Column { Id: 10002 } Function { Id: 2 } } } } Command { Projection { Columns { Id: 10002 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 10001 } Function { Id: 7 Arguments { Id: 2 } } } } Command { Filter { Predicate { Id: 10001 } } } Command { GroupBy { Aggregates { Column { Id: 10002 } Function { Id: 2 } } } } Command { Projection { Columns { Id: 10002 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N3(15):{\"i\":\"2\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"10001\",\"t\":\"Calculation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N1[shape=box, label="N1(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N1[label="1"]; N2[shape=box, 
label="N2(7):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N1 -> N2[label="1"]; N3[shape=box, label="N4(15):{\"i\":\"10001\",\"t\":\"Filter\"}\nREMOVE:10001",style=filled,color="#FFAAAA"]; N0 -> N3[label="1"]; N4[shape=box, label="N5(8):{\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"10002\",\"t\":\"Calculation\"}\n"]; N5[shape=box, label="N6(8):{\"i\":\"10002\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N4 -> N5[label="1"]; N6[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N6->N1->N2->N0->N3->N4->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":6}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[{"from":0}]},{"owner_id":4,"inputs":[]},{"owner_id":5,"inputs":[{"from":4}]},{"owner_id":6,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2}]},"o":"2","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"i":"10001","t":"Filter"},"w":15,"id":3},"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":7,"id":2},"6":{"p":{"p":{"data":[{"name":"uid","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":6},"5":{"p":{"i":"10002","t":"Projection"},"w":8,"id":5},"4":{"p":{"p":{"kernel":{"class_name":"SIMPLE"}},"o":"10002","t":"Calculation"},"w":8,"id":4},"0":{"p":{"i":"2","p":{"kernel":{"class_name":"SIMPLE"}},"o":"10001","t":"Calculation"},"w":15,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelStartsWith [GOOD] >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelStartsWith [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? 
\024StartsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \024StartsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"7,9\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:7,9"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"9\",\"p\":{\"address\":{\"name\":\"substring\",\"id\":9}},\"o\":\"9\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"7,9\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"7,9","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"9","p":{"address":{"name":"substring","id":9}},"o":"9","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"7,9","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin [GOOD] 
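[Annotation] The TestProgram::YqlKernelStartsWith output above shows the parsed execution graph: ReserveMemory -> FetchOriginalData -> two AssembleOriginalData nodes (columns "string" id=7 and "substring" id=9) -> a Calculation node applying the StartsWith block kernel -> a Projection of result column 15, with the trailing N5arrow...TypeE traces indicating Arrow string inputs and a UInt8 output. As a rough illustration of the row-wise semantics that kernel computes — this is a hypothetical plain-Python sketch, not YDB's actual kernel code, and it assumes Arrow-style null propagation — consider:

    # Hypothetical sketch (not YDB source): row-wise StartsWith over two
    # columns, mirroring inputs "string" (id=7) and "substring" (id=9)
    # and the projected UInt8-like result column (id=15).
    from typing import List, Optional

    def starts_with_kernel(strings: List[Optional[str]],
                           substrings: List[Optional[str]]) -> List[Optional[int]]:
        out: List[Optional[int]] = []
        for s, sub in zip(strings, substrings):
            if s is None or sub is None:
                out.append(None)  # null in -> null out (assumed Arrow behavior)
            else:
                out.append(1 if s.startswith(sub) else 0)  # 0/1 as UInt8
        return out

    # Example: the projection of the computed column
    print(starts_with_kernel(["abcde", "xyz", None], ["abc", "ab", "a"]))
    # -> [1, 0, None]

The unit test above exercises the same shape of computation through the serialized kernel blob and the constructed graph, rather than through any Python API.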
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:40.452051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:40.452088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:40.452096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:40.452102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:40.452115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:40.452119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:40.452136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:40.456660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:40.456838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:40.467418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:40.526118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:40.526156Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:40.549378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:40.549440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:40.562199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:40.564759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:40.573263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:40.573416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at 
schemeshard: 72057594046678944 2025-06-30T09:26:40.573561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:40.595884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.595971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:40.619415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.619450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:40.628928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:40.628973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:40.629003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:40.629053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.642703Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:40.702949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:40.713897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.766354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:40.766392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:40.766499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:40.766529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:40.772052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.779077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:40.779256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.779277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:40.779286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:40.779293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:40.780354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:40.780957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:40.780976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.780991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:40.781724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:40.782366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:40.782402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:40.791403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:40.791454Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:40.791464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.791560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:40.791570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:40.800594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:40.800652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:40.801814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:40.801836Z node 1 :FLAT_TX_SCHEMESHARD ... -30T09:26:42.545911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 1 2025-06-30T09:26:42.545917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 26 2025-06-30T09:26:42.545933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-30T09:26:42.545940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:400: [72057594046678944] TDeleteParts opId# 175:0 ProgressState 2025-06-30T09:26:42.545946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 175:0 135 -> 240 2025-06-30T09:26:42.546183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:42.546196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:42.546201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-30T09:26:42.546206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 103 2025-06-30T09:26:42.546212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for 
pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-30T09:26:42.546571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:42.546594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-30T09:26:42.546599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-30T09:26:42.546604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 26], version: 18446744073709551615 2025-06-30T09:26:42.546609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 6 2025-06-30T09:26:42.546629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 175, ready parts: 0/1, is published: true 2025-06-30T09:26:42.547120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:74 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:26:42.547136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:73 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:26:42.547140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:75 hive 72057594037968897 at ss 72057594046678944 2025-06-30T09:26:42.547198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-30T09:26:42.547207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 175:0 ProgressState 2025-06-30T09:26:42.547220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-30T09:26:42.547224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-30T09:26:42.547230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-30T09:26:42.547234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-30T09:26:42.547238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: true 2025-06-30T09:26:42.547244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-30T09:26:42.547250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 175:0 2025-06-30T09:26:42.547254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 175:0 2025-06-30T09:26:42.547322Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 5 2025-06-30T09:26:42.547574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-30T09:26:42.548055Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 74 TxId_Deprecated: 74 TabletID: 72075186233409619 2025-06-30T09:26:42.548164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 74 ShardOwnerId: 72057594046678944 ShardLocalIdx: 74, at schemeshard: 72057594046678944 2025-06-30T09:26:42.548251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 4 Forgetting tablet 72075186233409619 2025-06-30T09:26:42.548572Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 73 TxId_Deprecated: 73 TabletID: 72075186233409618 2025-06-30T09:26:42.548903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:42.550167Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 75 TxId_Deprecated: 75 TabletID: 72075186233409620 2025-06-30T09:26:42.550431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 73 ShardOwnerId: 72057594046678944 ShardLocalIdx: 73, at schemeshard: 72057594046678944 2025-06-30T09:26:42.550519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 3 Forgetting tablet 72075186233409618 2025-06-30T09:26:42.550838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6029: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 75 ShardOwnerId: 72057594046678944 ShardLocalIdx: 75, at schemeshard: 72057594046678944 2025-06-30T09:26:42.550899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 Forgetting tablet 72075186233409620 2025-06-30T09:26:42.551444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-30T09:26:42.551514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:42.551522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-30T09:26:42.551555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 1 2025-06-30T09:26:42.551677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths 
Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:42.551687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-30T09:26:42.551704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:42.552460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:74 2025-06-30T09:26:42.552478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:74 tabletId 72075186233409619 2025-06-30T09:26:42.552560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:73 2025-06-30T09:26:42.552568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:73 tabletId 72075186233409618 2025-06-30T09:26:42.552633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:75 2025-06-30T09:26:42.552642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:75 tabletId 72075186233409620 2025-06-30T09:26:42.552999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:26:42.553033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 175, wait until txId: 175 TestWaitNotification wait txId: 175 2025-06-30T09:26:42.553436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2025-06-30T09:26:42.553450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2025-06-30T09:26:42.553726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2025-06-30T09:26:42.553751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2025-06-30T09:26:42.553757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:6795:7773] TestWaitNotification: OK eventTxId 175 >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,10,100,0] |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |85.1%| [TA] $(B)/ydb/core/tx/schemeshard/ut_auditsettings/test-results/unittest/{meta.json ... results_accumulator.log} |85.1%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/test-results/unittest/{meta.json ... results_accumulator.log} |85.1%| [TA] $(B)/ydb/core/tx/columnshard/engines/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |85.1%| [TA] {RESULT} $(B)/ydb/core/tx/columnshard/engines/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_queues_count_over_limit[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue_with_unsupported_tables_format |85.1%| [TA] $(B)/ydb/core/tx/schemeshard/ut_incremental_restore_reboots/test-results/unittest/{meta.json ... results_accumulator.log} |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.1%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore_reboots/test-results/unittest/{meta.json ... 
results_accumulator.log} |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TExternalTableTestReboots::CreateDroppedExternalTableWithReboots [GOOD] |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,10,100,0] [GOOD] >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,10,100,0.5] |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TExternalTableTestReboots::DropExternalTableWithReboots [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::CreateDroppedExternalTableWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:26:32.124406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:32.124439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:32.124446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:32.124452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:32.124467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:32.124472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:32.124484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:32.124502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:32.124624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:32.124727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:32.141859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:32.141888Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:32.142011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:26:32.146597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:32.146727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:32.146794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:32.149489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:32.149545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:32.149678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:32.149753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:32.150416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:32.150462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:32.150681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:32.150688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:32.150706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:32.150712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:32.150716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:32.150760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 
Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:26:32.152454Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:26:32.177653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:32.177745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.177821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:32.177831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:32.177881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:32.177898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:32.178860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:32.178915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:32.178993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.179004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:32.179011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:32.179018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:32.179507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.179524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: 
NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:32.179530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:32.179956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.179970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.179976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:32.179984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:32.180798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:32.181299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:32.181352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:32.181609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:32.181642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:26:44.984322Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1006:0 128 -> 240 2025-06-30T09:26:44.984353Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:26:44.984363Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-30T09:26:44.984371Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:26:44.984538Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:26:44.985173Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 FAKE_COORDINATOR: Erasing txId 1006 2025-06-30T09:26:44.985479Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:44.985488Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:44.985532Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:26:44.985557Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:26:44.985606Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:44.985612Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [49:209:2209], at schemeshard: 72057594046678944, txId: 1006, path id: 1 2025-06-30T09:26:44.985619Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [49:209:2209], at schemeshard: 72057594046678944, txId: 1006, path id: 5 2025-06-30T09:26:44.985624Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [49:209:2209], at schemeshard: 72057594046678944, txId: 1006, path id: 3 2025-06-30T09:26:44.985667Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1006:0, at schemeshard: 72057594046678944 2025-06-30T09:26:44.985676Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1006:0 ProgressState 2025-06-30T09:26:44.985692Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1006:0 progress is 1/1 
2025-06-30T09:26:44.985697Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:26:44.985704Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1006:0 progress is 1/1 2025-06-30T09:26:44.985708Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:26:44.985713Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1006, ready parts: 1/1, is published: false 2025-06-30T09:26:44.985720Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:26:44.985726Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1006:0 2025-06-30T09:26:44.985731Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1006:0 2025-06-30T09:26:44.985746Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:26:44.985752Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:26:44.985758Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1006, publications: 3, subscribers: 0 2025-06-30T09:26:44.985763Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 1], 15 2025-06-30T09:26:44.985767Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-30T09:26:44.985771Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 5], 18446744073709551615 2025-06-30T09:26:44.985882Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:44.985893Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:44.985898Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:26:44.985903Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-30T09:26:44.985909Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-30T09:26:44.985998Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:44.986005Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:26:44.986018Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:26:44.986098Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:44.986108Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:44.986113Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:26:44.986118Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 15 2025-06-30T09:26:44.986122Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:26:44.986397Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:44.986415Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:44.986421Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:26:44.986427Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:26:44.986436Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:26:44.986450Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1006, subscribers: 0 2025-06-30T09:26:44.987123Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:26:44.987263Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 
2025-06-30T09:26:44.987279Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:26:44.987348Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 TestModificationResult got TxId: 1006, wait until txId: 1006 TestWaitNotification wait txId: 1006 2025-06-30T09:26:44.987431Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1006: send EvNotifyTxCompletion 2025-06-30T09:26:44.987443Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1006 2025-06-30T09:26:44.987536Z node 49 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1006, at schemeshard: 72057594046678944 2025-06-30T09:26:44.987588Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1006: got EvNotifyTxCompletionResult 2025-06-30T09:26:44.987594Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1006: satisfy waiter [49:452:2441] TestWaitNotification: OK eventTxId 1006 |85.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |85.2%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |85.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v0-200] [GOOD] |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TExternalTableTestReboots::CreateDroppedExternalTableAndDropWithReboots [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::DropExternalTableWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:26:32.215912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 
2025-06-30T09:26:32.215946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:32.215953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:32.215960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:32.215980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:32.215985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:32.215997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:32.216018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:32.216154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:32.216273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:32.235119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:32.235147Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:32.235278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:26:32.238992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:32.240243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:32.240297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:32.242271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:32.242332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:32.242479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:32.242540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:32.243544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:32.243606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:32.243907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:32.243923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:32.243933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:32.243942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:32.243948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:32.243980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:26:32.245539Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:26:32.268686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:32.268781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.268850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:32.268860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:32.268915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:32.268934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:32.272065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 
72057594046678944 2025-06-30T09:26:32.272138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:32.272219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.272235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:32.272243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:32.272250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:32.273461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.273488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:32.273497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:32.274129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.274150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.274159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:32.274168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:32.275204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:32.275850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:32.275904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:32.276179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:32.276220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep 
Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:45.693002Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:26:45.693016Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:26:45.693039Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:45.693046Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:207:2207], at schemeshard: 72057594046678944, txId: 1005, path id: 1 2025-06-30T09:26:45.693052Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:207:2207], at schemeshard: 72057594046678944, txId: 1005, path id: 5 2025-06-30T09:26:45.693061Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:207:2207], at schemeshard: 72057594046678944, txId: 1005, path id: 3 2025-06-30T09:26:45.693109Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:26:45.693115Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1005:0 ProgressState 2025-06-30T09:26:45.693125Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:26:45.693129Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:26:45.693133Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:26:45.693135Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:26:45.693139Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: false 2025-06-30T09:26:45.693143Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:26:45.693146Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1005:0 2025-06-30T09:26:45.693150Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1005:0 2025-06-30T09:26:45.693159Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:26:45.693163Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: 
DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:26:45.693166Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1005, publications: 3, subscribers: 0 2025-06-30T09:26:45.693169Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 1], 15 2025-06-30T09:26:45.693172Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-30T09:26:45.693175Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 5], 18446744073709551615 2025-06-30T09:26:45.693227Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:45.693239Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:45.693244Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:26:45.693249Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-30T09:26:45.693257Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-30T09:26:45.693327Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:45.693332Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:26:45.693338Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:26:45.693385Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:45.693394Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:45.693398Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:26:45.693403Z node 50 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 15 2025-06-30T09:26:45.693407Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:26:45.693790Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:45.693822Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:45.693830Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:26:45.693837Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:26:45.693845Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:26:45.693866Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1005, subscribers: 0 2025-06-30T09:26:45.694704Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:26:45.694787Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:26:45.694967Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:26:45.695066Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-30T09:26:45.695140Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-30T09:26:45.695148Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-30T09:26:45.695240Z node 50 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-30T09:26:45.695263Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-30T09:26:45.695270Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [50:443:2432] TestWaitNotification: OK eventTxId 1005 2025-06-30T09:26:45.695362Z 
node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:45.695394Z node 50 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 46us result status StatusPathDoesNotExist 2025-06-30T09:26:45.695435Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::CreateDroppedExternalTableAndDropWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:26:33.132477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:33.132505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:33.132511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:33.132516Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:33.132533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:33.132537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:33.132545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:33.132557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:33.132640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:33.132705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:33.145799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:33.145828Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:33.145947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:26:33.150085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:33.151246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:33.151289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:33.153287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:33.153339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:33.153461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:33.153518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:33.154789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:33.154851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:33.155121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:33.155131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:33.155140Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:33.155148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:33.155155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:33.155184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:26:33.156874Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:26:33.180135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:33.180230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:33.180311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:33.180320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:33.180366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:33.180382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:33.181347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:33.181397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:33.181470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:33.181482Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:33.181488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:33.181510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:33.182181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:33.182204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:33.182210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:33.182778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:33.182794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:33.182801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:33.182810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:33.183661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:33.187233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:33.187311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:33.187604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:33.187655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... 
node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:46.359974Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:26:46.359993Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:26:46.360017Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:46.360022Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:210:2210], at schemeshard: 72057594046678944, txId: 1006, path id: 1 2025-06-30T09:26:46.360029Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:210:2210], at schemeshard: 72057594046678944, txId: 1006, path id: 5 2025-06-30T09:26:46.360033Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:210:2210], at schemeshard: 72057594046678944, txId: 1006, path id: 3 2025-06-30T09:26:46.360098Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1006:0, at schemeshard: 72057594046678944 2025-06-30T09:26:46.360108Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1006:0 ProgressState 2025-06-30T09:26:46.360122Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1006:0 progress is 1/1 2025-06-30T09:26:46.360127Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:26:46.360133Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1006:0 progress is 1/1 2025-06-30T09:26:46.360137Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:26:46.360142Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1006, ready parts: 1/1, is published: false 2025-06-30T09:26:46.360148Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-30T09:26:46.360153Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1006:0 2025-06-30T09:26:46.360158Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1006:0 2025-06-30T09:26:46.360172Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:26:46.360177Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 
2025-06-30T09:26:46.360183Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1006, publications: 3, subscribers: 0 2025-06-30T09:26:46.360193Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 1], 15 2025-06-30T09:26:46.360197Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-30T09:26:46.360201Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 5], 18446744073709551615 2025-06-30T09:26:46.360287Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:46.360299Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:46.360304Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:26:46.360308Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-30T09:26:46.360313Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-30T09:26:46.360406Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:46.360414Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-30T09:26:46.360425Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:26:46.360503Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:46.360514Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:46.360519Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:26:46.360523Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 
72057594046678944, LocalPathId: 1], version: 15 2025-06-30T09:26:46.360528Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:26:46.360917Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:46.360936Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-30T09:26:46.360945Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1006 2025-06-30T09:26:46.360950Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:26:46.360955Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:26:46.360970Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1006, subscribers: 0 2025-06-30T09:26:46.361901Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:26:46.361963Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:26:46.362010Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-30T09:26:46.362030Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 TestModificationResult got TxId: 1006, wait until txId: 1006 TestWaitNotification wait txId: 1006 2025-06-30T09:26:46.362104Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1006: send EvNotifyTxCompletion 2025-06-30T09:26:46.362115Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1006 2025-06-30T09:26:46.362200Z node 50 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1006, at schemeshard: 72057594046678944 2025-06-30T09:26:46.362220Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1006: got EvNotifyTxCompletionResult 2025-06-30T09:26:46.362226Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1006: satisfy waiter [50:449:2438] TestWaitNotification: OK eventTxId 1006 2025-06-30T09:26:46.362317Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:46.362350Z node 50 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 47us result status StatusPathDoesNotExist 2025-06-30T09:26:46.362393Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1163" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[update] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_fifo_queue_wo_postfix[tables_format_v0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::DefaultMeteringMode [GOOD] Test command err: 2025-06-30T09:22:53.268838Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670453554669256:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:53.268911Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:53.273670Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521670453831730485:2159];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:53.275066Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:22:53.306292Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:22:53.310750Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/002fd2/r3tmp/tmp1frTgF/pdisk_1.dat 2025-06-30T09:22:53.339963Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22760, node 1 2025-06-30T09:22:53.367436Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:53.367471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:53.371588Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:53.386298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/002fd2/r3tmp/yandexynLT6X.tmp 2025-06-30T09:22:53.386317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/002fd2/r3tmp/yandexynLT6X.tmp 2025-06-30T09:22:53.386406Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/002fd2/r3tmp/yandexynLT6X.tmp 2025-06-30T09:22:53.386468Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:22:53.394022Z INFO: TTestServer started on Port 16390 GrpcPort 22760 2025-06-30T09:22:53.408425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:53.408450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:53.410296Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:22:53.410683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16390 PQClient connected to localhost:22760 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:53.443676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:22:53.492015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 
2025-06-30T09:22:53.749775Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670453831730709:2275], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:53.749781Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7521670453831730696:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:53.749809Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:22:53.751290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:22:53.756705Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7521670453831730725:2276], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-30T09:22:53.847414Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521670453831730753:2168] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:22:53.853215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:53.853914Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7521670453831730767:2280], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:22:53.854016Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=2&id=ZTc1NzQwYmItNWRlNjgyNGItZDdiNTczYjctY2QxMjBhNTI=, ActorId: [2:7521670453831730694:2271], ActorState: ExecuteState, TraceId: 01jz02b15m53zfejw160gnk7q9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:22:53.854561Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:22:53.854631Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521670453554670189:2303], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:22:53.854732Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NmZiMDU1MS01Y2M3YTFjYy0yYjJiMTY3My1iNTlkZDg0Nw==, ActorId: [1:7521670453554670163:2296], ActorState: ExecuteState, TraceId: 01jz02b177f4yyxbmsxqhrn2kh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:22:53.854925Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:22:53.917506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:22:54.022930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-30T09:22:54.071305Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720 ... 
037892] TxId 281474976720673 State CALCULATING FrontTxId 281474976720673 2025-06-30T09:25:31.681540Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72075186224037892] Received 1, Expected 1 2025-06-30T09:25:31.681544Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976720673, NewState CALCULATED 2025-06-30T09:25:31.681546Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976720673 moved from CALCULATING to CALCULATED 2025-06-30T09:25:31.681550Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72075186224037892] write key for TxId 281474976720673 2025-06-30T09:25:31.681693Z node 29 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976720673] save tx TxId: 281474976720673 State: CALCULATED MinStep: 1751275531005 MaxStep: 18446744073709551615 Step: 1751275531726 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7521671107240260406 RawX2: 124554053796 } Partitions { Partition { PartitionId: 0 } } 2025-06-30T09:25:31.681727Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:25:31.682831Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:25:31.682845Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state CALCULATED 2025-06-30T09:25:31.682847Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976720673, State CALCULATED 2025-06-30T09:25:31.682851Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976720673 State CALCULATED FrontTxId 281474976720673 2025-06-30T09:25:31.682853Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976720673, NewState WAIT_RS 2025-06-30T09:25:31.682856Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976720673 moved from CALCULATED to WAIT_RS 2025-06-30T09:25:31.682869Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4027: [PQ: 72075186224037892] Send TEvTxProcessing::TEvReadSet to 0 receivers. Wait TEvTxProcessing::TEvReadSet from 0 senders. 
2025-06-30T09:25:31.682873Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4521: [PQ: 72075186224037892] HaveParticipantsDecision 1 2025-06-30T09:25:31.682889Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976720673, NewState EXECUTING 2025-06-30T09:25:31.682891Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976720673 moved from WAIT_RS to EXECUTING 2025-06-30T09:25:31.682894Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037892] Received 0, Expected 1 2025-06-30T09:25:31.682920Z node 29 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1751275531726, TxId 281474976720673 2025-06-30T09:25:31.682984Z node 29 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-30T09:25:31.686059Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:25:31.686087Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:25:31.686119Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3583: [PQ: 72075186224037892] Handle TEvPQ::TEvTxCommitDone Step 1751275531726, TxId 281474976720673, Partition 0 2025-06-30T09:25:31.686127Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTING 2025-06-30T09:25:31.686130Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976720673, State EXECUTING 2025-06-30T09:25:31.686134Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976720673 State EXECUTING FrontTxId 281474976720673 2025-06-30T09:25:31.686138Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037892] Received 1, Expected 1 2025-06-30T09:25:31.686145Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4224: [PQ: 72075186224037892] TxId: 281474976720673 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-30T09:25:31.686150Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4555: [PQ: 72075186224037892] complete TxId 281474976720673 2025-06-30T09:25:31.686243Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72075186224037892] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } 2025-06-30T09:25:31.686260Z node 29 
:PERSQUEUE NOTICE: pq_impl.cpp:1129: [PQ: 72075186224037892] metering mode METERING_MODE_REQUEST_UNITS 2025-06-30T09:25:31.686274Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4573: [PQ: 72075186224037892] delete partitions for TxId 281474976720673 2025-06-30T09:25:31.686277Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976720673, NewState EXECUTED 2025-06-30T09:25:31.686280Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976720673 moved from EXECUTING to EXECUTED 2025-06-30T09:25:31.686284Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72075186224037892] write key for TxId 281474976720673 2025-06-30T09:25:31.686345Z node 29 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976720673] save tx TxId: 281474976720673 State: EXECUTED MinStep: 1751275531005 MaxStep: 18446744073709551615 Step: 1751275531726 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7521671107240260406 RawX2: 124554053796 } Partitions { Partition { PartitionId: 0 } } 2025-06-30T09:25:31.686384Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:25:31.693526Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:25:31.693546Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-30T09:25:31.693561Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976720673, State EXECUTED 2025-06-30T09:25:31.693568Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976720673 State EXECUTED FrontTxId 281474976720673 2025-06-30T09:25:31.693572Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-30T09:25:31.693575Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976720673, NewState WAIT_RS_ACKS 2025-06-30T09:25:31.693579Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976720673 moved from EXECUTED to WAIT_RS_ACKS 2025-06-30T09:25:31.693586Z node 29 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976720673] PredicateAcks: 0/0 2025-06-30T09:25:31.693588Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 
72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-30T09:25:31.693590Z node 29 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976720673] PredicateAcks: 0/0 2025-06-30T09:25:31.693594Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037892] add an TxId 281474976720673 to the list for deletion 2025-06-30T09:25:31.693598Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976720673, NewState DELETING 2025-06-30T09:25:31.693605Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037892] delete key for TxId 281474976720673 2025-06-30T09:25:31.693621Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:25:31.694816Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:25:31.694830Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-30T09:25:31.694834Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976720673, State DELETING 2025-06-30T09:25:31.694838Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037892] delete TxId 281474976720673 2025-06-30T09:25:31.695104Z node 29 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-30T09:25:31.695149Z node 29 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ/ttt >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,10,100,0.5] [GOOD] >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,10,1000000,0] |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table [GOOD] |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query >> TExternalTableTestReboots::CreateExternalTableWithReboots [GOOD] |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::CreateExternalTableWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:26:30.398542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:30.398567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:30.398574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:30.398580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:30.398596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:30.398601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:30.398611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:30.398627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:30.398717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:30.398825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:30.413719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:30.413748Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:30.413860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader 
for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:26:30.417269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:30.418284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:30.418324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:30.422465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:30.422530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:30.422668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:30.422732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:30.423999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:30.424074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:30.424408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:30.424420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:30.424432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:30.424443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:30.424450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:30.424486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:26:30.426701Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:26:30.449993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:30.450086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:30.450163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:30.450171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:30.450218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:30.450235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:30.451166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:30.451215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:30.451284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:30.451295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:30.451301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:30.451307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:30.451816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:30.451833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:30.451839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:30.452281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:30.452294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:30.452301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:30.452309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:30.452996Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:30.453445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:30.453492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:30.453712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:30.453739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-30T09:26:48.701924Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:26:48.701951Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:48.701957Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:212:2212], at schemeshard: 72057594046678944, txId: 1004, path id: 4 2025-06-30T09:26:48.701984Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:212:2212], at schemeshard: 72057594046678944, txId: 1004, path id: 5 2025-06-30T09:26:48.701988Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:212:2212], at schemeshard: 72057594046678944, txId: 1004, path id: 5 2025-06-30T09:26:48.701992Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:212:2212], at schemeshard: 72057594046678944, txId: 1004, path id: 3 2025-06-30T09:26:48.702051Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-30T09:26:48.702062Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-30T09:26:48.702077Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 
2025-06-30T09:26:48.702082Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:26:48.702088Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1004:0 progress is 1/1 2025-06-30T09:26:48.702092Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:26:48.702097Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-30T09:26:48.702102Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-30T09:26:48.702108Z node 68 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1004:0 2025-06-30T09:26:48.702112Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1004:0 2025-06-30T09:26:48.702126Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-30T09:26:48.702130Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:26:48.702135Z node 68 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1004, publications: 3, subscribers: 1 2025-06-30T09:26:48.702140Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-30T09:26:48.702144Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 4], 5 2025-06-30T09:26:48.702148Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 5], 2 2025-06-30T09:26:48.702404Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 5 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:26:48.702422Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 5 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:26:48.702427Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:26:48.702432Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 5 2025-06-30T09:26:48.702438Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:26:48.702813Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:26:48.702833Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:26:48.702839Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:26:48.702844Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2 2025-06-30T09:26:48.702849Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-30T09:26:48.703040Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:26:48.703055Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-30T09:26:48.703059Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-30T09:26:48.703064Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:26:48.703069Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:26:48.703082Z node 68 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 1 2025-06-30T09:26:48.703092Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [68:307:2296] 2025-06-30T09:26:48.703586Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:26:48.703897Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:26:48.703924Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-30T09:26:48.703941Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-30T09:26:48.703948Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 
1004: satisfy waiter [68:338:2327] TestWaitNotification: OK eventTxId 1002 TestWaitNotification: OK eventTxId 1003 TestWaitNotification: OK eventTxId 1004 2025-06-30T09:26:48.704095Z node 68 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirExternalTable/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-30T09:26:48.704144Z node 68 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirExternalTable/ExternalTable" took 60us result status StatusSuccess 2025-06-30T09:26:48.704262Z node 68 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirExternalTable/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "a" Type: "Int32" TypeId: 1 Id: 1 NotNull: true } Columns { Name: "b" Type: "Int32" TypeId: 1 Id: 2 NotNull: true } Content: "" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_queues_count_over_limit[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,10,1000000,0] [GOOD] >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,10,1000000,0.5] >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |85.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |85.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tools/query_replay_yt/query_replay_yt |85.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay_yt/query_replay_yt |85.3%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |85.3%| [LD] {RESULT} $(B)/ydb/tools/query_replay_yt/query_replay_yt |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[replace] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/test-results/unittest/{meta.json ... results_accumulator.log} >> ListObjectsInS3Export::PagingParameters [GOOD] |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> ListObjectsInS3Export::ParametersValidation |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,10,1000000,0.5] [GOOD] >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch [GOOD] |85.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |85.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots >> TBackupTests::ShouldSucceedOnLargeData[Raw] [GOOD] >> ListObjectsInS3Export::ParametersValidation [GOOD] >> test_auditlog.py::test_dml_requests_logged_when_unauthorized >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table [GOOD] |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,1000,0,0] |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> SpaceCheckForDiskReassign::Basic [GOOD] >> VDiskAssimilation::Test >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query >> TBackupTests::ShouldSucceedOnLargeData[Zstd] [GOOD] >> TxUsage::WriteToTopic_Demo_27_Query [GOOD] >> KqpLimits::QueryExecTimeout [GOOD] >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,1000,0,0] [GOOD] >> test_auditlog.py::test_dynconfig >> test_auditlog.py::test_single_dml_query_logged[update] [GOOD] >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,1000,0,0.5] >> TxUsage::WriteToTopic_Demo_45_Table |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/test-results/unittest/{meta.json ... results_accumulator.log} |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:35.654527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:35.654557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.654574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:35.654581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:35.654587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:35.654592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:35.654603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.654619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:35.654776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:35.654904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:35.667585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:35.667612Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:35.671213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:35.671263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:35.671294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:35.673539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:35.673639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:35.673778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.673854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:35.674601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.674657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:35.674936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.674947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.674975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:35.674984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.674991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:35.675007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.676574Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:35.701245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:35.701332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.701410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:35.701419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:35.701482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:35.701498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:35.703816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.703889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:35.703964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.703987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:35.703994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:35.704000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:35.705612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.705644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:35.705654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:35.706362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.706382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.706390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-30T09:26:35.706398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:35.707309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:35.708553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:35.708608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:35.708863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.708909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:35.708919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.709018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:35.709030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.709069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:35.709084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:35.709677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.709689Z node 1 :FLAT_TX_SCHEMESHARD ... 
:3462:5423], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } REQUEST: PUT /data_00.csv?partNumber=100&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:29697 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: DB76E737-AFAD-4CA7-9142-B750AC4BBDF0 amz-sdk-request: attempt=1 content-length: 130 content-md5: Wyd1w7MZYbbZucaVvuRDAw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / partNumber=100&uploadId=1 / 130 2025-06-30T09:26:51.880704Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3463:5424], result# UploadPartResult { ETag: 5b2775c3b31961b6d9b9c695bee44303 } 2025-06-30T09:26:51.880746Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3462:5423] 2025-06-30T09:26:51.880753Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3463:5424], sender# [1:3462:5423], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv?partNumber=101&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:29697 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 4ACE1679-ADC4-4FE7-BF84-F196F95A8436 amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / partNumber=101&uploadId=1 / 0 2025-06-30T09:26:51.881353Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3463:5424], result# UploadPartResult { ETag: d41d8cd98f00b204e9800998ecf8427e } 2025-06-30T09:26:51.881361Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:3463:5424], success# 1, error# , multipart# 1, uploadId# 1 2025-06-30T09:26:51.882851Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:526: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [1:3463:5424], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: 
[a59dd9a97cf3685e69093fb2d96653c6,bdbb215613239cb3a835fee1fe7e7ca3,cb38dbc776d5763f1926dfb22d508c87,3c430d66d07a0a4b1fa889f321fce197,43baf91083f286b60bf15e7786459cd9,90b5581bef612fa3bf9b38b336af405f,fd4869c26a12d22ee79256d778954d04,a9459bc28198b0b6bd67732c492fd740,697a3f8386ea1ff4e327de943224cb1a,614da0b4ec9464e69cd0c59909e80fbb,9b94eb3f67aa4c8a0bcbf546833ed966,fd45c3afacec641ad19e59d2b31aeba4,fd69678aecbc149601f58cf13c64d33e,90c09ab4923bc9f97f825d36e32bf362,c1586416a281a4cca2b2b4e333d9b079,f31908576272623f9f0a19bf774cde8e,6fe3b42388304d2af07c629aeb683581,7bc90eec21ca5bb3648e6a48e83c5730,8e1dda26de1af89bdffe2eefdcebea1d,14dc42d90caa1575bbfffa9dc8f21d66,92efb2368eecb32d4075c09294fde0b7,98efff5f7c7ecb42e7af65142ce05af9,6206c81807b3b9283b0173ee2c682100,616b431b91aedc9de4593321eb42ba96,9ae4762563ffdec596cc9ca4cb8913e1,946ebf2d95b4796ea2faee21f017be79,45834a9948bb4ab8b62d1894156d13ed,6ad3fe7286856927c1e00422bc8da697,ef89464d20eae46829e1bf557e4d04ce,f128e5de32097d205453080b01c94ac3,c13e650ee2cfcecfdf4f578a2e5b1c2d,fc26314711b25d20fc654cf59301b806,56f6f2c574fba86496a87a7dd5fab46c,c7951eace72cfe0f14f808173e07bc64,3d9ad3340e58b973eaf8d4f14ba3b0f9,fc41d6fdfb52389dda8b26d7a0a3a889,9974b6ae96ffd0b756acb67088e890f9,cde8a5604010abe8fccfa9492144036f,0364e048eaac35c26d48b0c5072b5255,aac5a84927124d6ae4931e2650c80d9f,eab068fe4ca35c2f3e35890bd727eb4f,bc3646bdbcbc7f97dcddf2202ea9421f,6d3f63d672eda4a4617c9e7589a68bfc,0401bade6c3031b5be872238520b993a,1c6405688f86423480173e3e316a20bd,52395f68e877cbb8d7115a247331b0a7,4b0673ac18058554d2c53bf9f99b34b2,87bc1b9e650b31e81a9ad2531e3ef9da,b29053c8cd093c8b92ad3954c42cb7be,faf1084f6b33b00e2e822d1d3c3f0083,eedec03ee8d7eda4654db7206ad0889e,be4469dd028d5519a67098055f25513f,a7afa9827ec27c565cff1ed505a06f4b,91fe8109d2ad934c4364d90c29aaba71,73b81ea00e11db12d66497d30eb48446,cce69ef69777afeab34eefa515abc7f4,4e4ac1a421353964356400b8be8e21da,32cd6083b12660bcd4062af08d89eb05,71957b9db37811c7680638b82dc6384b,a8787e692c423a2dfa07dd261e72790a,283838ab16206b27738ea6653110f833,88bf084fb3029f0d5c0705eece930d70,1ed2f9f7221f1718b81fdf2d846347dd,406706cfbc454922dcad50b9c534b8d1,dbb606c993d798974ed4f5c9ebf195ca,1a4a3868dc6fa26c6b019d237f9ea6f4,82660a3c6b576a1b3fea925f3c179a2e,d393db2749ae42e854e85eeec2ea3592,b42c92ad14ee0e5351fec7e5a045a91b,2c7af27f9dc77efbcbe71c2d7997d6e9,278aba62ab1d9e3ff16df2d82ac5f5c7,6b8380404a7e7ec95ad5f3941d5d404c,c9813b9fc1d6b5087e64849076edd0f8,160785e4dac02a91c43a497ee59eea06,db529a9ba22f60f404031cfe85e966e9,9b70af168e2d3769bd8bc4dffa3202ea,9ac39c3843b6621ace44acf430a59e06,4603ff564a46e93951f246ed18926071,66b85f35ee76a7f71f50e9aad56758de,1665c284ad04d6b893b69372bf8fc6b9,8c1c27ec88fb52f06de6e7516a392672,0a5f992db51277a05ec12f0d6459ef21,8debe3a6023155561cb0890fc05bd7fb,938ece258b7596f8eea7e82bc2b8f88c,767ca0dcf0b154fa3c818044bbfc58fd,914cc7165d994bb05824332ac120446f,ab0ece250f5959a510170ee07aa21b5d,8bf4b44d67f062026b0010a8a0b39cc0,e0aa13fa8246e68c18905d3abadfc44d,27b021b75b6a95f63ea27f7ec238c05f,673e661e4cfea1e431678dd9881c2a8c,f101b34943f1831ae8c0b46ffcb1c2d6,562b32a8142b29c1a88e507ab1981a6b,fdea4c6fc2befb44614992ca8bf34b21,b7c8ec6acc45b037978482996e910b75,aec72fbd2e171b798900b22897d00941,710ef5b5e8eba750b6acc9b32dff42a3,821c7e22ef9c22098171e7f837dcfcc8,aecc9f6d0e6f54e938a10d40fda96d7b,5b2775c3b31961b6d9b9c695bee44303,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:29697 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 
04417CF7-0EFC-46B2-86E8-B84F1F7EC480 amz-sdk-request: attempt=1 content-length: 11529 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv / uploadId=1 2025-06-30T09:26:51.905456Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:623: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [1:3463:5424], result# CompleteMultipartUploadResult { Bucket: Key: data_00.csv ETag: 5d8c28efc812b445ddd02900ff3ee599 } 2025-06-30T09:26:51.905558Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3462:5423], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-30T09:26:51.908667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-30T09:26:51.908690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:26:51.908725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-30T09:26:51.908737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-30T09:26:51.908749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:51.908753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:51.908757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:26:51.908763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240 2025-06-30T09:26:51.908844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:51.909662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:51.909799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:51.909807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:26:51.909818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:51.909822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:51.909826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:51.909830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:51.909834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:26:51.909853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:342:2319] message: TxId: 102 2025-06-30T09:26:51.909858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:51.909862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:26:51.909865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:26:51.909914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:26:51.910613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:26:51.910623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3446:5408] TestWaitNotification: OK eventTxId 102 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:35.456628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:35.456660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.456666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, 
StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:35.456671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:35.456678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:35.456682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:35.456690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:35.456703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:35.456801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:35.456856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:35.468112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:35.468140Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:35.471267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:35.471334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:35.471369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:35.473628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:35.473704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:35.473800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.473865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:35.474755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.474811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:35.475050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.475060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:35.475088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:35.475098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: 
TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:35.475105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:35.475125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.476495Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:35.491831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:35.491903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.491965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:35.491972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:35.492017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:35.492028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:35.492892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.492933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:35.492990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.493007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:35.493011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:35.493015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:35.493459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.493469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:35.493476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:35.493888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.493898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:35.493903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.493909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:35.494415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:35.494879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:35.494915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:35.495065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:35.495086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:35.495092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.495150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:35.495155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:35.495181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:35.495190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant 
no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:35.495690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:35.495699Z node 1 :FLAT_TX_SCHEMESHARD ... -06-30T09:26:51.585550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-30T09:26:51.585638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 128 -> 129 2025-06-30T09:26:51.585672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:26:51.588538Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:3463:5424], attempt# 0 2025-06-30T09:26:51.592996Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:3463:5424], sender# [1:3462:5423] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:4709 Accept: */* Connection: Upgrade, HTTP2-Settings2025-06-30T09:26:51.595543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:51.595567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: DB99CB31-3D94-4789-BF5F-982BCE8CBFFE amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-30T09:26:51.595649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:51.595655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-30T09:26:51.595846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:51.595857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:51.595939Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:3463:5424], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } FAKE_COORDINATOR: Erasing txId 102 2025-06-30T09:26:51.597060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, 
cookie: 102 2025-06-30T09:26:51.597087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-30T09:26:51.597092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-30T09:26:51.597101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-30T09:26:51.597109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-30T09:26:51.597152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:4709 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 244CFF68-82BB-447B-9443-72AFCEB07BFB amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-30T09:26:51.597846Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:3463:5424], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-30T09:26:51.597967Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3462:5423] 2025-06-30T09:26:51.598212Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3463:5424], sender# [1:3462:5423], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-06-30T09:26:51.598874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:4709 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 2CE1818E-12EC-4FC6-965B-1150716A69E9 amz-sdk-request: attempt=1 content-length: 740 content-md5: P/a/uWmNWYxyRT1pAtAE7A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 740 2025-06-30T09:26:51.599159Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:3463:5424], result# PutObjectResult { ETag: 3ff6bfb9698d598c72453d6902d004ec } 2025-06-30T09:26:51.599168Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:3463:5424], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-30T09:26:51.599253Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3462:5423], msg# 
NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-30T09:26:51.622674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-30T09:26:51.622703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:26:51.622731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-30T09:26:51.622765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-30T09:26:51.622783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:51.622787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:51.622791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:26:51.622798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240 2025-06-30T09:26:51.622882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:51.623826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:51.624002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:51.624013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:26:51.624028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:51.624033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 
2025-06-30T09:26:51.624037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:51.624040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:51.624043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:26:51.624066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:342:2319] message: TxId: 102 2025-06-30T09:26:51.624083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:51.624088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:26:51.624092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:26:51.624149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:26:51.625004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:26:51.625015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3446:5408] TestWaitNotification: OK eventTxId 102 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-30T09:26:36.711752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:26:36.711776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:36.711781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:36.711785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:36.711789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:36.711792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:36.711799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:36.711810Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:36.711911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:36.711989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:36.724202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 2025-06-30T09:26:36.724230Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:36.727410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:36.727476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:36.727516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:36.729958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:36.730049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:36.730197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:36.730304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:36.731120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:36.731182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:36.731492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:36.731507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:36.731533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:36.731543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:36.731549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:36.731571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-30T09:26:36.732995Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-30T09:26:36.750555Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:36.750624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:36.750683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:36.750689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:36.750758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:36.750773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:36.751522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:36.751565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:36.751621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:36.751637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:36.751642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:36.751647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:36.752051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:36.752066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:36.752076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:36.752446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:36.752455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-30T09:26:36.752459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:36.752466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:36.752945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:36.753295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:36.753327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:36.753490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:36.753509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-30T09:26:36.753515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:36.753569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:26:36.753575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:36.753624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-30T09:26:36.753634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-30T09:26:36.753959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:36.753965Z node 1 :FLAT_TX_SCHEMESHARD ... 
:NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } REQUEST: PUT /data_00.csv.zst?partNumber=100&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:10781 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C4246E9F-CE76-4251-82F1-57A78C7247BB amz-sdk-request: attempt=1 content-length: 55 content-md5: B5SOCmjwb1RI3tHamcoRHA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv.zst / partNumber=100&uploadId=1 / 55 2025-06-30T09:26:52.638314Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3463:5424], result# UploadPartResult { ETag: 07948e0a68f06f5448ded1da99ca111c } 2025-06-30T09:26:52.638346Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3462:5423] 2025-06-30T09:26:52.638352Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3463:5424], sender# [1:3462:5423], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst?partNumber=101&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:10781 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A357F9D3-E87C-4E52-B2FB-C7D2AFAE29DA amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv.zst / partNumber=101&uploadId=1 / 0 2025-06-30T09:26:52.638669Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3463:5424], result# UploadPartResult { ETag: d41d8cd98f00b204e9800998ecf8427e } 2025-06-30T09:26:52.638674Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:3463:5424], success# 1, error# , multipart# 1, uploadId# 1 2025-06-30T09:26:52.639832Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:526: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [1:3463:5424], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: 
[f8f51a1e4a70db44fa91cc2ab9680824,9eba675fd7f187274786dff2f47292df,921325fb6b8811df3d06a44dbe1f8523,4eeb6b90e8e61075275bd8a42f56bd69,2840a487abe8cb9502b3d9c8a8e1c942,607d8f6e3b235a360d63796efd3a51c2,ed22e08df7fb8840f7cabc779cc86885,efeff2c7731061edd9a39059cc078045,4af01cb3455932f28e3bba713dcd57c9,dc94d36ecf3b36d183d75c84b9b2fac6,e2ce425dd2bb582abcc13d0d714c3554,b71e46686939d2cdf046520dd2774281,ab731a82a161e5e044b24e895a1713d6,1df51aaec89711e13a6f95c13113e36c,b6066b2ed343831b1b0ee0076179981e,332d34d77adc2b024a33d87e07d4233f,cf0093cc99590a0e8f9c199ed6deca07,8cc923ec76224e69263ac93b7bfabd30,690d66897e0780f2dfe3614e5a659a22,7502aae0ec253663b1cbfdc8ede92ab9,7d2c6f728ee0c12097dfe5441970b946,5fc7b9b675e0a125eea67cf05f82627f,fc8c5faa99cc7f4ce7ca320f8e7adb58,8e305c5aca758683ff25407a7bbd9220,181bce9c6393e22a0ac359a7b45d8187,639677548f0a8b776a6db92f44d96505,390ff8f57cfa4c04bfbed0d7a63c90e8,3dd76756e6558fd6c8c918210f7dc136,a3f5254fdad3ded54edef910e704c151,e9186373f80dbaa55dd04d07621de277,8898b965060a431b499261ec0cd3cee3,3ed51c736e64defe04980ce328b17aa4,bb0e45971888796588c12ea1c1bec162,e2b3defa84005d3892986ca6894b811f,656c7c809c8c8485f6e91892591cd284,779c6827126f255bde25ae242bf4c8ff,8883fc9b073e683558f1231c5f2142d0,19390a0e3340bcb6ccfe866a790f05cb,305182d3e9745fba3aad1973bb1bfc93,002819d72a6dc7954ecc1bcd2bd20254,325c6bc3cdd6fd83083cf0126c606218,b86932903843b9626e80bd9ccb5d0571,b5054116537a7c467bdb488c9d67dee7,fc3a45bd17a00b147e4f9c55bc2493da,1118e2f41e8839211163250796a65dce,b403ff17c2c269a79201a03ce439dc2a,88f2692ee439cfadef1cd21d58aac8d3,e5bef12f89b101af84d52299a5867d99,ed613335180c53f69d450ef8b176a4d5,150fd7dcdc86eb38c7f821ff4698d8bc,a0c18bf08acc6ebecac04a2520efee9b,e8463d7ce8f502d1575a433c1b30a9af,f123e0fc879e2fdc2c3e2f698fc4176d,d7ab79d73e4648e0a2bf8dec3a19c019,4e74b82f6a8ea7fad8790ee7dfcdb76e,f72bb1d8aa0f5c9265bae10a3784d8e8,924b317371d16363a37962b17a2ae4bb,7214b458c7e25c791e54bd430b835a6e,e79dba1b56122372af3fe7b06ea91bda,6aae345b94d78fc7c1ed0b8697cf5e62,fd3636ed699facb5f0c12f81741cabc5,2c4a198408c3eb9577fcd339ca62c539,59fbf761f9b7574b65fa6877b167bb8c,14f9f5cfdf3a6c33c577a54429b19cb6,c6d078b3be9cd7943e8145fd982baeef,198f55ae25539fbd54a4a6075beac2d1,939123b44e362c76a151a85af0247fb7,0147f8bd741be7780cbc900b6f4b0899,43453200aeaf201420737354cd73cfe4,de26d1339779fe0c538d01d5963fd423,5c903650e719f959dc9f37ea360c6319,23607b3f36e0a2abae7f1ed8e38596f3,0db9af920c6d1cf868e470bf7a349747,aed6ac19c60d08500582eea9dadcdfee,3f4e37ddd3e2e56a725323fad4d85cf6,942b269af420b4277d025cea489dcb25,89eddc25ba615b6cf09b9cd9a11a16bb,1d8e7f0613dc1919ee90133c468380bd,8bf1e4c1266d8437c1bd85e0fca6640a,e9eabcf5b61cf257f530b156dbd77a88,411f1661ae7650d2144e8c6f8a33b28f,6706ec5b8771e555779d5cbeca41aa75,b3a33ef21a8224ddc78a52e8d7ca8357,58749d344f42c192e572eda4ee66fb01,381aeb5ee3014e2c0fd9b85bd59ce005,9aed2297cd10dce10d68de3ff1830b42,be88e095fc3a13708b714db03b1f2744,5628e81ee17fb22fc828ed1b2169578b,a1cfb563fa4af884fe02ced05c26c881,fc602b8ee2e9746fb52823f8fd1f0f28,a1de256e94c7baa9b8ab905c892d1a14,6bff895b0b5f3552ad4bdc61b0d24148,fcba1d258a8651d831767b42e010e439,bef6e3d7088e671809fe584531f96971,f0b489242271d11200dbdbc78e4ce715,372d2d6877fff7c04433e492ad4dbd45,32191cf1972dcccd59c0b5a8b53d4f23,25928b7997b97ac58f18fbbe589573e8,472e53a27497661c6400410909405c4e,07948e0a68f06f5448ded1da99ca111c,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv.zst?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:10781 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 
73E030D9-77D7-497B-B537-9DCE81B07BB2 amz-sdk-request: attempt=1 content-length: 11529 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv.zst / uploadId=1 2025-06-30T09:26:52.641073Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:623: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [1:3463:5424], result# CompleteMultipartUploadResult { Bucket: Key: data_00.csv.zst ETag: c902b621cdd1ee89b9f1c4e6c36e6e45 } 2025-06-30T09:26:52.641142Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3462:5423], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-30T09:26:52.643546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5603: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-30T09:26:52.643563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1775: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-30T09:26:52.643585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:624: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-30T09:26:52.643596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-30T09:26:52.643607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:52.643611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:52.643615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-30T09:26:52.643621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 102:0 129 -> 240 2025-06-30T09:26:52.643662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:52.644348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:652: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:52.644442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-30T09:26:52.644448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-30T09:26:52.644459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:52.644463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:52.644467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-30T09:26:52.644469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:52.644472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-30T09:26:52.644484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1640: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:342:2319] message: TxId: 102 2025-06-30T09:26:52.644493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-30T09:26:52.644497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-30T09:26:52.644500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 102:0 2025-06-30T09:26:52.644524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-30T09:26:52.645146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-30T09:26:52.645160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3446:5408] TestWaitNotification: OK eventTxId 102 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpLimits::QueryExecTimeout [GOOD] Test command err: Trying to start YDB, gRPC: 3322, MsgBus: 21305 2025-06-30T09:25:39.953505Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671169322138987:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:39.953542Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0041fc/r3tmp/tmpTjj0zy/pdisk_1.dat 2025-06-30T09:25:40.021817Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3322, node 1 2025-06-30T09:25:40.041211Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:40.041227Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will 
try to initialize from file: (empty maybe) 2025-06-30T09:25:40.041229Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:40.041279Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:40.056347Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:40.056378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:40.057383Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21305 TClient is connected to server localhost:21305 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:40.123911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:40.147315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:40.237081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:40.275026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:25:40.295264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:40.526920Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671173617107874:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.526986Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.584506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.593015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.605089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.620130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.675931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.688485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.703505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:40.719890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671173617108530:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.719931Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.720533Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671173617108535:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:40.721642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:40.729023Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671173617108537:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:40.788265Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671173617108588:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:40.955616Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:40.956772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:41.904586Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=YjZiNDEwNDYtYWI0N2YxZi0zMDcxYjM0MC01OTQ3NjdmNg==, ActorId: [1:7521671177912076903:2576], ActorState: ExecuteState, TraceId: 01jz02g5672nqxcdvtnk1s5gn7, Create QueryResponse for error on request, msg:
: Error: Query result size limit exceeded. (80001703 > 50331648), code: 2013 Trying to start YDB, gRPC: 9873, MsgBus: 18484 2025-06-30T09:25:42.180950Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671180740339868:2142];s ... ter 100ms
: Error: Cancelling after 98ms during execution Trying to start YDB, gRPC: 62071, MsgBus: 8782 2025-06-30T09:25:53.179573Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7521671227993614640:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:53.179591Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0041fc/r3tmp/tmphk9QGa/pdisk_1.dat 2025-06-30T09:25:53.198706Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62071, node 4 2025-06-30T09:25:53.211333Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:53.211350Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:53.211353Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:53.211407Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8782 TClient is connected to server localhost:8782 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:53.286069Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:53.286104Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:53.286189Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:53.287512Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:25:53.288481Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:53.293374Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.310373Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:53.340068Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:25:53.355689Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.590910Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671227993616204:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.590949Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.601599Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.615240Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.628243Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.643961Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.656339Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.670815Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.684783Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:53.756094Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671227993616863:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.756134Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671227993616868:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.756133Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:53.757032Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:53.759078Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671227993616870:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:25:53.815429Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671227993616921:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:54.183591Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:58.180254Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7521671227993614640:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:58.180367Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:26:08.195103Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs 2025-06-30T09:26:08.195123Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
: Error: Query did not complete within specified timeout 500ms, session id ydb://session/3?node_id=4&id=MWNhMzM2ZjYtM2ZmYzAzN2EtNjc4NzM5YWItNzMxYjUwYjE= |85.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> TExternalTableTestReboots::DropReplacedExternalTableWithReboots [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue_with_unsupported_tables_format [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create[fifo] |85.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_backup/test-results/unittest/{meta.json ... results_accumulator.log} |85.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup/test-results/unittest/{meta.json ... results_accumulator.log} >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create[fifo] [GOOD] |85.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.4%| [TA] $(B)/ydb/tests/fq/control_plane_storage/test-results/unittest/{meta.json ... results_accumulator.log} |85.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create[std] >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,1000,0,0.5] [GOOD] >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,1000,100,0.5] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create[std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::DropReplacedExternalTableWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:135:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:140:2058] recipient: [1:114:2144] 2025-06-30T09:26:32.620103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 
1 2025-06-30T09:26:32.620136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:32.620143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:26:32.620148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:26:32.620166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:26:32.620172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:26:32.620182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:26:32.620198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:26:32.620312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:26:32.620407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:26:32.638412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7739: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-30T09:26:32.638441Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:32.638568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2156] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-30T09:26:32.642232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:26:32.643383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:26:32.643447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-30T09:26:32.648790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:26:32.648864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:26:32.649030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:32.649100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-30T09:26:32.650327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:32.650401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:26:32.650713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:32.650729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:32.650766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:26:32.650778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:32.650786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:26:32.650823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:220:2058] recipient: [1:218:2217] Leader for TabletID 72057594037968897 is [1:224:2221] sender: [1:225:2058] recipient: [1:218:2217] 2025-06-30T09:26:32.652509Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:245:2058] recipient: [1:15:2062] 2025-06-30T09:26:32.670623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-30T09:26:32.670715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.670800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-30T09:26:32.670809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-30T09:26:32.670859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-30T09:26:32.670875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:32.671678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 
72057594046678944 2025-06-30T09:26:32.671733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-30T09:26:32.671799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.671810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-30T09:26:32.671817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:26:32.671823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:26:32.672422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.672438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-30T09:26:32.672445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:26:32.672956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.672969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-30T09:26:32.672976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-30T09:26:32.672985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:26:32.673979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:26:32.674543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:26:32.674589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:139:2159] sender: [1:260:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-30T09:26:32.674906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-30T09:26:32.674942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep 
Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 42949694 ... TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:26:54.424784Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1005:0 128 -> 240 2025-06-30T09:26:54.424813Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:26:54.424839Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:26:54.424851Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:26:54.425059Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:26:54.425084Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 FAKE_COORDINATOR: Erasing txId 1005 2025-06-30T09:26:54.425532Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-30T09:26:54.425541Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-30T09:26:54.425592Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-30T09:26:54.425613Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-30T09:26:54.425637Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-30T09:26:54.425643Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [83:210:2210], at schemeshard: 72057594046678944, txId: 1005, path id: 1 2025-06-30T09:26:54.425650Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [83:210:2210], at schemeshard: 72057594046678944, txId: 1005, path id: 4 2025-06-30T09:26:54.425656Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [83:210:2210], at schemeshard: 72057594046678944, txId: 1005, path id: 3 2025-06-30T09:26:54.425728Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-30T09:26:54.425736Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1005:0 ProgressState 2025-06-30T09:26:54.425751Z node 83 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:26:54.425757Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:26:54.425764Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1005:0 progress is 1/1 2025-06-30T09:26:54.425769Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:26:54.425774Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: false 2025-06-30T09:26:54.425781Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-30T09:26:54.425787Z node 83 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1005:0 2025-06-30T09:26:54.425792Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1005:0 2025-06-30T09:26:54.425809Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-30T09:26:54.425815Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-30T09:26:54.425821Z node 83 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1005, publications: 3, subscribers: 0 2025-06-30T09:26:54.425827Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-30T09:26:54.425832Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-30T09:26:54.425837Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-30T09:26:54.425921Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:54.425934Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:54.425939Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:26:54.425945Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-30T09:26:54.425950Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for 
pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-30T09:26:54.426037Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-30T09:26:54.426044Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-30T09:26:54.426054Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-30T09:26:54.426134Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:54.426146Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:54.426151Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:26:54.426156Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-30T09:26:54.426164Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-30T09:26:54.426507Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:54.426524Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-30T09:26:54.426529Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1005 2025-06-30T09:26:54.426535Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-30T09:26:54.426541Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-30T09:26:54.426555Z node 83 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1005, subscribers: 0 2025-06-30T09:26:54.427244Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:26:54.427300Z node 83 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: 
TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-30T09:26:54.427488Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-30T09:26:54.427568Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-30T09:26:54.427633Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-30T09:26:54.427642Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-30T09:26:54.427725Z node 83 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-30T09:26:54.427747Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-30T09:26:54.427753Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [83:420:2409] TestWaitNotification: OK eventTxId 1005 >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[upsert] >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] >> test_auditlog.py::test_create_and_remove_tenant >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,1000,100,0.5] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[select] >> VDiskAssimilation::Test [GOOD] >> VDiskAssimilation::TestReverse >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] [GOOD] |85.4%| [TM] {default-linux-x86_64, relwithdebinfo}
ydb/tests/functional/audit/py3test |85.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::DuplicationCompactionVariants[2,false,0,1000,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 20944, MsgBus: 7882 2025-06-30T09:26:43.222888Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671444351265858:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:43.223074Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003da1/r3tmp/tmpVmgaxm/pdisk_1.dat 2025-06-30T09:26:43.288727Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:43.289883Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671444351265743:2080] 1751275603221923 != 1751275603221926 TServer::EnableGrpc on GrpcPort 20944, node 1 2025-06-30T09:26:43.323137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:43.323187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:43.324478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:43.334970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:43.334986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:43.334989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:43.335046Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7882 TClient is connected to server localhost:7882 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:43.409559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2); 2025-06-30T09:26:43.740667Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671444351266367:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:43.740702Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:43.777431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:26:43.798099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:43.798100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:26:43.798153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:26:43.798170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:26:43.798246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:26:43.798247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:26:43.798286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:26:43.798287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:26:43.798313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:26:43.798313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:26:43.798337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:26:43.798337Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:26:43.798365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:26:43.798366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:26:43.798396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:26:43.798398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:26:43.798423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:26:43.798423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:26:43.798456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:26:43.798460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:26:43.798488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:26:43.798488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:26:43.798513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7521671444351266439:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:26:43.798513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521671444351266438:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:26:43.799228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:26:43.799228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:26:43.799 ... 15587;max=18446744073709551615;plan=0;src=[7:7521671496849461731:2138];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:55.588241Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[7:7521671496849462108:2295];ev=NActors::IEventHandle;tablet_id=72075186224037889;tx_id=281474976715658;this=88899255550112;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1751275615587;max=18446744073709551615;plan=0;src=[7:7521671496849461731:2138];cookie=22:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:55.590104Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:55.590125Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-30T09:26:55.593222Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-30T09:26:55.593222Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `COMPACTION_PLANNER.CLASS_NAME`=`l-buckets`) 2025-06-30T09:26:55.597785Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671496849462195:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:55.597812Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:55.600961Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:55.603809Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:26:55.603809Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`) 2025-06-30T09:26:55.608114Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671496849462229:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:55.608140Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:55.610778Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:55.614106Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:26:55.614108Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`false`, `COLUMNS_LIMIT`=`0`, `SPARSED_DETECTOR_KFF`=`1000`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:26:55.618278Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671496849462263:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:55.618303Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:55.621252Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:26:55.627970Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-30T09:26:55.627975Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3"}')), (4u, JsonDocument('{"b" : "b4", "a" : "a4"}')) 2025-06-30T09:26:55.632609Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671496849462298:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:55.632633Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671496849462303:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:55.632640Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:55.633374Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:55.635488Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671496849462305:2329], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:26:55.695102Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671496849462356:2479] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:55.714679Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=10;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:55.714679Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=10;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:55.720296Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715665;tx_id=281474976715665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715665; 2025-06-30T09:26:55.720400Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715665;tx_id=281474976715665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715665; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "1a1"}')), (2u, JsonDocument('{"a" : "1a2"}')), (3u, JsonDocument('{"b" : "1b3"}')) 2025-06-30T09:26:55.741518Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=18;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:55.744884Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1) VALUES(2u) 2025-06-30T09:26:55.763287Z node 7 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=24;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-30T09:26:55.766398Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; WAIT_COMPACTION: 0 2025-06-30T09:26:56.034522Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WAIT_COMPACTION: 0 WAIT_COMPACTION: 0 COMPACTION_HAPPENED: 0 -> 1 EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"1a1\"}"]];[2u;#];[3u;["{\"b\":\"1b3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4\"}"]]] OUTPUT: [[1u;["{\"a\":\"1a1\"}"]];[2u;#];[3u;["{\"b\":\"1b3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4\"}"]]] INDEX:0/0/0 HEADER:0/0/0 ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/services/ydb/backup_ut/unittest >> ListObjectsInS3Export::ParametersValidation [GOOD] Test command err: 2025-06-30T09:23:02.619135Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670496267754506:2160];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:23:02.619223Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0026b2/r3tmp/tmpX7vWLG/pdisk_1.dat 2025-06-30T09:23:02.717803Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:23:02.721250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:23:02.721280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:23:02.722856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:23:02.735084Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 27142, node 1 2025-06-30T09:23:02.742939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:23:02.742952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:23:02.742954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:23:02.743001Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7554 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:23:02.781790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:23:03.078222Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670500562722615:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.078272Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.078648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521670500562722627:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:23:03.078777Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7521670496267754590:2140] Handle TEvProposeTransaction 2025-06-30T09:23:03.078785Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7521670496267754590:2140] TxId# 281474976715658 ProcessProposeTransaction 2025-06-30T09:23:03.078810Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7521670496267754590:2140] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7521670500562722630:2606] 2025-06-30T09:23:03.090986Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7521670500562722630:2606] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-30T09:23:03.091042Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7521670500562722630:2606] txid# 281474976715658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-30T09:23:03.091047Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7521670500562722630:2606] txid# 281474976715658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-30T09:23:03.091595Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7521670500562722630:2606] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-30T09:23:03.091615Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7521670500562722630:2606] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-30T09:23:03.091655Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7521670500562722630:2606] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-30T09:23:03.091705Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7521670500562722630:2606] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-30T09:23:03.091718Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7521670500562722630:2606] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-30T09:23:03.091770Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7521670500562722630:2606] txid# 281474976715658 HANDLE EvClientConnected 2025-06-30T09:23:03.091807Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [1:7521670500562722655:2612], Recipient [1:7521670496267754694:2196]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:03.091812Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:23:03.091815Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:23:03.091823Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 271122432, Sender [1:7521670500562722630:2606], Recipient [1:7521670496267754694:2196]: {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-30T09:23:03.091825Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-30T09:23:03.092696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: ".metadata/workload_manager/pools/default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } TxId: 281474976715658 TabletId: 72057594046644480 Owner: "metadata@system" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-30T09:23:03.092818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-30T09:23:03.092863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: .metadata, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-30T09:23:03.092879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-30T09:23:03.092895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976715658:0 type: TxMkDir target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-30T09:23:03.092913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-30T09:23:03.092919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976715658:1, at schemeshard: 72057594046644480 2025-06-30T09:23:03.092929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:349: 
AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 2], parent name: .metadata, child name: workload_manager, child id: [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-30T09:23:03.092933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 0 2025-06-30T09:23:03.092936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 281474976715658:1 type: TxMkDir target ... , Sender [58:7521671477884328557:2344], Recipient [58:7521671477884326791:2212]: NKikimrExport.TEvGetExportRequest Request { Id: 281474976715664 } DatabaseName: "/Root" 2025-06-30T09:26:51.830402Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5062: StateWork, processing event TEvExport::TEvGetExportRequest 2025-06-30T09:26:51.830591Z node 58 :TX_PROXY DEBUG: rpc_get_operation.cpp:209: [GetExport] [58:7521671477884328557:2344] [0] Handle TEvExport::TEvGetExportResponse: record# Entry { Id: 281474976715664 Status: SUCCESS Progress: PROGRESS_DONE ExportToS3Settings { endpoint: "localhost:20622" scheme: HTTP bucket: "test_bucket" items { source_path: "dir1/Table1" destination_prefix: "Prefix/dir1/Table1" } items { source_path: "Table0" destination_prefix: "Prefix/Table0" } items { source_path: "dir1/dir2/Table2" destination_prefix: "Prefix/dir1/dir2/Table2" } source_path: "/Root" destination_prefix: "Prefix//" } ItemsProgress { parts_total: 1 parts_completed: 1 start_time { seconds: 1751275611 } end_time { seconds: 1751275611 } } ItemsProgress { parts_total: 1 parts_completed: 1 start_time { seconds: 1751275611 } end_time { seconds: 1751275611 } } ItemsProgress { parts_total: 1 parts_completed: 1 start_time { seconds: 1751275611 } end_time { seconds: 1751275611 } } StartTime { seconds: 1751275611 } EndTime { seconds: 1751275611 } } 2025-06-30T09:26:51.830860Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [58:7521671477884328560:3547], Recipient [58:7521671477884326791:2212]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:26:51.830870Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:26:51.830872Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:26:51.831617Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:57: [ListObjectsInS3Export] [58:7521671477884328561:2345] Resolve database: name# /Root 2025-06-30T09:26:51.831724Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:73: [ListObjectsInS3Export] [58:7521671477884328561:2345] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:26:51.831733Z node 58 
:TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:134: [ListObjectsInS3Export] [58:7521671477884328561:2345] Send request: schemeShardId# 72057594046644480 2025-06-30T09:26:51.831801Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [58:7521671477884328564:3549], Recipient [58:7521671477884326791:2212]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:26:51.831808Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:26:51.831810Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:26:51.831846Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 275251210, Sender [58:7521671477884328561:2345], Recipient [58:7521671477884326791:2212]: NKikimrImport.TEvListObjectsInS3ExportRequest OperationParams { } Settings { endpoint: "localhost:20622" scheme: HTTP bucket: "test_bucket" access_key: "test_key" secret_key: "test_secret" } PageSize: 0 PageToken: "" 2025-06-30T09:26:51.831854Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5077: StateWork, processing event TEvImport::TEvListObjectsInS3ExportRequest 2025-06-30T09:26:51.831896Z node 58 :IMPORT INFO: schemeshard_import_getters.cpp:1340: Reply: self# [58:7521671477884328565:3550], status# 400010, error# Empty S3 prefix specified 2025-06-30T09:26:51.831927Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:156: [ListObjectsInS3Export] [58:7521671477884328561:2345] Handle TListObjectsInS3ExportRPC::TEvListObjectsInS3ExportResponse: record# Status: BAD_REQUEST Issues { message: "Empty S3 prefix specified" } 2025-06-30T09:26:51.832105Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [58:7521671477884328564:3549], Recipient [58:7521671477884326791:2212]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:26:51.832126Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:26:51.832132Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:26:51.832538Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:57: [ListObjectsInS3Export] [58:7521671477884328566:2346] Resolve database: name# /Root 2025-06-30T09:26:51.832622Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:73: [ListObjectsInS3Export] [58:7521671477884328566:2346] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:26:51.832643Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:134: [ListObjectsInS3Export] [58:7521671477884328566:2346] Send request: schemeShardId# 72057594046644480 2025-06-30T09:26:51.832704Z node 58 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [58:7521671477884328569:3552], Recipient [58:7521671477884326791:2212]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:26:51.832712Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:26:51.832714Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:26:51.832740Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 275251210, Sender [58:7521671477884328566:2346], Recipient [58:7521671477884326791:2212]: NKikimrImport.TEvListObjectsInS3ExportRequest OperationParams { } Settings { endpoint: "localhost:20622" scheme: HTTP bucket: "test_bucket" access_key: "test_key" secret_key: "test_secret" prefix: "Prefix" } PageSize: -42 PageToken: "" 2025-06-30T09:26:51.832748Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5077: StateWork, processing event TEvImport::TEvListObjectsInS3ExportRequest 2025-06-30T09:26:51.832774Z node 58 :IMPORT INFO: schemeshard_import_getters.cpp:1340: Reply: self# [58:7521671477884328570:3553], status# 400010, error# Page size should be greater than or equal to 0 2025-06-30T09:26:51.832795Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:156: [ListObjectsInS3Export] [58:7521671477884328566:2346] Handle TListObjectsInS3ExportRPC::TEvListObjectsInS3ExportResponse: record# Status: BAD_REQUEST Issues { message: "Page size should be greater than or equal to 0" } 2025-06-30T09:26:51.832939Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [58:7521671477884328569:3552], Recipient [58:7521671477884326791:2212]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:26:51.832948Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:26:51.832950Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-30T09:26:51.833759Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:57: [ListObjectsInS3Export] [58:7521671477884328571:2347] Resolve database: name# /Root 2025-06-30T09:26:51.833861Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:73: [ListObjectsInS3Export] [58:7521671477884328571:2347] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-30T09:26:51.833871Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:134: [ListObjectsInS3Export] [58:7521671477884328571:2347] Send request: schemeShardId# 72057594046644480 2025-06-30T09:26:51.833927Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877761, Sender [58:7521671477884328574:3555], Recipient [58:7521671477884326791:2212]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-30T09:26:51.833935Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5057: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-30T09:26:51.833937Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5844: Pipe server connected, at tablet: 72057594046644480 2025-06-30T09:26:51.833961Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 275251210, Sender [58:7521671477884328571:2347], Recipient [58:7521671477884326791:2212]: NKikimrImport.TEvListObjectsInS3ExportRequest OperationParams { } Settings { endpoint: "localhost:20622" scheme: HTTP bucket: "test_bucket" access_key: "test_key" secret_key: "test_secret" prefix: "Prefix" } PageSize: 42 PageToken: "incorrect page token" 2025-06-30T09:26:51.833964Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5077: StateWork, processing event TEvImport::TEvListObjectsInS3ExportRequest 2025-06-30T09:26:51.833986Z node 58 :IMPORT INFO: schemeshard_import_getters.cpp:1340: Reply: self# [58:7521671477884328575:3556], status# 400010, error# Failed to parse page token 2025-06-30T09:26:51.834012Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:156: [ListObjectsInS3Export] [58:7521671477884328571:2347] Handle TListObjectsInS3ExportRPC::TEvListObjectsInS3ExportResponse: record# Status: BAD_REQUEST Issues { message: "Failed to parse page token" } 2025-06-30T09:26:51.834129Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4960: StateWork, received event# 269877764, Sender [58:7521671477884328574:3555], Recipient [58:7521671477884326791:2212]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:26:51.834138Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-30T09:26:51.834140Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5892: Server pipe is reset, at schemeshard: 72057594046644480 >> test_auditlog.py::test_dml_requests_logged_when_unauthorized [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] >> test_auditlog.py::test_dml_begin_commit_logged >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[replace] [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table >> TxUsage::WriteToTopic_Demo_45_Table [GOOD] >> TxUsage::WriteToTopic_Demo_45_Query |85.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |85.5%| [TM] {default-linux-x86_64, relwithdebinfo}
ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v0-200] [GOOD] Test command err: contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=531985) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=531985) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=531985) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_fifo_queue_wo_postfix[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |85.5%| [TA] {RESULT} $(B)/ydb/tests/fq/control_plane_storage/test-results/unittest/{meta.json ... results_accumulator.log} >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[upsert] [GOOD] >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types9-all_types9-index9-Uint8] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types_float-pk_types5-all_types5-index5-Float] |85.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3_float-pk_types1-all_types1-index1-Float] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3_float-pk_types1-all_types1-index1-Float] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types11-all_types11-index11-Uint8] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0_float-pk_types4-all_types4-index4-Float] >> test_auditlog.py::test_single_dml_query_logged[select] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types15-all_types15-index15-Int8] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2_float-pk_types2-all_types2-index2-Float] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types_float-pk_types5-all_types5-index5-Float] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types17-all_types17-index17-Int8] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types10-all_types10-index10-Uint8] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types7-all_types7-index7-Uint8] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1_float-pk_types3-all_types3-index3-Float] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types14-all_types14-index14-Int8] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types9-all_types9-index9-Uint8] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types13-all_types13-index13-Int8] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types12-all_types12-index12-Int8] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types17-all_types17-index17-Int8] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types10-all_types10-index10-Uint8] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[update] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b9b/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk21/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.update/audit.txt 2025-06-30T09:26:53.548940Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:26:53.548923Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-30T09:26:53.523028Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |85.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/test-results/unittest/{meta.json ... 
results_accumulator.log} |85.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dynconfig [GOOD] |85.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_auditlog.py::test_create_and_remove_tenant [GOOD] >> VDiskAssimilation::TestReverse [GOOD] >> VDiskMalfunction::StuckInternalQueues >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b7c/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk8/testing_out_stuff/test_auditlog.py.test_cloud_ids_are_logged.attrs0/audit.txt 2025-06-30T09:26:55.729923Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","cloud_id":"cloud-id-A","end_time":"2025-06-30T09:26:55.729904Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-30T09:26:55.702595Z","subject":"root@builtin","detailed_status":"SUCCESS","resource_id":"database-id-C","operation":"ExecuteDataQueryRequest","folder_id":"folder-id-B","component":"grpc-proxy"} >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types16-all_types16-index16-Int8] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[replace] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b6f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk19/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.replace/audit.txt 2025-06-30T09:26:59.822120Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:26:59.822103Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-30T09:26:59.808632Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} >> test_auditlog.py::test_dml_begin_commit_logged [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types12-all_types12-index12-Int8] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b6b/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk1/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_bad_auth-_good_dynconfig/audit.txt 2025-06-30T09:26:59.969944Z: {"sanitized_token":"**** (C877DF61)","subject":"__bad__@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n 
","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types14-all_types14-index14-Int8] >> TxUsage::WriteToTopic_Demo_45_Query [GOOD] >> TxUsage::WriteToTopic_Demo_46_Table >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected [GOOD] >> VDiskMalfunction::StuckInternalQueues [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b56/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk3/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_no_auth-_good_dynconfig/audit.txt 2025-06-30T09:27:00.952277Z: {"sanitized_token":"{none}","subject":"{none}","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b50/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk6/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_root-_bad_dynconfig/audit.txt 2025-06-30T09:26:57.409803Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"ERROR","subject":"root@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2_float-pk_types2-all_types2-index2-Float] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4_float-pk_types0-all_types0-index0-Float] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> VDiskMalfunction::StuckInternalQueues [GOOD] Test command err: RandomSeed# 16765379519304962128 2025-06-30T09:19:10.052956Z 1 00h01m00.010512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [100:1:1:0:0:0:1] Marker# BSVS44 2025-06-30T09:19:10.158585Z 1 00h01m00.010512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [100:1:1:0:0:0:1] Marker# BSVS44 2025-06-30T09:19:10.158611Z 1 00h01m00.010512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [100:1:1:0:1:0:1] Marker# BSVS44 2025-06-30T09:19:10.158617Z 1 00h01m00.010512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [100:1:1:0:2:0:1] Marker# BSVS44 2025-06-30T09:19:10.158622Z 1 00h01m00.010512s :BS_VDISK_PUT ERROR: PDiskId# 1000 
VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [100:1:1:0:3:0:1] Marker# BSVS44 2025-06-30T09:19:10.158626Z 1 00h01m00.010512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [100:1:1:0:4:0:1] Marker# BSVS44 2025-06-30T09:19:10.158632Z 1 00h01m00.010512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [100:1:1:0:5:0:1] Marker# BSVS44 2025-06-30T09:19:10.158636Z 1 00h01m00.010512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [100:1:1:0:6:0:1] Marker# BSVS44 2025-06-30T09:19:10.158642Z 1 00h01m00.010512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [100:1:1:0:7:0:1] Marker# BSVS44 2025-06-30T09:19:10.158647Z 1 00h01m00.010512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [100:1:1:0:8:0:1] Marker# BSVS44 2025-06-30T09:19:10.158652Z 1 00h01m00.010512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMultiPut: blob size cannot be 0; id# [100:1:1:0:9:0:1] Marker# BSVS44 2025-06-30T09:19:10.661667Z 1 00h01m00.010512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) failed to read data from PDisk SelfId# [1:5585:840] Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} BlobId# [1:1:1:0:0:8388608:0] Parts# 0 0 0 1 0 0 Status# CORRUPTED 2025-06-30T09:19:10.717739Z 2 00h01m00.010512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) failed to read data from PDisk SelfId# [2:5707:654] Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} BlobId# [1:1:1:0:0:8388608:0] Parts# 0 0 0 0 1 0 Status# CORRUPTED 2025-06-30T09:19:10.745171Z 3 00h01m00.010512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) failed to read data from PDisk SelfId# [3:5814:658] Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} BlobId# [1:1:1:0:0:8388608:0] Parts# 0 0 0 0 0 1 Status# CORRUPTED 2025-06-30T09:19:10.813998Z 6 00h01m00.010512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) failed to read data from PDisk SelfId# [6:6053:680] Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} BlobId# [1:1:1:0:0:8388608:0] Parts# 1 0 0 0 0 0 Status# CORRUPTED 2025-06-30T09:19:10.854664Z 7 00h01m00.010512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) failed to read data from PDisk SelfId# [7:6157:686] Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} BlobId# [1:1:1:0:0:8388608:0] Parts# 0 1 0 0 0 0 Status# CORRUPTED 2025-06-30T09:19:10.920397Z 8 00h01m00.010512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) failed to read data from PDisk SelfId# [8:6261:692] Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} BlobId# [1:1:1:0:0:8388608:0] Parts# 0 0 1 0 0 0 Status# CORRUPTED 2025-06-30T09:19:11.244498Z 1 00h02m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 0 0 0 1 0 0 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} 
IsReadable# false 2025-06-30T09:19:11.245454Z 2 00h02m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 0 0 0 0 1 0 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.246306Z 3 00h02m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 0 0 0 0 0 1 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.247268Z 6 00h02m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 1 0 0 0 0 0 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.247865Z 7 00h02m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 0 1 0 0 0 0 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.248421Z 8 00h02m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 0 0 1 0 0 0 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.292508Z 1 00h02m00.060512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) failed to read data from PDisk SelfId# [1:10653:1949] Location# {ChunkIdx: 1 Offset: 4243456 Size: 2097157} BlobId# [1:1:2:0:0:8388608:0] Parts# 0 0 0 0 1 0 Status# CORRUPTED 2025-06-30T09:19:11.320609Z 2 00h02m00.060512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) failed to read data from PDisk SelfId# [2:10760:1215] Location# {ChunkIdx: 1 Offset: 4243456 Size: 2097157} BlobId# [1:1:2:0:0:8388608:0] Parts# 0 0 0 0 0 1 Status# CORRUPTED 2025-06-30T09:19:11.467324Z 5 00h02m00.060512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) failed to read data from PDisk SelfId# [5:11008:1220] Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} BlobId# [1:1:2:0:0:8388608:0] Parts# 1 0 0 0 0 0 Status# CORRUPTED 2025-06-30T09:19:11.550789Z 6 00h02m00.060512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) failed to read data from PDisk SelfId# [6:11112:1254] Location# {ChunkIdx: 1 Offset: 4243456 Size: 2097157} BlobId# [1:1:2:0:0:8388608:0] Parts# 0 1 0 0 0 0 Status# CORRUPTED 2025-06-30T09:19:11.588295Z 7 00h02m00.060512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) failed to read data from PDisk SelfId# [7:11216:1259] Location# {ChunkIdx: 1 Offset: 4243456 Size: 2097157} BlobId# [1:1:2:0:0:8388608:0] Parts# 0 0 1 0 0 0 Status# CORRUPTED 2025-06-30T09:19:11.653756Z 8 00h02m00.060512s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) failed to read data from PDisk SelfId# [8:11320:1265] Location# {ChunkIdx: 1 Offset: 4243456 Size: 2097157} BlobId# [1:1:2:0:0:8388608:0] Parts# 0 0 0 1 0 0 Status# CORRUPTED 2025-06-30T09:19:11.823188Z 1 00h03m00.000000s 
:BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) huge blob read Id# [1:1:2:0:0:8388608:0] Local# 0 0 0 0 1 0 Location# {ChunkIdx: 1 Offset: 4243456 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.823827Z 1 00h03m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 0 0 0 1 0 0 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.824577Z 2 00h03m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) huge blob read Id# [1:1:2:0:0:8388608:0] Local# 0 0 0 0 0 1 Location# {ChunkIdx: 1 Offset: 4243456 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.825021Z 2 00h03m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 0 0 0 0 1 0 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.825518Z 3 00h03m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 0 0 0 0 0 1 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.826207Z 5 00h03m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) huge blob read Id# [1:1:2:0:0:8388608:0] Local# 1 0 0 0 0 0 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.826655Z 6 00h03m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) huge blob read Id# [1:1:2:0:0:8388608:0] Local# 0 1 0 0 0 0 Location# {ChunkIdx: 1 Offset: 4243456 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.826892Z 6 00h03m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 1 0 0 0 0 0 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.827308Z 7 00h03m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) huge blob read Id# [1:1:2:0:0:8388608:0] Local# 0 0 1 0 0 0 Location# {ChunkIdx: 1 Offset: 4243456 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.827509Z 7 00h03m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 0 1 0 0 0 0 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.827920Z 8 00h03m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) huge blob read Id# [1:1:2:0:0:8388608:0] Local# 0 0 0 1 0 0 Location# {ChunkIdx: 1 Offset: 4243456 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.828485Z 8 00h03m00.000000s :BS_VDISK_SCRUB ERROR: {VDS21@scrub_actor_huge_blob_merger.h:55} PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) huge blob read Id# [1:1:1:0:0:8388608:0] Local# 0 0 1 0 0 0 Location# {ChunkIdx: 1 Offset: 0 Size: 2097157} IsReadable# false 2025-06-30T09:19:11.878686Z 1 00h03m00.100000s :BS_VDISK_SCRUB WARN: 
{VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) failed to read data from PDisk SelfId# [1:15704:3042] Location# {ChunkIdx: 1 Offset: 8486912 Size: 2097157} BlobId# [1:1:3:0:0:8388608:0] Parts# 0 0 0 0 0 1 Status# CORRUPTED 2025-06-30T09:19:11.925605Z 4 00h03m00.100000s :BS_VDISK_SCRUB WARN: {VDS26@restore_corrupted_blob_actor.cpp:190} PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) failed to read data from PDisk SelfId# [4:1 ... 4.482792Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE INFO: [423ce154595e5449] bootstrap ActorId# [1:164013:163689] Group# 2181038080 RestartCounter# 0 Marker# BPA01 2025-06-30T09:27:04.482802Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [423ce154595e5449] Request orderNumber# 0 VDiskId# [82000000:1:0:0:0] Marker# BPA03 2025-06-30T09:27:04.483308Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [423ce154595e5449] Handle TEvVAssimilateResult Status# OK ErrorReason# '' VDiskId# [82000000:1:0:0:0] Blocks# 10000 Barriers# 0 Blobs# 0 RequestsInFlight# 1 Marker# BPA02 2025-06-30T09:27:04.483626Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [423ce154595e5449] SendResponseAndDie (10k) Marker# BPA05 2025-06-30T09:27:04.484690Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE INFO: [e2970b77b789b7ff] bootstrap ActorId# [1:164015:163691] Group# 2181038080 RestartCounter# 0 Marker# BPA01 2025-06-30T09:27:04.484700Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [e2970b77b789b7ff] Request orderNumber# 0 VDiskId# [82000000:1:0:0:0] Marker# BPA03 2025-06-30T09:27:04.490857Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [e2970b77b789b7ff] Handle TEvVAssimilateResult Status# OK ErrorReason# '' VDiskId# [82000000:1:0:0:0] Blocks# 1717 Barriers# 100 Blobs# 8183 RequestsInFlight# 1 Marker# BPA02 2025-06-30T09:27:04.491466Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [e2970b77b789b7ff] SendResponseAndDie (10k) Marker# BPA05 2025-06-30T09:27:04.493489Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE INFO: [00017e285308d076] bootstrap ActorId# [1:164017:163693] Group# 2181038080 RestartCounter# 0 Marker# BPA01 2025-06-30T09:27:04.493511Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [00017e285308d076] Request orderNumber# 0 VDiskId# [82000000:1:0:0:0] Marker# BPA03 2025-06-30T09:27:04.494548Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [00017e285308d076] Handle TEvVAssimilateResult Status# OK ErrorReason# '' VDiskId# [82000000:1:0:0:0] Blocks# 0 Barriers# 0 Blobs# 10000 RequestsInFlight# 1 Marker# BPA02 2025-06-30T09:27:04.495124Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [00017e285308d076] SendResponseAndDie (10k) Marker# BPA05 2025-06-30T09:27:04.497165Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE INFO: [931b0887be18d038] bootstrap ActorId# [1:164019:163695] Group# 2181038080 RestartCounter# 0 Marker# BPA01 2025-06-30T09:27:04.497184Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [931b0887be18d038] Request orderNumber# 0 VDiskId# [82000000:1:0:0:0] Marker# BPA03 2025-06-30T09:27:04.498141Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [931b0887be18d038] Handle TEvVAssimilateResult Status# OK ErrorReason# '' VDiskId# [82000000:1:0:0:0] Blocks# 0 Barriers# 0 Blobs# 10000 RequestsInFlight# 1 Marker# BPA02 2025-06-30T09:27:04.498626Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [931b0887be18d038] SendResponseAndDie (10k) Marker# BPA05 2025-06-30T09:27:04.500571Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE INFO: [05505abce6e7a554] bootstrap ActorId# [1:164021:163697] Group# 2181038080 RestartCounter# 0 Marker# BPA01 2025-06-30T09:27:04.500592Z 1 
00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [05505abce6e7a554] Request orderNumber# 0 VDiskId# [82000000:1:0:0:0] Marker# BPA03 2025-06-30T09:27:04.501530Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [05505abce6e7a554] Handle TEvVAssimilateResult Status# OK ErrorReason# '' VDiskId# [82000000:1:0:0:0] Blocks# 0 Barriers# 0 Blobs# 10000 RequestsInFlight# 1 Marker# BPA02 2025-06-30T09:27:04.502019Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [05505abce6e7a554] SendResponseAndDie (10k) Marker# BPA05 2025-06-30T09:27:04.503783Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE INFO: [b12030b7e1c639cc] bootstrap ActorId# [1:164023:163699] Group# 2181038080 RestartCounter# 0 Marker# BPA01 2025-06-30T09:27:04.503802Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [b12030b7e1c639cc] Request orderNumber# 0 VDiskId# [82000000:1:0:0:0] Marker# BPA03 2025-06-30T09:27:04.504717Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [b12030b7e1c639cc] Handle TEvVAssimilateResult Status# OK ErrorReason# '' VDiskId# [82000000:1:0:0:0] Blocks# 0 Barriers# 0 Blobs# 10000 RequestsInFlight# 1 Marker# BPA02 2025-06-30T09:27:04.505177Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [b12030b7e1c639cc] SendResponseAndDie (10k) Marker# BPA05 2025-06-30T09:27:04.506776Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE INFO: [78f107e93c522eff] bootstrap ActorId# [1:164025:163701] Group# 2181038080 RestartCounter# 0 Marker# BPA01 2025-06-30T09:27:04.506792Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [78f107e93c522eff] Request orderNumber# 0 VDiskId# [82000000:1:0:0:0] Marker# BPA03 2025-06-30T09:27:04.507692Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [78f107e93c522eff] Handle TEvVAssimilateResult Status# OK ErrorReason# '' VDiskId# [82000000:1:0:0:0] Blocks# 0 Barriers# 0 Blobs# 10000 RequestsInFlight# 1 Marker# BPA02 2025-06-30T09:27:04.508163Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [78f107e93c522eff] SendResponseAndDie (10k) Marker# BPA05 2025-06-30T09:27:04.509690Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE INFO: [7f051f9fe81a11d0] bootstrap ActorId# [1:164027:163703] Group# 2181038080 RestartCounter# 0 Marker# BPA01 2025-06-30T09:27:04.509706Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [7f051f9fe81a11d0] Request orderNumber# 0 VDiskId# [82000000:1:0:0:0] Marker# BPA03 2025-06-30T09:27:04.510599Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [7f051f9fe81a11d0] Handle TEvVAssimilateResult Status# OK ErrorReason# '' VDiskId# [82000000:1:0:0:0] Blocks# 0 Barriers# 0 Blobs# 10000 RequestsInFlight# 1 Marker# BPA02 2025-06-30T09:27:04.511084Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [7f051f9fe81a11d0] SendResponseAndDie (10k) Marker# BPA05 2025-06-30T09:27:04.512519Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE INFO: [ff94a4292555bd5d] bootstrap ActorId# [1:164029:163705] Group# 2181038080 RestartCounter# 0 Marker# BPA01 2025-06-30T09:27:04.512534Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [ff94a4292555bd5d] Request orderNumber# 0 VDiskId# [82000000:1:0:0:0] Marker# BPA03 2025-06-30T09:27:04.513256Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [ff94a4292555bd5d] Handle TEvVAssimilateResult Status# OK ErrorReason# '' VDiskId# [82000000:1:0:0:0] Blocks# 0 Barriers# 0 Blobs# 9015 RequestsInFlight# 1 Marker# BPA02 2025-06-30T09:27:04.513686Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [ff94a4292555bd5d] Request orderNumber# 0 VDiskId# [82000000:1:0:0:0] Marker# BPA03 2025-06-30T09:27:04.513939Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [ff94a4292555bd5d] Handle TEvVAssimilateResult Status# OK ErrorReason# '' 
VDiskId# [82000000:1:0:0:0] Blocks# 0 Barriers# 0 Blobs# 0 RequestsInFlight# 1 Marker# BPA02 2025-06-30T09:27:04.513965Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [ff94a4292555bd5d] SendResponseAndDie (heap empty) Marker# BPA07 2025-06-30T09:27:04.514921Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE INFO: [4f2783d8bbbf87dd] bootstrap ActorId# [1:164032:163708] Group# 2181038080 RestartCounter# 0 Marker# BPA01 2025-06-30T09:27:04.514933Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [4f2783d8bbbf87dd] Request orderNumber# 0 VDiskId# [82000000:1:0:0:0] Marker# BPA03 2025-06-30T09:27:04.514970Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [4f2783d8bbbf87dd] Handle TEvVAssimilateResult Status# OK ErrorReason# '' VDiskId# [82000000:1:0:0:0] Blocks# 0 Barriers# 0 Blobs# 0 RequestsInFlight# 1 Marker# BPA02 2025-06-30T09:27:04.514974Z 1 00h00m30.010000s :BS_PROXY_ASSIMILATE DEBUG: [4f2783d8bbbf87dd] SendResponseAndDie (heap empty) Marker# BPA07 2025-06-30T09:27:05.117230Z 1 00h02m00.010512s :BS_QUEUE CRIT: [[1:5367:746] TargetVDisk# [82000000:1:0:6:0] Queue# PutTabletLog] @HandleWatchdog: watchdog timer hit InFlightCount# 50 StatusRequests.size# 0 Marker# BSQ19 2025-06-30T09:27:05.117820Z 1 00h02m00.010512s :BS_QUEUE CRIT: [[1:5360:739] TargetVDisk# [82000000:1:0:5:0] Queue# PutTabletLog] @HandleWatchdog: watchdog timer hit InFlightCount# 50 StatusRequests.size# 0 Marker# BSQ19 2025-06-30T09:27:05.118229Z 1 00h02m00.010512s :BS_QUEUE CRIT: [[1:5353:732] TargetVDisk# [82000000:1:0:4:0] Queue# PutTabletLog] @HandleWatchdog: watchdog timer hit InFlightCount# 50 StatusRequests.size# 0 Marker# BSQ19 2025-06-30T09:27:05.118593Z 1 00h02m00.010512s :BS_QUEUE CRIT: [[1:5346:725] TargetVDisk# [82000000:1:0:3:0] Queue# PutTabletLog] @HandleWatchdog: watchdog timer hit InFlightCount# 50 StatusRequests.size# 0 Marker# BSQ19 2025-06-30T09:27:05.118991Z 1 00h02m00.010512s :BS_QUEUE CRIT: [[1:5332:711] TargetVDisk# [82000000:1:0:1:0] Queue# PutTabletLog] @HandleWatchdog: watchdog timer hit InFlightCount# 50 StatusRequests.size# 0 Marker# BSQ19 2025-06-30T09:27:05.120519Z 1 00h02m00.010512s :BS_QUEUE CRIT: [[1:5325:704] TargetVDisk# [82000000:1:0:0:0] Queue# PutTabletLog] @HandleWatchdog: watchdog timer hit InFlightCount# 50 StatusRequests.size# 0 Marker# BSQ19 2025-06-30T09:27:05.120773Z 1 00h02m00.010512s :BS_QUEUE CRIT: [[1:5374:753] TargetVDisk# [82000000:1:0:7:0] Queue# PutTabletLog] @HandleWatchdog: watchdog timer hit InFlightCount# 50 StatusRequests.size# 0 Marker# BSQ19 2025-06-30T09:27:05.121046Z 1 00h02m00.010512s :BS_QUEUE CRIT: [[1:5339:718] TargetVDisk# [82000000:1:0:2:0] Queue# PutTabletLog] @HandleWatchdog: watchdog timer hit InFlightCount# 50 StatusRequests.size# 0 Marker# BSQ19 2025-06-30T09:27:05.129338Z 1 00h02m00.010512s :BS_PROXY_PUT ERROR: [5a76c14aa6e102da] Result# TEvPutResult {Id# [1:1:1:1:1:100000:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:1:1:1:100000:0] Reported ErrorReasons# [ { OrderNumber# 4 VDiskId# [82000000:1:0:4:0] NodeId# 5 ErrorReasons# [ "BS_QUEUE: watchdog timer hit", ] } { OrderNumber# 5 VDiskId# [82000000:1:0:5:0] NodeId# 6 ErrorReasons# [ "BS_QUEUE: watchdog timer hit", ] } { OrderNumber# 6 VDiskId# [82000000:1:0:6:0] NodeId# 7 ErrorReasons# [ "BS_QUEUE: watchdog timer hit", ] } ] Part situations# [ { OrderNumber# 4 Situations# EUUUUU } { OrderNumber# 5 Situations# UEUUUU } { OrderNumber# 6 Situations# UUEUUU } { OrderNumber# 7 Situations# UUUSUU } { OrderNumber# 0 Situations# 
UUUUSU } { OrderNumber# 1 Situations# UUUUUS } { OrderNumber# 2 Situations# UUSUUU } { OrderNumber# 3 Situations# USUUUU } ] " ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12 2025-06-30T09:27:05.431046Z 1 00h05m00.010512s :BS_SKELETON CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Stuck internal queue detected, restarting VDisk, Queue.Name# LogPuts Marker# BSVSF08 2025-06-30T09:27:05.431077Z 6 00h05m00.010512s :BS_SKELETON CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Stuck internal queue detected, restarting VDisk, Queue.Name# LogPuts Marker# BSVSF08 2025-06-30T09:27:05.431083Z 7 00h05m00.010512s :BS_SKELETON CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Stuck internal queue detected, restarting VDisk, Queue.Name# LogPuts Marker# BSVSF08 2025-06-30T09:27:05.431089Z 8 00h05m00.010512s :BS_SKELETON CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Stuck internal queue detected, restarting VDisk, Queue.Name# LogPuts Marker# BSVSF08 2025-06-30T09:27:05.431096Z 2 00h05m00.010512s :BS_SKELETON CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Stuck internal queue detected, restarting VDisk, Queue.Name# LogPuts Marker# BSVSF08 2025-06-30T09:27:05.431102Z 3 00h05m00.010512s :BS_SKELETON CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Stuck internal queue detected, restarting VDisk, Queue.Name# LogPuts Marker# BSVSF08 2025-06-30T09:27:05.431107Z 4 00h05m00.010512s :BS_SKELETON CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Stuck internal queue detected, restarting VDisk, Queue.Name# LogPuts Marker# BSVSF08 2025-06-30T09:27:05.431112Z 5 00h05m00.010512s :BS_SKELETON CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Stuck internal queue detected, restarting VDisk, Queue.Name# LogPuts Marker# BSVSF08 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_unauthorized [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b4f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk15/testing_out_stuff/test_auditlog.py.test_dml_requests_logged_when_unauthorized/audit.txt 2025-06-30T09:26:57.532335Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:26:57.532324Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-30T09:26:57.529147Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-30T09:26:57.641326Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:26:57.641309Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-30T09:26:57.637383Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-30T09:26:57.751392Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:26:57.751365Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"select id from 
`/Root/test_auditlog.py/test-table`","start_time":"2025-06-30T09:26:57.747494Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-30T09:26:57.862476Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:26:57.862462Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-30T09:26:57.857381Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-30T09:26:57.971633Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:26:57.971622Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-30T09:26:57.968093Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-30T09:26:58.081318Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:26:58.081302Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-30T09:26:58.077032Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4_float-pk_types0-all_types0-index0-Float] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create[std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types6-all_types6-index6-Uint8] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dynconfig [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b48/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk16/testing_out_stuff/test_auditlog.py.test_dynconfig/audit.txt 2025-06-30T09:27:03.342809Z: {"sanitized_token":"**** (B6C6F477)","subject":"root@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpStats::SysViewCancelled [GOOD] Test command err: Trying to start YDB, gRPC: 3126, MsgBus: 13881 2025-06-30T09:25:24.949194Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671104271598596:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:24.950240Z node 1 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004236/r3tmp/tmpg5jCN4/pdisk_1.dat 2025-06-30T09:25:25.024387Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:25.027585Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671104271598481:2080] 1751275524947887 != 1751275524947890 TServer::EnableGrpc on GrpcPort 3126, node 1 2025-06-30T09:25:25.040136Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:25.040151Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:25.040155Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:25.040199Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:25.050217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:25.050244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:25.053359Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13881 TClient is connected to server localhost:13881 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:25.128850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-30T09:25:25.134043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:25:25.136665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:25.168174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:25.197730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:25.225489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:25.503807Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671108566567392:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:25.503840Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:25.553988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.563670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.574885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.590853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.603569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.618112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.674560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:25.691714Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671108566568049:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:25.691749Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:25.691862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671108566568054:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:25.692789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:25.700447Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671108566568056:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-30T09:25:25.799408Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671108566568107:3400] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:25.950529Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Plans":[{"PlanNodeId":7,"Plans":[{"PlanNodeId":6,"Plans":[{"E-Size":"0","PlanNodeId":5,"LookupKeyColumns":["Key"],"Node Type":"TableLookup","Path":"\/Root\/TwoShard","Columns":["Key","Value1","Value2"],"E-Rows":"0","Table":"TwoShard","Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/KeyValue","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"KeyValue","ReadColumns":["Key"],"E-Cost":"0"}],"Node Type":"Tab ... eACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:35.068190Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:35.068231Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:35.068271Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:35.069268Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:35.075799Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:26:35.087116Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:35.147967Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:35.158192Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:35.313592Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671407577096405:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:35.313619Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:35.322813Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:35.331559Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:35.344196Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:35.358280Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:35.372073Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:35.385799Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:35.399892Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:35.416112Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671407577097058:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:35.416136Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7521671407577097063:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:35.416148Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:35.417041Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:35.419092Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7521671407577097065:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:35.507513Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7521671407577097116:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:35.676357Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:35.965018Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:39.400621Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275599394, txId: 281474976715673] shutting down 2025-06-30T09:26:39.568238Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1993: ActorId: [4:7521671424756967514:2602] TxId: 281474976715675. Ctx: { TraceId: 01jz02hxkc29evf14zv810dppr, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NTRlNjBkNDAtZjZhNWYzODktNDM2MjFhYjctMjk1ZDVhMDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 100ms } {
: Error: Cancelling after 99ms during execution } ] 2025-06-30T09:26:39.568648Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7521671424756967543:2629], TxId: 281474976715675, task: 9. Ctx: { SessionId : ydb://session/3?node_id=4&id=NTRlNjBkNDAtZjZhNWYzODktNDM2MjFhYjctMjk1ZDVhMDM=. TraceId : 01jz02hxkc29evf14zv810dppr. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7521671424756967514:2602], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-30T09:26:39.674751Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7521671424756967535:2621], TxId: 281474976715675, task: 1. Ctx: { TraceId : 01jz02hxkc29evf14zv810dppr. SessionId : ydb://session/3?node_id=4&id=NTRlNjBkNDAtZjZhNWYzODktNDM2MjFhYjctMjk1ZDVhMDM=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7521671424756967514:2602], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-30T09:26:39.674985Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=NTRlNjBkNDAtZjZhNWYzODktNDM2MjFhYjctMjk1ZDVhMDM=, ActorId: [4:7521671424756967456:2602], ActorState: ExecuteState, TraceId: 01jz02hxkc29evf14zv810dppr, Create QueryResponse for error on request, msg: 2025-06-30T09:26:39.675268Z node 4 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037922 Cancelled read: {[4:7521671424756967560:2621], 0}
: Error: Request canceled after 100ms
: Error: Cancelling after 99ms during execution 2025-06-30T09:26:39.727165Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1751275599722, txId: 281474976715677] shutting down >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types11-all_types11-index11-Uint8] >> test_vector_index_negative.py::TestVectorIndexNegative::test_vector_index_negative >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types8-all_types8-index8-Uint8] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[upsert] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b2f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk22/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.upsert/audit.txt 2025-06-30T09:27:01.656817Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:01.656801Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-30T09:27:01.643744Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b2b/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk9/testing_out_stuff/test_auditlog.py.test_cloud_ids_are_logged.attrs1/audit.txt 2025-06-30T09:27:01.896706Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:01.896687Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-30T09:27:01.874924Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","folder_id":"folder-id-B","component":"grpc-proxy"} >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types16-all_types16-index16-Int8] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_create_and_remove_tenant [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b27/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk10/testing_out_stuff/test_auditlog.py.test_create_and_remove_tenant/audit.txt 2025-06-30T09:26:58.797734Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"BEGIN INIT DATABASE CONFIG","remote_address":"::1","database":"/Root/users/database"} 2025-06-30T09:26:58.800639Z: {"paths":"[/Root/users/database]","tx_id":"281474976715660","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DATABASE","component":"schemeshard"} 2025-06-30T09:26:58.807670Z: 
{"paths":"[/Root/users/database]","tx_id":"281474976715661","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"ALTER DATABASE","component":"schemeshard"} 2025-06-30T09:26:59.796773Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"END INIT DATABASE CONFIG","remote_address":"::1","database":"/Root/users/database"} 2025-06-30T09:26:59.973039Z: {"paths":"[.metadata/workload_manager/pools/default]","tx_id":"281474976720657","new_owner":"metadata@system","acl_add":"[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin]","database":"/Root/users/database","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"metadata@system","detailed_status":"StatusAccepted","operation":"CREATE RESOURCE POOL","component":"schemeshard"} 2025-06-30T09:27:00.047135Z: {"reason":"Check failed: path: '/Root/users/database/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92","paths":"[default]","tx_id":"281474976720658","new_owner":"metadata@system","acl_add":"[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin]","database":"/Root/users/database","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"metadata@system","detailed_status":"StatusAlreadyExists","operation":"CREATE RESOURCE POOL","component":"schemeshard"} 2025-06-30T09:27:02.323203Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"BEGIN REMOVE DATABASE","remote_address":"::1","database":"/Root/users/database"} 2025-06-30T09:27:02.324782Z: {"paths":"[/Root/users/database]","tx_id":"281474976715662","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"DROP DATABASE","component":"schemeshard"} 2025-06-30T09:27:02.330656Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"END REMOVE DATABASE","remote_address":"::1","database":"/Root/users/database"} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b22/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk4/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_other-_bad_dynconfig/audit.txt 2025-06-30T09:27:02.136626Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"othe****ltin (27F910A9)","remote_address":"127.0.0.1","status":"ERROR","subject":"other-user@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[select] [GOOD] Test command err: AAA 
/home/runner/.ya/build/build_root/cl3w/004b25/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk20/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.select/audit.txt 2025-06-30T09:27:02.391692Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:02.391678Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-06-30T09:27:02.376710Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table [GOOD] >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookupDepededRead [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookupDepededRead [GOOD] Test command err: Trying to start YDB, gRPC: 1605, MsgBus: 23379 2025-06-30T09:25:33.973516Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671141902675204:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:33.973664Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00421a/r3tmp/tmptVapQ1/pdisk_1.dat 2025-06-30T09:25:34.059243Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:34.060889Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671141902674986:2080] 1751275533969873 != 1751275533969876 TServer::EnableGrpc on GrpcPort 1605, node 1 2025-06-30T09:25:34.077407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:34.077435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:34.079208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:34.084950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:34.084961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:34.084964Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:34.085012Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23379 TClient is connected to server localhost:23379 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-30T09:25:34.164999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:25:34.168043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:34.178264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:25:34.419487Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671146197643249:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.419601Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671146197643244:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.419612Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:34.420641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:25:34.423991Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671146197643258:2319], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-30T09:25:34.508421Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671146197643310:2552] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:25:34.574492Z node 1 :KQP_COMPUTE WARN: log.cpp:784: fline=kqp_compute_actor_factory.cpp:41;problem=cannot_allocate_memory;tx_id=281474976715661;task_id=1;memory=1048576; 2025-06-30T09:25:34.574512Z node 1 :KQP_COMPUTE WARN: dq_compute_memory_quota.h:152: TxId: 281474976715661, task: 1. [Mem] memory 1048576 NOT granted 2025-06-30T09:25:34.576313Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7521671146197643351:2327], TxId: 281474976715661, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=MTQ0MjNkMzAtYjljZTllYTQtYzM4NjY4MmEtYmQ2YzFhMDA=. CustomerSuppliedId : . TraceId : 01jz02fy2j484wkz93csg62cfj. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: OVERLOADED KIKIMR_PRECONDITION_FAILED: {
: Error: Mkql memory limit exceeded, allocated by task 1: 10, host: ghrun-x4qzxsyz2q, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976715661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-30T09:25:34.573314Z }, code: 2029 }. 2025-06-30T09:25:34.576493Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7521671146197643352:2328], TxId: 281474976715661, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=MTQ0MjNkMzAtYjljZTllYTQtYzM4NjY4MmEtYmQ2YzFhMDA=. CustomerSuppliedId : . TraceId : 01jz02fy2j484wkz93csg62cfj. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7521671146197643340:2312], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-06-30T09:25:34.577171Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=1&id=MTQ0MjNkMzAtYjljZTllYTQtYzM4NjY4MmEtYmQ2YzFhMDA=, ActorId: [1:7521671146197643217:2312], ActorState: ExecuteState, TraceId: 01jz02fy2j484wkz93csg62cfj, Create QueryResponse for error on request, msg:
: Error: Mkql memory limit exceeded, allocated by task 1: 10, host: ghrun-x4qzxsyz2q, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976715661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-30T09:25:34.573314Z } , code: 2029 Trying to start YDB, gRPC: 5429, MsgBus: 4952 2025-06-30T09:25:34.886057Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7521671147086976212:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:34.886106Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00421a/r3tmp/tmpeGoDZx/pdisk_1.dat 2025-06-30T09:25:34.904607Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5429, node 2 2025-06-30T09:25:34.916487Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:34.916504Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:34.916507Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:34.916557Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4952 TClient is connected to server localhost:4952 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:34.991037Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:34.991071Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:34.991340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 2814 ... 
ate QueryResponse for error on request, msg: 2025-06-30T09:26:34.089515Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hraf332p75ctk1pf67ff, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.117065Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrba65499rrctpxa4e75, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.145175Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrc69rzjh67mrtgdrtv0, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.189269Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrdf3rct2vfe7hhkvpc8, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.221162Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hreefdz50n8yj0jex5b9, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.286557Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrge567vk2msxz2w7b7c, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.320822Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrhf9vz9mh37pec60b4p, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.356164Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrjhdsxqgahs5vkd1195, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.393021Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrkn1debjvyqvp957crt, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.430450Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrmtcd0ysyw5scp00tpt, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.485181Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrpe6xd3tpk28nfr74c1, Create 
QueryResponse for error on request, msg: 2025-06-30T09:26:34.525688Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrqp47fe3ttphdmhtqgz, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.608607Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrt7e0h1z8vrzk6rxd98, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.682369Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrwe2sfqk77zymr8dgyz, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.752982Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hrymcqsk6vr544vmnvh5, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.825712Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hs0t22aynjf7v1kz7m69, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.891251Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hs2t6d4c7epj9xf57vr8, Create QueryResponse for error on request, msg: 2025-06-30T09:26:34.961862Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hs4yeakscyrtbzy9xnte, Create QueryResponse for error on request, msg: 2025-06-30T09:26:35.026862Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hs6xags2db06sckbvzbk, Create QueryResponse for error on request, msg: 2025-06-30T09:26:35.101681Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hs96eecd66hanavb9za4, Create QueryResponse for error on request, msg: 2025-06-30T09:26:35.181015Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hsbk6rtjmkkk7rbbsvjp, Create QueryResponse for error on request, msg: 2025-06-30T09:26:35.248592Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hsdn8hk71jast56eb77g, Create 
QueryResponse for error on request, msg: 2025-06-30T09:26:35.315186Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hsfncrd5gjps879ercye, Create QueryResponse for error on request, msg: 2025-06-30T09:26:35.396061Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hsj45mh8vdvc381ccj1w, Create QueryResponse for error on request, msg: 2025-06-30T09:26:35.495156Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hsn5ephtwenekm0f86yy, Create QueryResponse for error on request, msg: 2025-06-30T09:26:35.570634Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hsqf7711j4yz63yc60m8, Create QueryResponse for error on request, msg: 2025-06-30T09:26:35.660776Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hst7b8dk8nvxngkvwwrg, Create QueryResponse for error on request, msg: 2025-06-30T09:26:35.816415Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hsz0431bhd4bd6fbewf4, Create QueryResponse for error on request, msg: 2025-06-30T09:26:36.045799Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02ht61avtvez4179p2sc10, Create QueryResponse for error on request, msg: 2025-06-30T09:26:36.145631Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02ht9363e2wdvhy6c239s8, Create QueryResponse for error on request, msg: 2025-06-30T09:26:36.244937Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02htc48hhratgax3qqejw5, Create QueryResponse for error on request, msg: 2025-06-30T09:26:36.424215Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02hthm5bxy19rz04j22hgm, Create QueryResponse for error on request, msg: 2025-06-30T09:26:36.521436Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02htmkb1wxgf69rhckxwq7, Create 
QueryResponse for error on request, msg: 2025-06-30T09:26:36.620738Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02htqn07vktby1t7nc1az5, Create QueryResponse for error on request, msg: 2025-06-30T09:26:36.724208Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2676: SessionId: ydb://session/3?node_id=4&id=ZTkzMWMxNjEtOGUwNmMwNjctNGNlMDBjMS0yMTE5YzI4MA==, ActorId: [4:7521671402505863874:2469], ActorState: ExecuteState, TraceId: 01jz02htttcdm9v4ketak31eh2, Create QueryResponse for error on request, msg: 2025-06-30T09:26:37.794945Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7521671398210894049:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:37.794977Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-30T09:26:47.814506Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7389: Cannot get console configs 2025-06-30T09:26:47.814520Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types_float-pk_types5-all_types5-index5-Float] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 4467, MsgBus: 1670 2025-06-30T09:25:45.901463Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671193855275169:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:45.903143Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ce0/r3tmp/tmpCKaTDY/pdisk_1.dat 2025-06-30T09:25:45.977059Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4467, node 1 2025-06-30T09:25:46.000544Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:46.000559Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:46.000562Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:46.000606Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:46.002537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:46.002557Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:46.004652Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1670 TClient is connected to server localhost:1670 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:25:46.098690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:25:46.101053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:25:46.479756Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671198150243036:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.479791Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:46.537276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.628508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.645339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:46.675530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710762:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.703526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710763:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:46.726603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.749200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710766:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.767659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710767:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:46.797491Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710770:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.839644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710771:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:46.880227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710774:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.899707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710775:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:46.903283Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:25:46.924256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.964008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710778:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:46.992932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710779:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:47.028031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710782:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:47.050471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710783:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:25:47.081599Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671202445211667:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default no ... ror: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:40.972898Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:40.978277Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521671428895621583:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:41.061528Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521671433190588930:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 12116, MsgBus: 25639 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/001ce0/r3tmp/tmpYbL3eM/pdisk_1.dat 2025-06-30T09:26:41.677974Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:41.678129Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 12116, node 7 2025-06-30T09:26:41.688558Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:26:41.688576Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:26:41.688579Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:26:41.688635Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25639 TClient is connected to server localhost:25639 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:41.764705Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:41.764737Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:41.765458Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:26:41.765779Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:26:41.771902Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:41.783905Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:41.819772Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:41.833730Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:42.120909Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671439335316972:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:42.120931Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:42.129158Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:42.146869Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:42.157270Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:42.171042Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:42.183591Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:42.197455Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:42.211932Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:42.236087Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671439335317625:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:42.236140Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:42.236371Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671439335317630:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:42.237679Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:42.240595Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671439335317632:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:26:42.335798Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671439335317683:3403] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:42.669494Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types11-all_types11-index11-Uint8] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1_float-pk_types3-all_types3-index3-Float] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types8-all_types8-index8-Uint8] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types17-all_types17-index17-Int8] [GOOD] >> TxUsage::WriteToTopic_Demo_46_Table [GOOD] >> TxUsage::WriteToTopic_Demo_46_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b11/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk2/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_no_auth-_bad_dynconfig/audit.txt 2025-06-30T09:27:05.067181Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"{none}","remote_address":"127.0.0.1","status":"ERROR","subject":"{none}","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_begin_commit_logged [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b17/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk11/testing_out_stuff/test_auditlog.py.test_dml_begin_commit_logged/audit.txt 2025-06-30T09:27:05.097508Z: {"tx_id":"01jz02jpm99dw5eekhb1bgc0gg","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:05.097491Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"SUCCESS","start_time":"2025-06-30T09:27:05.097167Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"BeginTransactionRequest","component":"grpc-proxy"} 2025-06-30T09:27:05.126871Z: {"tx_id":"01jz02jpm99dw5eekhb1bgc0gg","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:05.126850Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","commit_tx":"0","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-30T09:27:05.101798Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-30T09:27:05.132593Z: 
{"tx_id":"01jz02jpm99dw5eekhb1bgc0gg","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:05.132579Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"SUCCESS","start_time":"2025-06-30T09:27:05.130491Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"CommitTransactionRequest","component":"grpc-proxy"} >> test_vector_index_negative.py::TestVectorIndexNegative::test_vector_index_negative [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/cl3w/004b12/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk14/testing_out_stuff/test_auditlog.py.test_dml_requests_logged_when_sid_is_unexpected/audit.txt 2025-06-30T09:27:05.376857Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:05.376839Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-30T09:27:05.361493Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-30T09:27:05.508599Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:05.508581Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-30T09:27:05.481892Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-30T09:27:05.635275Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:05.635259Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-06-30T09:27:05.614231Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-30T09:27:05.767977Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:05.767961Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-30T09:27:05.742102Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-30T09:27:05.888854Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:05.888834Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-30T09:27:05.873486Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-30T09:27:06.010658Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-30T09:27:06.010628Z","sanitized_token":"othe****ltin 
(27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-30T09:27:05.993936Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |85.5%| [TA] $(B)/ydb/core/kqp/ut/opt/test-results/unittest/{meta.json ... results_accumulator.log} >> TConsoleConfigSubscriptionTests::TestConfigSubscriptionsCleanup [GOOD] >> TConsoleConfigTests::TestAddConfigItem |85.5%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/opt/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types_float-pk_types5-all_types5-index5-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> TConsoleConfigTests::TestAddConfigItem [GOOD] >> TConsoleConfigTests::TestAutoKind ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types11-all_types11-index11-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types17-all_types17-index17-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> TConsoleConfigTests::TestAutoKind [GOOD] >> TConsoleConfigTests::TestAllowedScopes >> TConsoleConfigTests::TestAllowedScopes [GOOD] >> TConsoleConfigTests::TestAffectedConfigs |85.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index_negative.py::TestVectorIndexNegative::test_vector_index_negative [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1_float-pk_types3-all_types3-index3-Float] [GOOD] >> TConsoleConfigTests::TestAffectedConfigs [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types9-all_types9-index9-Uint8] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0_float-pk_types4-all_types4-index4-Float] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types10-all_types10-index10-Uint8] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-10.test] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join3.test] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join0.test] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-create_table.test] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-1.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/console/ut/unittest >> TConsoleConfigTests::TestAffectedConfigs [GOOD] Test command err: 2025-06-30T09:20:40.644142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7576: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-30T09:20:40.644177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7604: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:40.644184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7490: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-30T09:20:40.644191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7506: OperationsProcessing config: using default configuration 2025-06-30T09:20:40.644208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-30T09:20:40.644235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7512: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-30T09:20:40.644248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7636: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-30T09:20:40.644283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-30T09:20:40.644400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7707: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-30T09:20:40.644496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-30T09:20:40.649146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7397: Cannot subscribe to console configs 
2025-06-30T09:20:40.649180Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:20:40.653037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-30T09:20:40.653130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-30T09:20:40.653185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046578944 2025-06-30T09:20:40.655091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-30T09:20:40.655236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-30T09:20:40.655367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:40.655507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: dc-1, pathId: [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:40.656296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:40.656345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-30T09:20:40.656563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:40.656575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046578944 2025-06-30T09:20:40.656685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-30T09:20:40.656699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046578944, domainId: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:40.656707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-30T09:20:40.656726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6762: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.691956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "hdd" } StoragePools { Name: "" Kind: "hdd-3" } StoragePools { Name: "" Kind: "hdd-1" } StoragePools { Name: "" Kind: "hdd-2" } } } TxId: 1 TabletId: 72057594046578944 , at schemeshard: 72057594046578944 2025-06-30T09:20:40.692035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //dc-1, opId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.692108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 0 2025-06-30T09:20:40.692115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5241: CreateTx for txid 1:0 type: 
TxAlterSubDomain target path: [OwnerId: 72057594046578944, LocalPathId: 1] source path: 2025-06-30T09:20:40.692157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046578944 2025-06-30T09:20:40.692170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-30T09:20:40.692973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:456: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046578944 PathId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:40.693027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //dc-1 2025-06-30T09:20:40.693076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.693087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046578944 2025-06-30T09:20:40.693091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-30T09:20:40.693096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 2 -> 3 2025-06-30T09:20:40.693552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.693562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046578944 2025-06-30T09:20:40.693566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 3 -> 128 2025-06-30T09:20:40.693877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.693889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:20:40.693894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:40.693900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1661: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-30T09:20:40.694400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1730: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046578944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-30T09:20:40.694792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg 
operationId: 1:4294967295 from tablet: 72057594046578944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-30T09:20:40.694856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1762: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-30T09:20:40.695105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:40.695117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-30T09:20:40.695122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:40.920790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:681: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046578944 2025-06-30T09:20:40.920862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046578944, at schemeshard: 72057594046578944 2025-06-30T09:20:40.920875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:40.920982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2565: Change state for txid 1:0 128 -> 240 2025-06-30T09:20:40.920994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046578944 2025-06-30T09:20:40.921038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:506: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 1 2025-06-30T09:20:40.921053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046578944, LocalPathId: 1], at schemeshard: 72057594046578944 2025-06-30T09:20:40.921848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046578944 2025-06-30T09:20:40.921865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046578944, txId: 1, path id: [OwnerId: 72057594046578944, LocalPathId: 1] 2025-06-30T09:20:40.92192 ... 
_operation.cpp:490: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046578944 2025-06-30T09:27:18.582281Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046578944] TDone opId# 1:0 ProgressState 2025-06-30T09:27:18.582307Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:27:18.582312Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:27:18.582318Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-30T09:27:18.582322Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:27:18.582328Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-30T09:27:18.582335Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1652: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-30T09:27:18.582340Z node 29 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-30T09:27:18.582345Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5265: RemoveTx for txid 1:0 2025-06-30T09:27:18.582357Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 2 2025-06-30T09:27:18.582364Z node 29 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-06-30T09:27:18.582372Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046578944, LocalPathId: 1], 3 2025-06-30T09:27:18.582567Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5925: Handle TEvUpdateAck, at schemeshard: 72057594046578944, msg: Owner: 72057594046578944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046578944, cookie: 1 2025-06-30T09:27:18.582581Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046578944, msg: Owner: 72057594046578944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046578944, cookie: 1 2025-06-30T09:27:18.582587Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046578944, txId: 1 2025-06-30T09:27:18.582592Z node 29 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046578944, txId: 1, pathId: [OwnerId: 72057594046578944, LocalPathId: 1], version: 3 2025-06-30T09:27:18.582597Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:517: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046578944, LocalPathId: 1] was 1 2025-06-30T09:27:18.582611Z node 29 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046578944, txId: 1, subscribers: 1 2025-06-30T09:27:18.582616Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send 
TEvNotifyTxCompletionResult, at schemeshard: 72057594046578944, to actorId: [29:118:2151] 2025-06-30T09:27:18.583674Z node 29 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046578944, cookie: 1 2025-06-30T09:27:18.583777Z node 29 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [29:316:2293] Bootstrap 2025-06-30T09:27:18.585870Z node 29 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [29:316:2293] Become StateWork (SchemeCache [29:321:2298]) 2025-06-30T09:27:18.586160Z node 29 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [29:316:2293] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-30T09:27:18.586908Z node 29 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-30T09:27:18.588544Z node 29 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-30T09:27:18.589420Z node 29 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-30T09:27:18.589511Z node 29 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-30T09:27:18.589786Z node 29 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-30T09:27:18.589880Z node 29 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-30T09:27:18.589913Z node 29 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-30T09:27:18.589919Z node 29 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-30T09:27:18.589958Z node 29 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-30T09:27:18.591986Z node 29 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-30T09:27:18.592016Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-30T09:27:18.592047Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-30T09:27:18.592141Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:27:18.592158Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-30T09:27:18.592168Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:27:18.614200Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-30T09:27:18.614263Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:27:18.625195Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-30T09:27:18.625251Z node 29 :BS_CONTROLLER DEBUG: 
{BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:27:18.625269Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-30T09:27:18.625282Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:27:18.625311Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-30T09:27:18.625321Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:27:18.625327Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-30T09:27:18.625336Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:27:18.636331Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-30T09:27:18.636393Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:27:18.647404Z node 29 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-30T09:27:18.647460Z node 29 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-30T09:27:18.647656Z node 29 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-30T09:27:18.647664Z node 29 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-30T09:27:18.647750Z node 29 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-30T09:27:18.647759Z node 29 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-30T09:27:18.648002Z node 29 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/cl3w/0043bd/r3tmp/tmp656yDT/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } } } } 2025-06-30T09:27:18.648057Z node 29 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 29:1 Path# /home/runner/.ya/build/build_root/cl3w/0043bd/r3tmp/tmp656yDT/pdisk_1.dat 2025-06-30T09:27:18.659373Z node 29 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-30T09:27:18.659474Z node 29 :LOCAL DEBUG: local.cpp:1542: TLocal::Bootstrap 2025-06-30T09:27:18.659490Z node 29 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-30T09:27:18.659526Z node 29 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 
Network: 1 2025-06-30T09:27:18.659546Z node 29 :LOCAL DEBUG: local.cpp:1492: TDomainLocal(dc-1): Bootstrap 2025-06-30T09:27:18.659648Z node 29 :LOCAL DEBUG: local.cpp:1200: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-30T09:27:18.659659Z node 29 :LOCAL DEBUG: local.cpp:1007: TLocalNodeRegistrar::Bootstrap 2025-06-30T09:27:18.659665Z node 29 :LOCAL DEBUG: local.cpp:183: TLocalNodeRegistrar::TryToRegister 2025-06-30T09:27:18.659684Z node 29 :LOCAL DEBUG: local.cpp:218: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[29:411:2364] 2025-06-30T09:27:18.660144Z node 29 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-30T09:27:18.660156Z node 29 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [29:405:2360] 2025-06-30T09:27:18.660300Z node 29 :LOCAL DEBUG: local.cpp:265: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[29:411:2364]} 2025-06-30T09:27:18.660311Z node 29 :LOCAL DEBUG: local.cpp:329: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-30T09:27:18.660320Z node 29 :LOCAL DEBUG: local.cpp:385: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-30T09:27:18.660324Z node 29 :LOCAL DEBUG: local.cpp:302: TLocalNodeRegistrar SendStatusOk >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 |85.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test |85.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test |85.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> TFqYdbTest::ShouldStatusToIssuesProcessEmptyIssues [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration >> TFqYdbTest::ShouldStatusToIssuesProcessExceptions [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration [GOOD] >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration [GOOD] |85.5%| [TA] $(B)/ydb/tests/functional/audit/test-results/py3test/{meta.json ... results_accumulator.log} |85.5%| [TA] {RESULT} $(B)/ydb/tests/functional/audit/test-results/py3test/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1_float-pk_types3-all_types3-index3-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |85.5%| [TA] $(B)/ydb/core/cms/console/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0_float-pk_types4-all_types4-index4-Float] >> TxUsage::WriteToTopic_Demo_46_Query [GOOD] >> KqpAnalyze::AnalyzeTable+ColumnStore [FAIL] >> KqpAnalyze::AnalyzeTable-ColumnStore >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query >> TxUsage::WriteToTopic_Demo_47_Table |85.6%| [TA] {RESULT} $(B)/ydb/core/cms/console/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |85.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TFqYdbTest::ShouldStatusToIssuesProcessEmptyIssues [GOOD] |85.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TFqYdbTest::ShouldStatusToIssuesProcessExceptions [GOOD] |85.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration [GOOD] |85.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types7-all_types7-index7-Uint8] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types6-all_types6-index6-Uint8] |85.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table >> KqpAnalyze::AnalyzeTable-ColumnStore [GOOD] >> KqpExplain::AggGroupLimit >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact |85.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |85.6%| [LD] {RESULT} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |85.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-6.test] >> KqpExplain::AggGroupLimit [GOOD] >> KqpExplain::ComplexJoin |85.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join3.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join4.test] >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration |85.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types9-all_types9-index9-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |85.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test |85.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0_float-pk_types4-all_types4-index4-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. 
Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> KqpExplain::ComplexJoin [GOOD] >> KqpExplain::CompoundKeyRange ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types10-all_types10-index10-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_postgres.py::TestPGSQL::test_sql_suite[plan-create_table.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/coalesce-and-join.test] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types13-all_types13-index13-Int8] >> KqpExplain::CompoundKeyRange [GOOD] |85.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails >> test_postgres.py::TestPGSQL::test_sql_suite[plan-abstime.test] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types15-all_types15-index15-Int8] >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact [GOOD] >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails [GOOD] >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/coalesce-and-join.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join-group-by-with-null.test] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types16-all_types16-index16-Int8] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-1.test] |85.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 [GOOD] |85.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> TSubscriberSinglePathUpdateTest::OneWriteOnlyRingGroup [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types_float-pk_types5-all_types5-index5-Float] [GOOD] >> TSubscriberSinglePathUpdateTest::OneDisconnectedRingGroup [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberSinglePathUpdateTest::OneWriteOnlyRingGroup [GOOD] Test command err: ... 
waiting for initial path lookups 2025-06-30T09:24:17.100798Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:19:2066][TestPath] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.101245Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:2:2049] 2025-06-30T09:24:17.101268Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:5:2052] 2025-06-30T09:24:17.101276Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:8:2055] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR 2025-06-30T09:24:17.101359Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:20:2066] 2025-06-30T09:24:17.101383Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:21:2066] 2025-06-30T09:24:17.101394Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:19:2066][TestPath] Set up state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.101403Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:22:2066] 2025-06-30T09:24:17.101412Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:19:2066][TestPath] Ignore empty state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:24339059:0] 2025-06-30T09:24:17.101500Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:2:2049] 2025-06-30T09:24:17.101510Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:20:2066] 2025-06-30T09:24:17.101519Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: 
subscriber.cpp:874: [main][1:19:2066][TestPath] Update to strong state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:1099535966835:0] 2025-06-30T09:24:17.101574Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [1:5:2052] 2025-06-30T09:24:17.101583Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [1:21:2066] 2025-06-30T09:24:17.101591Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:19:2066][TestPath] Path was updated to new version: owner# [1:18:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:2199047594611:0] 2025-06-30T09:24:17.101623Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [1:8:2055] 2025-06-30T09:24:17.101632Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [1:22:2066] 2025-06-30T09:24:17.101638Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:19:2066][TestPath] Path was updated to new version: owner# [1:18:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 3) DomainId: AbandonedSchemeShards: there are 0 elements } ... waiting for initial path lookups 2025-06-30T09:24:17.310726Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][2:28:2075][TestPath] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.310876Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:2:2049] 2025-06-30T09:24:17.310890Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:5:2052] 2025-06-30T09:24:17.310897Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:8:2055] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... 
blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR 2025-06-30T09:24:17.310960Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:29:2075] 2025-06-30T09:24:17.310983Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:30:2075] 2025-06-30T09:24:17.310993Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][2:28:2075][TestPath] Set up state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.311003Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:31:2075] 2025-06-30T09:24:17.311010Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][2:28:2075][TestPath] Ignore empty state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [2:24339059:0] 2025-06-30T09:24:17.311081Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [2:2:2049] 2025-06-30T09:24:17.311090Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [2:29:2075] 2025-06-30T09:24:17.311098Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][2:28:2075][TestPath] Update to strong state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [2:1099535966835:0] 2025-06-30T09:24:17.311176Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [2:5:2052] 2025-06-30T09:24:17.311184Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [2:30:2075] 2025-06-30T09:24:17.311191Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][2:28:2075][TestPath] Path was updated to new version: owner# [2:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 
1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [2:2199047594611:0] 2025-06-30T09:24:17.311223Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [2:8:2055] 2025-06-30T09:24:17.311232Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [2:31:2075] 2025-06-30T09:24:17.311238Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][2:28:2075][TestPath] Path was updated to new version: owner# [2:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 3) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [2:3298559222387:0] Sending path update to replica: [2:4398070850163:0] Sending path update to replica: [2:5497582477939:0] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types17-all_types17-index17-Int8] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberSinglePathUpdateTest::OneDisconnectedRingGroup [GOOD] Test command err: 2025-06-30T09:24:17.084904Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2025-06-30T09:24:17.084932Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:3:2050] Successful handshake: owner# 800, generation# 1 2025-06-30T09:24:17.084970Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2025-06-30T09:24:17.084976Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:3:2050] Commit generation: owner# 800, generation# 1 2025-06-30T09:24:17.084984Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [1:37:2067] 2025-06-30T09:24:17.084988Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 900, generation# 1 2025-06-30T09:24:17.085026Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [1:37:2067] 2025-06-30T09:24:17.085030Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:6:2053] Commit generation: owner# 900, generation# 1 2025-06-30T09:24:17.085047Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][1:39:2069][/root/db/dir_inside] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.085115Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:43:2069] 2025-06-30T09:24:17.085119Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:3:2050] Upsert description: path# /root/db/dir_inside 2025-06-30T09:24:17.085148Z node 1 :SCHEME_BOARD_REPLICA 
INFO: replica.cpp:646: [1:3:2050] Subscribe: subscriber# [1:43:2069], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:24:17.085164Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:44:2069] 2025-06-30T09:24:17.085167Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert description: path# /root/db/dir_inside 2025-06-30T09:24:17.085171Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:44:2069], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:24:17.085180Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:45:2069] 2025-06-30T09:24:17.085183Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:9:2056] Upsert description: path# /root/db/dir_inside 2025-06-30T09:24:17.085186Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:9:2056] Subscribe: subscriber# [1:45:2069], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-30T09:24:17.085195Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:3:2050] 2025-06-30T09:24:17.085202Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:43:2069] 2025-06-30T09:24:17.085207Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:44:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:6:2053] 2025-06-30T09:24:17.085210Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:44:2069] 2025-06-30T09:24:17.085214Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:45:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:9:2056] 2025-06-30T09:24:17.085219Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:45:2069] 2025-06-30T09:24:17.085227Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:40:2069] 2025-06-30T09:24:17.085243Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:41:2069] 2025-06-30T09:24:17.085250Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][1:39:2069][/root/db/dir_inside] Set up state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.085257Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:42:2069] 2025-06-30T09:24:17.085267Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][1:39:2069][/root/db/dir_inside] Ignore empty state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 
Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== !argsLeft.IsDeletion 2025-06-30T09:24:17.085316Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:3:2050] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:36:2066], cookie# 0, event size# 118 2025-06-30T09:24:17.085321Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:3:2050] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], deletion# false 2025-06-30T09:24:17.086283Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:3:2050] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 800, LocalPathId: 1111], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-06-30T09:24:17.086338Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 800, LocalPathId: 1111] Version: 1 }: sender# [1:3:2050] 2025-06-30T09:24:17.086351Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:43:2069] 2025-06-30T09:24:17.086360Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 800, LocalPathId: 1111] Version: 1 }: sender# [1:40:2069] 2025-06-30T09:24:17.086371Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:39:2069][/root/db/dir_inside] Update to strong state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 1111], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() < argsRight.GetSuperId() =========== !argsRight.IsDeletion 2025-06-30T09:24:17.086422Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [1:37:2067], cookie# 0, event size# 117 2025-06-30T09:24:17.086428Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], deletion# false 2025-06-30T09:24:17.086437Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:6:2053] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 900, LocalPathId: 11], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-06-30T09:24:17.086452Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:44:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 900, LocalPathId: 11] Version: 1 }: sender# [1:6:2053] 2025-06-30T09:24:17.086459Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:44:2069] 
2025-06-30T09:24:17.086468Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 900, LocalPathId: 11] Version: 1 }: sender# [1:41:2069] 2025-06-30T09:24:17.086476Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][1:39:2069][/root/db/dir_inside] Path was updated to new version: owner# [1:38:2068], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 1111], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 900, LocalPathId: 11], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } ... waiting for initial path lookups 2025-06-30T09:24:17.494317Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1019: [main][3:28:2075][TestPath] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-30T09:24:17.494443Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:2:2049] 2025-06-30T09:24:17.494456Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:5:2052] 2025-06-30T09:24:17.494465Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:8:2055] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... 
unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR 2025-06-30T09:24:17.494534Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:29:2075] 2025-06-30T09:24:17.494561Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:30:2075] 2025-06-30T09:24:17.494572Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:868: [main][3:28:2075][TestPath] Set up state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-30T09:24:17.494583Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:31:2075] 2025-06-30T09:24:17.494591Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:886: [main][3:28:2075][TestPath] Ignore empty state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [3:24339059:0] 2025-06-30T09:24:17.494667Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:2:2049] 2025-06-30T09:24:17.494678Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:29:2075] 2025-06-30T09:24:17.494686Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][3:28:2075][TestPath] Update to strong state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [3:1099535966835:0] 2025-06-30T09:24:17.494807Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [3:5:2052] 2025-06-30T09:24:17.494820Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [3:30:2075] 2025-06-30T09:24:17.494830Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][3:28:2075][TestPath] Path was updated to new version: owner# [3:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [3:2199047594611:0] 2025-06-30T09:24:17.494869Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: 
sender# [3:8:2055] 2025-06-30T09:24:17.494878Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:833: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [3:31:2075] 2025-06-30T09:24:17.494886Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:874: [main][3:28:2075][TestPath] Path was updated to new version: owner# [3:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 3) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [3:3298559222387:0] Sending path update to replica: [3:4398070850163:0] Sending path update to replica: [3:5497582477939:0] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join-group-by-with-null.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-14.test] |85.6%| [TA] $(B)/ydb/core/tx/scheme_board/ut_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} |85.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact [GOOD] |85.6%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} |85.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-1.test] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-abstime.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-boolean.test] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-5.test] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-boolean.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-case.test] >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/query/unittest >> KqpExplain::CompoundKeyRange [GOOD] Test command err: 2025-06-30T09:25:12.788332Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671050878676756:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:25:12.788358Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/00424d/r3tmp/tmpYaVVIm/pdisk_1.dat 2025-06-30T09:25:12.900989Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:12.903902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:12.903925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:12.908419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4909, node 1 2025-06-30T09:25:12.928537Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:25:12.928555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:25:12.928559Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:25:12.928619Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:25:12.935431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) TClient is connected to server localhost:17201 2025-06-30T09:25:13.016423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-30T09:25:13.054259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:13.054287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:13.059357Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:25:13.059711Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:13.069559Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-30T09:25:13.107131Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:25:13.107224Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:25:13.107236Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:25:13.107249Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:25:13.107258Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:25:13.107273Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:25:13.107284Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:25:13.107295Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:25:13.107308Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-30T09:25:13.158577Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:25:13.158612Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:25:13.164498Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:25:13.194765Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-30T09:25:13.194794Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-30T09:25:13.216019Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-30T09:25:13.216049Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-30T09:25:13.216089Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-30T09:25:13.216094Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-30T09:25:13.216100Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-30T09:25:13.216106Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-30T09:25:13.216111Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-30T09:25:13.216118Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-30T09:25:13.216276Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-30T09:25:13.216336Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:25:13.218883Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7521671056282793576:2205] 2025-06-30T09:25:13.218918Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-30T09:25:13.220321Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-30T09:25:13.220335Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-30T09:25:13.220350Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-30T09:25:13.221601Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7521671056282793720:2297] txid# 281474976720657, issues: { message: "Schemeshard not available" severity: 1 } 2025-06-30T09:25:13.222676Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. 
TEvProposeTransactionStatus: { Status: 20 Issues { message: "Schemeshard not available" severity: 1 } SchemeShardStatus: 13 SchemeShardReason: "Schemeshard not available" } 2025-06-30T09:25:13.227319Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-30T09:25:13.265197Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7960: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:25:13.265219Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7990: ConnectToSA(), pipe client id: [2:7521671056282793791:2287], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-30T09:25:13.268592Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7521671056282793836:2352] 2025-06-30T09:25:13.268614Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7521671056282793836:2352], schemeshard id = 72075186224037897 2025-06-30T09:25:13.305596Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-30T09:25:13.300723Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671055173645147:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:13.300767Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:25:13.307975Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720658:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:25:13.320038Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720658 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-30T09:25:13.320076Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720658 2025-06-30T09:25:13.386210Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:25:13.455048Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037904;self_id=[2:7521671056282794150:2333];tablet_id=72075186224037904;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:25:13.455166Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037904;self_id=[2:7521671056282794150:2333];tablet_id=72075186224037904;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:25:13.455281Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037904;self_id=[2:7521671056282794150:2333];tablet_id=72075186224037904;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:25:13.455311Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037904;self_id=[2:7521671056282794150:2333];tablet_id=72075186224037904;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:25:13.455334Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037904;self_id=[2:7521671056282794150:2333];tablet_id=72075186224037904;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;descriptio ... 
fier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:27:27.162165Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:27:27.162169Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:27:27.162221Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10589 2025-06-30T09:27:27.214489Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:27:27.214540Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:27:27.222439Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10589 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:27:27.265388Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:27:27.271296Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-30T09:27:27.287606Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:27:27.327852Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-30T09:27:27.399708Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:27:27.467311Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:27:27.558103Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671632966835314:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:27:27.558139Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:27:27.562830Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:27:27.572314Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:27:27.585815Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:27:27.599830Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:27:27.660363Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:27:27.675585Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:27:27.691075Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:27:27.727964Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671632966835969:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:27:27.727991Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:27:27.728105Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521671632966835974:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:27:27.729152Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:27:27.735507Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-30T09:27:27.735588Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521671632966835976:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-30T09:27:27.809387Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521671632966836027:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Logs"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Limit","Limit":"10"},{"Scan":"Parallel","ReadRange":["App (new_app_1)","Ts (49)","Host (null, xyz)"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/Logs","E-Rows":"1","Table":"Logs","ReadColumns":["App","Host","Message","Ts"],"E-Cost":"0"}],"Node Type":"Limit-TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"Name":"Limit","Limit":"10"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Logs","reads":[{"lookup_by":["App (new_app_1)","Ts (49)"],"columns":["App","Host","Message","Ts"],"scan_by":["Host (null, xyz)"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Operators":[{"Scan":"Parallel","ReadRange":["App (new_app_1)","Ts (49)","Host (null, xyz)"],"E-Size":"0","Name":"TableRangeScan","Path":"\/Root\/Logs","E-Rows":"1","Table":"Logs","ReadColumns":["App","Host","Message","Ts"],"E-Cost":"0"}],"Node Type":"TableRangeScan"}],"Operators":[{"Name":"Limit","Limit":"10"}],"Node Type":"Limit"}],"Operators":[{"Name":"Limit","Limit":"10"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} >> test_postgres.py::TestPGSQL::test_sql_suite[plan-select_distinct.test] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration [GOOD] |85.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join0.test] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types9-all_types9-index9-Uint8] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types10-all_types10-index10-Uint8] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-case.test] [GOOD] >> TxUsage::WriteToTopic_Demo_47_Table [GOOD] |85.6%| [TA] $(B)/ydb/core/kqp/ut/query/test-results/unittest/{meta.json ... results_accumulator.log} |85.6%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/query/test-results/unittest/{meta.json ... 
results_accumulator.log} |85.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> TxUsage::WriteToTopic_Demo_47_Query >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-10.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-11.test] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types15-all_types15-index15-Int8] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-1.test] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types16-all_types16-index16-Int8] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-15.test] |85.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration [GOOD] |85.6%| [TA] $(B)/ydb/core/fq/libs/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.6%| [TA] {RESULT} $(B)/ydb/core/fq/libs/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_postgres.py::TestPGSQL::test_sql_suite[results-create_table.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types9-all_types9-index9-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types_float-pk_types5-all_types5-index5-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join4.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-select.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types17-all_types17-index17-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_postgres.py::TestPGSQL::test_sql_suite[results-abstime.test] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3_float-pk_types1-all_types1-index1-Float] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types16-all_types16-index16-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. 
Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_postgres.py::TestPGSQL::test_sql_suite[results-select_distinct.test] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types14-all_types14-index14-Int8] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-insert.test] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types7-all_types7-index7-Uint8] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types11-all_types11-index11-Uint8] [GOOD] |85.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-6.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-7.test] >> test_postgres.py::TestPGSQL::test_sql_suite[results-abstime.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-boolean.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types10-all_types10-index10-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types16-all_types16-index16-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
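The DeprecationWarning repeated throughout the vector_index test output above originates in contrib/python/ydb/py3/ydb/types.py:59 and names its own remedy. A minimal, runnable sketch of that migration (an illustration, not part of the captured log; note that datetime.UTC, as spelled in the warning, is a Python 3.11+ alias for datetime.timezone.utc, which is used below as the portable form):

    import datetime

    timestamp = 1751275457  # example epoch seconds; any value behaves the same

    # Deprecated (removal scheduled, per the warning above): returns a
    # naive datetime object that carries no tzinfo.
    naive = datetime.datetime.utcfromtimestamp(timestamp)

    # Replacement the warning asks for: a timezone-aware datetime in UTC.
    aware = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)

    # Both name the same instant; only the aware object carries tzinfo.
    assert aware.replace(tzinfo=None) == naive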
>> LabeledDbCounters::OneTabletRestart [GOOD] >> LabeledDbCounters::TwoTablets >> test_postgres.py::TestPGSQL::test_sql_suite[results-boolean.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-case.test] >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table [GOOD] |85.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types12-all_types12-index12-Int8] [GOOD] |85.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join-group-by-with-null.test] [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query [GOOD] |85.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join3.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query [GOOD] Test command err: 2025-06-30T09:26:08.123146Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671293514746537:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:08.125350Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a02/r3tmp/tmpo00mY8/pdisk_1.dat 2025-06-30T09:26:08.199300Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:26:08.351536Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24967, node 1 2025-06-30T09:26:08.409837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:08.409898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:08.411933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:08.559048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004a02/r3tmp/yandexiugMj6.tmp 2025-06-30T09:26:08.559065Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004a02/r3tmp/yandexiugMj6.tmp 2025-06-30T09:26:08.559167Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004a02/r3tmp/yandexiugMj6.tmp 2025-06-30T09:26:08.559221Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:08.705880Z INFO: TTestServer started on Port 4295 GrpcPort 24967 TClient is connected to server localhost:4295 PQClient connected to localhost:24967 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:08.815250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:26:08.820598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:26:08.835920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:08.913210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:26:08.920224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-30T09:26:08.970697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671293514747232:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.970730Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.971170Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671293514747259:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.973521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:08.976885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-30T09:26:08.977022Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671293514747261:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:26:09.048387Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671297809714621:2436] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:09.139529Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:09.155049Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521671297809714629:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:26:09.163214Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=MjIyMmU0NTAtNDNhMDg0NWEtNWM4ZmJlNDMtZTNlZTYzMDY=, ActorId: [1:7521671293514747229:2296], ActorState: ExecuteState, TraceId: 01jz02gzt9bkcx73f6etkytcs6, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:26:09.167371Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:26:09.181241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:09.193786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:09.262466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521671297809714919:2611] 2025-06-30T09:26:13.123737Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521671293514746537:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:13.123782Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-30T09:26:14.580603Z :Sinks_Oltp_WriteToTopicAndTable_1_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:26:14.600702Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:26:14.626837Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:26:14.627149Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:26:14.627200Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521671319284551613:2704] connected; active server actors: 1 2025-06-30T09:26:14.627287Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-30T09:26:14.627547Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:26:14.627586Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:26:14.62760 ... 30T09:27:44.847631Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer test-consumer session test-consumer_11_3_5425843312707150828_v1 grpc read done: success# 0, data# { } 2025-06-30T09:27:44.847647Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer test-consumer session test-consumer_11_3_5425843312707150828_v1 grpc read failed 2025-06-30T09:27:44.847656Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer test-consumer session test-consumer_11_3_5425843312707150828_v1 grpc closed 2025-06-30T09:27:44.847666Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer test-consumer session test-consumer_11_3_5425843312707150828_v1 is DEAD 2025-06-30T09:27:44.847932Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [11:7521671696232222477:2595]: session cookie 4 consumer test-consumer session test-consumer_11_3_5425843312707150828_v1 grpc read done: success# 0, data# { } 2025-06-30T09:27:44.847936Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [11:7521671696232222477:2595]: session cookie 4 consumer test-consumer session test-consumer_11_3_5425843312707150828_v1grpc read failed 2025-06-30T09:27:44.847941Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [11:7521671696232222477:2595]: session cookie 4 consumer test-consumer session test-consumer_11_3_5425843312707150828_v1 grpc closed 2025-06-30T09:27:44.847944Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [11:7521671696232222477:2595]: session cookie 4 consumer test-consumer session test-consumer_11_3_5425843312707150828_v1 proxy is DEAD 2025-06-30T09:27:44.848341Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037897][topic_B] pipe [11:7521671696232222469:2590] disconnected; active server actors: 1 2025-06-30T09:27:44.848345Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037897][topic_B] pipe [11:7521671696232222469:2590] client test-consumer disconnected session test-consumer_11_3_5425843312707150828_v1 2025-06-30T09:27:44.848370Z node 11 :PERSQUEUE DEBUG: 
pq_impl.cpp:2452: [PQ: 72075186224037896] Destroy direct read session test-consumer_11_3_5425843312707150828_v1 2025-06-30T09:27:44.848375Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521671696232222472:2593] destroyed 2025-06-30T09:27:44.848386Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_3_5425843312707150828_v1 2025-06-30T09:27:44.862813Z :INFO: [/Root] [/Root] [3a5e5e5e-3877a781-7de3bc3a-1835f122] Closing read session. Close timeout: 0.000000s 2025-06-30T09:27:44.862848Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:0:1 2025-06-30T09:27:44.862860Z :INFO: [/Root] [/Root] [3a5e5e5e-3877a781-7de3bc3a-1835f122] Counters: { Errors: 0 CurrentSessionLifetimeMs: 4143 BytesRead: 144 MessagesRead: 1 BytesReadCompressed: 144 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:27:44.862885Z :NOTICE: [/Root] [/Root] [3a5e5e5e-3877a781-7de3bc3a-1835f122] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:27:44.862896Z :DEBUG: [/Root] [/Root] [3a5e5e5e-3877a781-7de3bc3a-1835f122] [] Abort session to cluster 2025-06-30T09:27:44.863072Z :DEBUG: [/Root] 0x00003166FFC16D90 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_10484187572654164654_v1 Close 2025-06-30T09:27:44.863125Z :DEBUG: [/Root] 0x00003166FFC16D90 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_10484187572654164654_v1 Close 2025-06-30T09:27:44.863142Z :NOTICE: [/Root] [/Root] [3a5e5e5e-3877a781-7de3bc3a-1835f122] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:27:44.866819Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_10484187572654164654_v1 grpc read done: success# 0, data# { } 2025-06-30T09:27:44.866838Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_10484187572654164654_v1 grpc read failed 2025-06-30T09:27:44.866847Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_11_1_10484187572654164654_v1 grpc closed 2025-06-30T09:27:44.866858Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_10484187572654164654_v1 is DEAD 2025-06-30T09:27:44.867182Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [11:7521671687642287789:2553]: session cookie 2 consumer test-consumer session test-consumer_11_1_10484187572654164654_v1 grpc read done: success# 0, data# { } 2025-06-30T09:27:44.867189Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [11:7521671687642287789:2553]: session cookie 2 consumer test-consumer session test-consumer_11_1_10484187572654164654_v1grpc read failed 2025-06-30T09:27:44.867197Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [11:7521671687642287789:2553]: session cookie 2 consumer test-consumer session test-consumer_11_1_10484187572654164654_v1 grpc closed 2025-06-30T09:27:44.867201Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [11:7521671687642287789:2553]: session cookie 2 consumer test-consumer session test-consumer_11_1_10484187572654164654_v1 proxy is DEAD 2025-06-30T09:27:44.867564Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521671687642287781:2548] disconnected; active server actors: 1 2025-06-30T09:27:44.867569Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521671687642287781:2548] client test-consumer disconnected session test-consumer_11_1_10484187572654164654_v1 2025-06-30T09:27:44.867597Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_10484187572654164654_v1 2025-06-30T09:27:44.867602Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521671687642287784:2551] destroyed 2025-06-30T09:27:44.867614Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_10484187572654164654_v1 2025-06-30T09:27:44.874961Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|cd2e8cc6-ae3dbfe5-491f7757-c720b696_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-30T09:27:44.874977Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|cd2e8cc6-ae3dbfe5-491f7757-c720b696_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:27:44.874987Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|cd2e8cc6-ae3dbfe5-491f7757-c720b696_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:27:44.875190Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|cd2e8cc6-ae3dbfe5-491f7757-c720b696_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:27:44.875197Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|cd2e8cc6-ae3dbfe5-491f7757-c720b696_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:27:44.877722Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message_group_id|cd2e8cc6-ae3dbfe5-491f7757-c720b696_0 grpc read done: success: 0 data: 2025-06-30T09:27:44.877738Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message_group_id|cd2e8cc6-ae3dbfe5-491f7757-c720b696_0 grpc read failed 2025-06-30T09:27:44.877749Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message_group_id|cd2e8cc6-ae3dbfe5-491f7757-c720b696_0 grpc closed 2025-06-30T09:27:44.877753Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message_group_id|cd2e8cc6-ae3dbfe5-491f7757-c720b696_0 is DEAD 2025-06-30T09:27:44.878123Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:27:44.878131Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:27:44.878189Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521671687642287647:2519] destroyed 2025-06-30T09:27:44.878195Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [11:7521671687642287650:2519] destroyed 2025-06-30T09:27:44.878206Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:27:44.882905Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|eab3c0cd-bd62bc43-ac2f68d6-e85e6ab_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-30T09:27:44.882923Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|eab3c0cd-bd62bc43-ac2f68d6-e85e6ab_0] PartitionId [0] Generation [1] Write session will now close 2025-06-30T09:27:44.882932Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|eab3c0cd-bd62bc43-ac2f68d6-e85e6ab_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-30T09:27:44.883154Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|eab3c0cd-bd62bc43-ac2f68d6-e85e6ab_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-30T09:27:44.883164Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|eab3c0cd-bd62bc43-ac2f68d6-e85e6ab_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-30T09:27:44.883651Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|eab3c0cd-bd62bc43-ac2f68d6-e85e6ab_0 grpc read done: success: 0 data: 2025-06-30T09:27:44.883678Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|eab3c0cd-bd62bc43-ac2f68d6-e85e6ab_0 grpc read failed 2025-06-30T09:27:44.883688Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|eab3c0cd-bd62bc43-ac2f68d6-e85e6ab_0 grpc closed 2025-06-30T09:27:44.883692Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|eab3c0cd-bd62bc43-ac2f68d6-e85e6ab_0 is DEAD 2025-06-30T09:27:44.884013Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:27:44.884022Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:27:44.886869Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521671687642287595:2508] destroyed 2025-06-30T09:27:44.886889Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521671687642287598:2508] destroyed 2025-06-30T09:27:44.886904Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
|85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4_float-pk_types0-all_types0-index0-Float] [GOOD] >> test_stream_query.py::TestStreamQuery::test_sql_suite[plan-window.test] >> test_postgres.py::TestPGSQL::test_sql_suite[results-create_table.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/coalesce-and-join.test] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2_float-pk_types2-all_types2-index2-Float] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-15.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-2.test] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1_float-pk_types3-all_types3-index3-Float] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-case.test] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2_float-pk_types2-all_types2-index2-Float] [GOOD] >> test_stream_query.py::TestStreamQuery::test_sql_suite[results-window.test] >> TxUsage::WriteToTopic_Demo_47_Query [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-insert.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-1.test] >> TxUsage::WriteToTopic_Demo_50_Table |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[plan-case.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-11.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-12.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types15-all_types15-index15-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types15-all_types15-index15-Int8] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types14-all_types14-index14-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/coalesce-and-join.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join-group-by-with-null.test] |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types11-all_types11-index11-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-1.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-2.test] >> YdbSdkSessionsPool::StressTestAsync/1 [GOOD] |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types8-all_types8-index8-Uint8] [GOOD] |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types13-all_types13-index13-Int8] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3_float-pk_types1-all_types1-index1-Float] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types12-all_types12-index12-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3_float-pk_types1-all_types1-index1-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types12-all_types12-index12-Int8] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types14-all_types14-index14-Int8] [GOOD] |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestAsync/1 [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join-group-by-with-null.test] [GOOD] |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query [GOOD] |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join3.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join4.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types7-all_types7-index7-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> TxUsage::WriteToTopic_Demo_50_Table [GOOD] >> TxUsage::WriteToTopic_Demo_50_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1_float-pk_types3-all_types3-index3-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
|85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-30T09:26:08.542550Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671291909405228:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:08.542788Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/0049fe/r3tmp/tmpZ1IaTu/pdisk_1.dat 2025-06-30T09:26:08.587366Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-30T09:26:08.614625Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:08.615165Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671291909405012:2080] 1751275568539946 != 1751275568539949 TServer::EnableGrpc on GrpcPort 7421, node 1 2025-06-30T09:26:08.631931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/0049fe/r3tmp/yandexte3Fku.tmp 2025-06-30T09:26:08.631944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/0049fe/r3tmp/yandexte3Fku.tmp 2025-06-30T09:26:08.632030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/0049fe/r3tmp/yandexte3Fku.tmp 2025-06-30T09:26:08.632081Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:08.643329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:08.643382Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:08.644958Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:26:08.705859Z INFO: TTestServer started on Port 7387 GrpcPort 7421 TClient is connected to server localhost:7387 PQClient connected to localhost:7421 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:26:08.795973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:26:08.813625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-30T09:26:08.820011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-30T09:26:08.909781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:26:08.970774Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671291909405787:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.970805Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.971009Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671291909405799:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:08.973493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:08.976451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-30T09:26:08.976517Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671291909405801:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-30T09:26:09.130334Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671296204373162:2437] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:09.155993Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521671296204373170:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:26:09.163684Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=Mzc0YzU0ODgtOTU0MTQwNzItMjhkNzgyMzUtYjIwMmI2OWU=, ActorId: [1:7521671291909405784:2296], ActorState: ExecuteState, TraceId: 01jz02gzt961x1ge81qyajbbv3, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:26:09.171468Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:26:09.179876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:09.204463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:09.250927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521671296204373441:2611] 2025-06-30T09:26:09.542259Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:13.542461Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521671291909405228:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:13.542533Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-30T09:26:14.673556Z :WriteToTopic_Demo_18_RestartNo_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:26:14.687193Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:26:14.707851Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521671317679210131:2697] connected; active server actors: 1 2025-06-30T09:26:14.707939Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-30T09:26:14.708046Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-30T09:26:14.711174Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:26:14.711301Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-30T09:26:14.711396Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-30T09:26:14.711401Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxI ... ets [0, 12). Partition stream id: 1 2025-06-30T09:27:52.002644Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 grpc read done: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 1 offsets { end: 12 } } } } 2025-06-30T09:27:52.002815Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 12 prev 0 end 12 by cookie 2 2025-06-30T09:27:52.002973Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-30T09:27:52.002985Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-30T09:27:52.003045Z node 11 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 12 (startOffset 12) session test-consumer_11_1_5090248123557553787_v1 2025-06-30T09:27:52.003094Z node 11 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-30T09:27:52.003618Z node 11 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 12 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-30T09:27:52.003642Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-30T09:27:52.003641Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-30T09:27:52.003656Z node 11 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 0, State: StateIdle] no data for compaction 2025-06-30T09:27:52.003680Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-30T09:27:52.003696Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 12 endOffset 12 with cookie 2 2025-06-30T09:27:52.003710Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 12 2025-06-30T09:27:52.003974Z :DEBUG: [/Root] [/Root] [12699274-9c3720d9-b950515c-afa39543] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 12 } } 2025-06-30T09:27:52.939912Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:11:12 2025-06-30T09:27:52.939943Z :INFO: [/Root] [/Root] [12699274-9c3720d9-b950515c-afa39543] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1000 BytesRead: 15000000 MessagesRead: 12 BytesReadCompressed: 15000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:27:52.944062Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 checking auth because of timeout 2025-06-30T09:27:52.944115Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 auth for : test-consumer 2025-06-30T09:27:52.944479Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 Handle describe topics response 2025-06-30T09:27:52.944509Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 auth is DEAD 2025-06-30T09:27:52.944532Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:27:52.950981Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 checking auth because of timeout 
2025-06-30T09:27:52.951013Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 auth for : test-consumer 2025-06-30T09:27:52.951759Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 Handle describe topics response 2025-06-30T09:27:52.951785Z node 11 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 auth is DEAD 2025-06-30T09:27:52.951807Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 auth ok: topics# 1, initDone# 1 2025-06-30T09:27:53.942657Z :INFO: [/Root] [/Root] [12699274-9c3720d9-b950515c-afa39543] Closing read session. Close timeout: 0.000000s 2025-06-30T09:27:53.942676Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:11:12 2025-06-30T09:27:53.942688Z :INFO: [/Root] [/Root] [12699274-9c3720d9-b950515c-afa39543] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2003 BytesRead: 15000000 MessagesRead: 12 BytesReadCompressed: 15000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:27:53.942711Z :NOTICE: [/Root] [/Root] [12699274-9c3720d9-b950515c-afa39543] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-30T09:27:53.942721Z :DEBUG: [/Root] [/Root] [12699274-9c3720d9-b950515c-afa39543] [] Abort session to cluster 2025-06-30T09:27:53.942957Z :DEBUG: [/Root] 0x000073DA799BF290 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_5090248123557553787_v1 Close 2025-06-30T09:27:53.943025Z :DEBUG: [/Root] 0x000073DA799BF290 TDirectReadSessionManager ServerSessionId=test-consumer_11_1_5090248123557553787_v1 Close 2025-06-30T09:27:53.943045Z :NOTICE: [/Root] [/Root] [12699274-9c3720d9-b950515c-afa39543] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-30T09:27:53.943280Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|8ff14fd5-2d3e18c8-b08d959-b6281871_0] PartitionId [0] Generation [2] Write session: close. Timeout 0.000000s 2025-06-30T09:27:53.943285Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|8ff14fd5-2d3e18c8-b08d959-b6281871_0] PartitionId [0] Generation [2] Write session will now close 2025-06-30T09:27:53.943292Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|8ff14fd5-2d3e18c8-b08d959-b6281871_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-30T09:27:53.943366Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|8ff14fd5-2d3e18c8-b08d959-b6281871_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-30T09:27:53.943372Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|8ff14fd5-2d3e18c8-b08d959-b6281871_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-30T09:27:53.945171Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 grpc read done: success# 0, data# { } 2025-06-30T09:27:53.945185Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 grpc read failed 2025-06-30T09:27:53.945192Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 grpc closed 2025-06-30T09:27:53.945200Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 is DEAD 2025-06-30T09:27:53.945437Z node 11 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [11:7521671733779445869:2512]: session cookie 2 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 grpc read done: success# 0, data# { } 2025-06-30T09:27:53.945440Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [11:7521671733779445869:2512]: session cookie 2 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1grpc read failed 2025-06-30T09:27:53.945445Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [11:7521671733779445869:2512]: session cookie 2 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 grpc closed 2025-06-30T09:27:53.945449Z node 11 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [11:7521671733779445869:2512]: session cookie 2 consumer test-consumer session test-consumer_11_1_5090248123557553787_v1 proxy is DEAD 2025-06-30T09:27:53.945564Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|8ff14fd5-2d3e18c8-b08d959-b6281871_0 grpc read done: success: 0 data: 2025-06-30T09:27:53.945567Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|8ff14fd5-2d3e18c8-b08d959-b6281871_0 grpc read failed 2025-06-30T09:27:53.945574Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|8ff14fd5-2d3e18c8-b08d959-b6281871_0 grpc closed 2025-06-30T09:27:53.945576Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|8ff14fd5-2d3e18c8-b08d959-b6281871_0 is DEAD 2025-06-30T09:27:53.945741Z node 11 
:PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:27:53.946088Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [11:7521671733779445860:2506] disconnected; active server actors: 1 2025-06-30T09:27:53.946103Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [11:7521671733779445860:2506] client test-consumer disconnected session test-consumer_11_1_5090248123557553787_v1 2025-06-30T09:27:53.946149Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521671725189511188:2478] destroyed 2025-06-30T09:27:53.946157Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_11_1_5090248123557553787_v1 2025-06-30T09:27:53.946177Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [11:7521671733779445863:2509] destroyed 2025-06-30T09:27:53.946192Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:27:53.946226Z node 11 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_11_1_5090248123557553787_v1 >> test_counters.py::TestSqsCountersFeatures::test_creates_counter[tables_format_v0] |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.7%| [TA] $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/{meta.json ... results_accumulator.log} |85.7%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/{meta.json ... results_accumulator.log} >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-7.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-8.test] |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types15-all_types15-index15-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
|85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0_float-pk_types4-all_types4-index4-Float] [GOOD] |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types8-all_types8-index8-Uint8] [GOOD] |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4_float-pk_types0-all_types0-index0-Float] [GOOD] |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4_float-pk_types0-all_types0-index0-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2_float-pk_types2-all_types2-index2-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types6-all_types6-index6-Uint8] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2_float-pk_types2-all_types2-index2-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
|85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_counters.py::TestSqsCountersFeatures::test_creates_counter[tables_format_v0] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_creates_counter[tables_format_v1] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[results-case.test] [GOOD] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_counters.py::TestSqsCountersFeatures::test_creates_counter[tables_format_v1] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_detailed_counters[queue] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[block-4-2] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types12-all_types12-index12-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types14-all_types14-index14-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> TxUsage::WriteToTopic_Demo_50_Query [GOOD] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types6-all_types6-index6-Uint8] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4_float-pk_types0-all_types0-index0-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_50_Query [GOOD] Test command err: 2025-06-30T09:26:08.054793Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521671294282227535:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:08.054973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-30T09:26:08.199305Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/004a16/r3tmp/tmpywAKST/pdisk_1.dat 2025-06-30T09:26:08.355052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:26:08.355109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:26:08.355239Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:26:08.356718Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521671294282227340:2080] 1751275568028593 != 1751275568028596 2025-06-30T09:26:08.366369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64195, node 1 2025-06-30T09:26:08.559048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/cl3w/004a16/r3tmp/yandexaSRC2q.tmp 2025-06-30T09:26:08.559065Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/cl3w/004a16/r3tmp/yandexaSRC2q.tmp 2025-06-30T09:26:08.559167Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/cl3w/004a16/r3tmp/yandexaSRC2q.tmp 2025-06-30T09:26:08.559222Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:26:08.706411Z INFO: TTestServer started on Port 15844 GrpcPort 64195 TClient is connected to server localhost:15844 PQClient connected to localhost:64195 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-30T09:26:08.800883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-30T09:26:08.814698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:26:08.819962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-30T09:26:08.910560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-30T09:26:08.922013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-30T09:26:09.056467Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:26:09.061742Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671298577195408:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:09.061787Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:09.062901Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521671298577195436:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:26:09.064352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:26:09.073894Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7521671298577195438:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-30T09:26:09.141515Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7521671298577195502:2439] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:26:09.165350Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7521671298577195527:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-30T09:26:09.166163Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2245: SessionId: ydb://session/3?node_id=1&id=NWNjMTY3LWUzMWNmY2JkLTRhMWUzZDVjLTFjZmNhNmI4, ActorId: [1:7521671298577195388:2295], ActorState: ExecuteState, TraceId: 01jz02gzx5dham1yg794g28fqz, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-30T09:26:09.170927Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-30T09:26:09.179122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:09.202165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:26:09.235204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7521671298577195788:2614] 2025-06-30T09:26:13.047360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7521671294282227535:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:26:13.047417Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-30T09:26:14.558852Z :WriteToTopic_Demo_24_Table INFO: TTopicSdkTestSetup started 2025-06-30T09:26:14.594958Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-30T09:26:14.610837Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7521671320052032471:2700] connected; active server actors: 1 2025-06-30T09:26:14.610950Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-30T09:26:14.611253Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:26:14.612775Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-30T09:26:14.614807Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-30T09:26:14.617586Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 720751862240 ... nd: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "test-topic" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/test-topic" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7521671746157495217 RawX2: 47244642413 } Partitions { Partition { PartitionId: 0 } } 2025-06-30T09:28:00.955130Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:28:00.956404Z node 11 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-30T09:28:00.957117Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-30T09:28:00.957152Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:28:00.957156Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-30T09:28:00.957158Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715674, State EXECUTED 2025-06-30T09:28:00.957161Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976715674 State EXECUTED 
FrontTxId 281474976715674 2025-06-30T09:28:00.957165Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-30T09:28:00.957167Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715674, NewState WAIT_RS_ACKS 2025-06-30T09:28:00.957169Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976715674 moved from EXECUTED to WAIT_RS_ACKS 2025-06-30T09:28:00.957173Z node 11 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715674] PredicateAcks: 0/0 2025-06-30T09:28:00.957175Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-30T09:28:00.957177Z node 11 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715674] PredicateAcks: 0/0 2025-06-30T09:28:00.957179Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037892] add an TxId 281474976715674 to the list for deletion 2025-06-30T09:28:00.957181Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715674, NewState DELETING 2025-06-30T09:28:00.957186Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037892] delete key for TxId 281474976715674 2025-06-30T09:28:00.957196Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-30T09:28:00.957531Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-30T09:28:00.957537Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-30T09:28:00.957539Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715674, State DELETING 2025-06-30T09:28:00.957543Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037892] delete TxId 281474976715674 2025-06-30T09:28:00.971682Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-30T09:28:00.971873Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-30T09:28:00.971880Z :INFO: [/Root] MessageGroupId [src] SessionId [] Start write session. 
Will connect to endpoint: localhost:23582 2025-06-30T09:28:00.973441Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-30T09:28:00.973814Z node 11 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-30T09:28:00.973828Z node 11 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-30T09:28:00.973982Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-30T09:28:00.974006Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:33826 2025-06-30T09:28:00.974011Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:33826 proto=v1 topic=test-topic durationSec=0 2025-06-30T09:28:00.974014Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-30T09:28:00.975114Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-30T09:28:00.975154Z node 11 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-30T09:28:00.975156Z node 11 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-30T09:28:00.975158Z node 11 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-30T09:28:00.975173Z node 11 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [11:7521671771927300113:2427] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-30T09:28:00.975178Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
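The three YQL statements logged by TPartitionChooser above implement a sticky producer-to-partition mapping keyed by (Hash, Topic, ProducerId): look the producer up, refresh AccessTime if found, otherwise pick a partition and upsert the row. A rough Python model of that read-or-upsert flow, with a dict standing in for `//Root/.metadata/TopicPartitionsMapping` (the hash scheme and all names below are illustrative, not YDB's):

import time
from dataclasses import dataclass

@dataclass
class Row:
    partition: int
    create_time: int
    access_time: int
    seq_no: int

# A dict stands in for the TopicPartitionsMapping table,
# keyed like the YQL above: (Hash, Topic, ProducerId).
TABLE: dict[tuple[int, str, str], Row] = {}

def choose_partition(topic: str, producer_id: str, partition_count: int) -> int:
    h = hash((topic, producer_id)) & (2**64 - 1)   # stand-in for the real 64-bit hash
    key = (h, topic, producer_id)
    now_ms = int(time.time() * 1000)
    row = TABLE.get(key)                           # the SELECT branch
    if row is not None:
        row.access_time = now_ms                   # the UPDATE ... SET AccessTime branch
        return row.partition
    partition = h % partition_count                # sticky choice for a new producer
    TABLE[key] = Row(partition, now_ms, now_ms, seq_no=0)   # the UPSERT branch
    return partition

first = choose_partition("test-topic", "src", partition_count=1)
assert choose_partition("test-topic", "src", 1) == first   # same producer, same partition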
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-30T09:28:00.975486Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:821: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 11, Generation: 1 2025-06-30T09:28:00.975505Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [11:7521671771927300116:2427], now have 1 active actors on pipe 2025-06-30T09:28:00.975583Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-30T09:28:00.975592Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-30T09:28:00.975627Z node 11 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|66b0db51-5305f349-5fb31e22-647eb6a1_0 generated for partition 0 topic 'test-topic' owner src 2025-06-30T09:28:00.975666Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-30T09:28:00.975687Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:28:00.975783Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-30T09:28:00.975787Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-30T09:28:00.975803Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-30T09:28:00.975821Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|66b0db51-5305f349-5fb31e22-647eb6a1_0 2025-06-30T09:28:00.976123Z :INFO: [/Root] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1751275680976 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-30T09:28:00.976144Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|66b0db51-5305f349-5fb31e22-647eb6a1_0" topic: "test-topic" 2025-06-30T09:28:00.976257Z :INFO: [/Root] MessageGroupId [src] SessionId [src|66b0db51-5305f349-5fb31e22-647eb6a1_0] Write session: close. 
Timeout = 0 ms 2025-06-30T09:28:00.976263Z :INFO: [/Root] MessageGroupId [src] SessionId [src|66b0db51-5305f349-5fb31e22-647eb6a1_0] Write session will now close 2025-06-30T09:28:00.976268Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|66b0db51-5305f349-5fb31e22-647eb6a1_0] Write session: aborting 2025-06-30T09:28:00.976359Z :INFO: [/Root] MessageGroupId [src] SessionId [src|66b0db51-5305f349-5fb31e22-647eb6a1_0] Write session: gracefully shut down, all writes complete 2025-06-30T09:28:00.976364Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|66b0db51-5305f349-5fb31e22-647eb6a1_0] Write session: destroy 2025-06-30T09:28:00.976683Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|66b0db51-5305f349-5fb31e22-647eb6a1_0 grpc read done: success: 0 data: 2025-06-30T09:28:00.976690Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|66b0db51-5305f349-5fb31e22-647eb6a1_0 grpc read failed 2025-06-30T09:28:00.976696Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|66b0db51-5305f349-5fb31e22-647eb6a1_0 grpc closed 2025-06-30T09:28:00.976699Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|66b0db51-5305f349-5fb31e22-647eb6a1_0 is DEAD 2025-06-30T09:28:00.976863Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:561: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-30T09:28:00.977235Z node 11 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [11:7521671771927300116:2427] destroyed 2025-06-30T09:28:00.977250Z node 11 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-30T09:28:00.982928Z :WriteToTopic_Demo_50_Query INFO: Topic created |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_counters.py::TestSqsCountersFeatures::test_detailed_counters[queue] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_detailed_counters[user] >> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types8-all_types8-index8-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
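The trace above covers a complete write-session lifecycle: init request, ACL check, partition choice, owner cookie generation, "session inited ... MaxSeqNo: 0", then a close with Timeout = 0 ms that the server side observes as "grpc read failed" / "grpc closed" / "is DEAD" and answers with DropOwner. A toy client-side model of just those state transitions, under the stated assumptions (this is not the real SDK interface):

from enum import Enum

class SessionState(Enum):
    INITIALIZING = "initializing"
    ESTABLISHED = "established"
    CLOSED = "closed"

class WriteSession:
    """Client-side state transitions only; field and method names are hypothetical."""

    def __init__(self, topic: str, message_group_id: str) -> None:
        self.topic = topic
        self.message_group_id = message_group_id   # "src" in the trace
        self.session_id: str | None = None
        self.next_seq_no = 0
        self.state = SessionState.INITIALIZING

    def on_init_response(self, session_id: str, max_seq_no: int) -> None:
        # "session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|..."
        self.session_id = session_id
        self.next_seq_no = max_seq_no + 1          # resume numbering after MaxSeqNo
        self.state = SessionState.ESTABLISHED

    def close(self, timeout_ms: int = 0) -> None:
        # Timeout = 0 ms means "do not wait": the client aborts the stream at once,
        # which the server logs as "grpc read failed" / "grpc closed" / "is DEAD".
        self.state = SessionState.CLOSED

s = WriteSession("test-topic", "src")
s.on_init_response("src|66b0db51-5305f349-5fb31e22-647eb6a1_0", max_seq_no=0)
s.close(timeout_ms=0)
assert s.state is SessionState.CLOSED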
|85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-2.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-3.test] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-1.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-2.test] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TA] $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types13-all_types13-index13-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3_float-pk_types1-all_types1-index1-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |85.8%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_counters.py::TestSqsCountersFeatures::test_detailed_counters[user] [GOOD] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types8-all_types8-index8-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state [GOOD] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types13-all_types13-index13-Int8] [GOOD] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join-group-by-with-null.test] [GOOD] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-2.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-3.test] >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test[read_update_write_load] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-12.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-13.test] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_scheme_load.py::TestSchemeLoad::test[create_and_drop_tables] >> test_alter_tiering.py::TestAlterTiering::test[many_tables] >> test_alter_compression.py::TestAlterCompression::test[alter_compression] >> test_simple.py::TestSimple::test[alter_table] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join4.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-select.test] >> test_self_heal.py::TestEnableSelfHeal::test_replication >> test_postgres.py::TestPGSQL::test_sql_suite[plan-select.test] [GOOD] |85.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> overlapping_portions.py::TestOverlappingPortions::test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0_float-pk_types4-all_types4-index4-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
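The bracketed suffixes in these results, such as [table_index_2-pk_types8-all_types8-index8-Uint8], are pytest parametrize node IDs: pytest joins the parameter values with '-'. A minimal sketch of how such IDs arise, with made-up parameter sets (the real ones live in ydb/tests/datashard/vector_index):

import pytest

# Made-up parameter tuples; only their shape matches the IDs seen in this log.
CASES = [
    ("table_index_2", "pk_types8", "all_types8", "index8", "Uint8"),
    ("table_index_3", "pk_types13", "all_types13", "index13", "Int8"),
    ("table_index_3_float", "pk_types1", "all_types1", "index1", "Float"),
]

@pytest.mark.parametrize("table,pk_types,all_types,index,vector_type", CASES)
def test_vector_index(table, pk_types, all_types, index, vector_type):
    # pytest joins the parameter values with '-' to form the bracketed node ID,
    # e.g. test_vector_index[table_index_2-pk_types8-all_types8-index8-Uint8]
    assert vector_type in {"Uint8", "Int8", "Float"}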
>> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet [GOOD] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] [GOOD] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types6-all_types6-index6-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_simple.py::TestSimple::test[alter_table] [GOOD] >> test_simple.py::TestSimple::test[alter_tablestore] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state [GOOD] >> test_insert.py::TestInsert::test[read_data_during_bulk_upsert] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-8.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-9.test] >> test_simple.py::TestSimple::test[alter_tablestore] [GOOD] >> test_simple.py::TestSimple::test[table] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types6-all_types6-index6-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] [GOOD] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types7-all_types7-index7-Uint8] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] >> test_simple.py::TestSimple::test[table] [GOOD] >> test_simple.py::TestSimple::test[tablestores] >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-5.test] [GOOD] >> test_simple.py::TestSimple::test[tablestores] [GOOD] >> test_simple.py::TestSimple::test_multi[alter_table] [GOOD] >> test_simple.py::TestSimple::test_multi[alter_tablestore] [GOOD] >> test_simple.py::TestSimple::test_multi[table] [GOOD] >> test_simple.py::TestSimple::test_multi[tablestores] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-6.test] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_self_heal.py::TestEnableSelfHeal::test_replication [GOOD] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types13-all_types13-index13-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
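The DeprecationWarning repeated in these "Test command err" blocks is Python 3.12 deprecating datetime.datetime.utcfromtimestamp() in favor of timezone-aware construction. The replacement the warning itself suggests, shown side by side:

from datetime import datetime, timezone

ts = 1751275680.976  # epoch seconds; the magnitude matches timestamps in this log

naive = datetime.utcfromtimestamp(ts)             # deprecated since Python 3.12
aware = datetime.fromtimestamp(ts, timezone.utc)  # the recommended replacement

# Same wall-clock instant; the new object just carries an explicit UTC offset.
assert aware.replace(tzinfo=None) == naive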
>> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-3.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-4.test] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet [GOOD] >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet [GOOD] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_self_heal.py::TestEnableSelfHeal::test_replication [GOOD] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[plan-select.test] [GOOD] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_scheme_load.py::TestSchemeLoad::test[create_and_drop_tables] [GOOD] >> test_scheme_load.py::TestSchemeLoad::test_multi[create_and_drop_tables] [GOOD] |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_counters.py::TestSqsCountersFeatures::test_detailed_counters[user] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-2.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-3.test] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[block-4-2] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/medium/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types7-all_types7-index7-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |85.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-13.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-14.test] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[block-4-2] [GOOD] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |86.0%| [TA] $(B)/ydb/tests/datashard/vector_index/medium/test-results/py3test/{meta.json ... results_accumulator.log} |86.0%| [TA] {RESULT} $(B)/ydb/tests/datashard/vector_index/medium/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-3.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-4.test] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] [GOOD] >> test_bindings_0.py::TestBindings::test_binding_operations[v2-kikimr_settings0-client0] >> test_statistics.py::TestS3::test_egress[v2-client0-json_list] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-14.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-15.test] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet [GOOD] >> test_public_metrics.py::TestPublicMetrics::test_public_metrics[v2-client0] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] [GOOD] >> test_streaming_join.py::TestStreamingJoin::test_grace_join[v1-client0] >> test_format_setting.py::TestS3::test_interval_unit[v2-client0] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] [GOOD] >> test_s3_0.py::TestS3::test_csv[v1-false-client0] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] [GOOD] >> test_early_finish.py::TestEarlyFinish::test_early_finish[v1-client0] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-4.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-5.test] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-1.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-2.test] >> test_yq_v2.py::TestS3::test_yqv2_enabled[v2-False-client0] >> test_bindings_1.py::TestBindings::test_s3_insert[v2-kikimr_settings0-client0] >> test_validation.py::TestS3::test_empty[v2-client0] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-9.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-insert.test] >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params0] >> test_statistics.py::TestS3::test_egress[v2-client0-json_list] [GOOD] >> test_statistics.py::TestS3::test_egress[v2-client0-json_each_row] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] [GOOD] |86.0%| [TA] $(B)/ydb/tests/functional/blobstorage/test-results/py3test/{meta.json ... 
results_accumulator.log} |86.0%| [TA] {RESULT} $(B)/ydb/tests/functional/blobstorage/test-results/py3test/{meta.json ... results_accumulator.log} >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-14.test] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] [GOOD] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_simple.py::TestSimple::test_multi[tablestores] [GOOD] >> test_statistics.py::TestS3::test_egress[v2-client0-json_each_row] [GOOD] >> test_statistics.py::TestS3::test_egress[v2-client0-csv_with_names] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-insert.test] [GOOD] >> test_public_metrics.py::TestPublicMetrics::test_public_metrics[v2-client0] [GOOD] >> test_public_metrics.py::TestPublicMetrics::test_public_metrics[v1-client0] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_s3_0.py::TestS3::test_csv[v1-false-client0] [GOOD] >> test_s3_0.py::TestS3::test_csv[v1-true-client0] >> test_statistics.py::TestS3::test_egress[v2-client0-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_egress[v2-client0-parquet] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-1.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-10.test] >> LabeledDbCounters::TwoTablets [GOOD] >> LabeledDbCounters::TwoTabletsKillOneTablet >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-3.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-4.test] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_format_setting.py::TestS3::test_interval_unit[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_interval_unit[v1-client0] >> test_statistics.py::TestS3::test_egress[v2-client0-parquet] [GOOD] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_scheme_load.py::TestSchemeLoad::test_multi[create_and_drop_tables] [GOOD] >> test_statistics.py::TestS3::test_egress[v1-client0-json_list] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] [GOOD] >> test_s3_0.py::TestS3::test_csv[v1-true-client0] [GOOD] >> test_s3_0.py::TestS3::test_csv[v2-false-client0] >> test_public_metrics.py::TestPublicMetrics::test_public_metrics[v1-client0] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-4.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-5.test] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-5.test] [GOOD] >> test_statistics.py::TestS3::test_egress[v1-client0-json_list] [GOOD] >> test_statistics.py::TestS3::test_egress[v1-client0-json_each_row] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-insert.test] [GOOD] >> test_yq_v2.py::TestS3::test_yqv2_enabled[v2-False-client0] [GOOD] >> test_yq_v2.py::TestS3::test_yqv2_enabled[v2-True-client0] >> test_format_setting.py::TestS3::test_interval_unit[v1-client0] [GOOD] >> test_format_setting.py::TestS3::test_bad_format_setting[v2-client0] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> 
test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_format_setting.py::TestS3::test_bad_format_setting[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_bad_format_setting[v1-client0] >> test_format_setting.py::TestS3::test_bad_format_setting[v1-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.csv-csv_with_names] >> test_bindings_1.py::TestBindings::test_s3_insert[v2-kikimr_settings0-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_s3_insert[v1-kikimr_settings0-client0] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] [GOOD] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-14.test] [GOOD] >> test_statistics.py::TestS3::test_egress[v1-client0-json_each_row] [GOOD] >> test_statistics.py::TestS3::test_egress[v1-client0-csv_with_names] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] [GOOD] >> test_yq_v2.py::TestS3::test_yqv2_enabled[v2-True-client0] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-1.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-2.test] >> test_early_finish.py::TestEarlyFinish::test_early_finish[v1-client0] [GOOD] >> test_yq_v2.py::TestS3::test_removed_database_path[v2-client0] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_s3_0.py::TestS3::test_csv[v2-false-client0] [GOOD] >> test_s3_0.py::TestS3::test_csv[v2-true-client0] >> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.tsv-tsv_with_names] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-5.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-1.test] >> test_bindings_0.py::TestBindings::test_binding_operations[v2-kikimr_settings0-client0] [GOOD] >> test_statistics.py::TestS3::test_egress[v1-client0-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_egress[v1-client0-parquet] >> test_bindings_0.py::TestBindings::test_binding_operations[v1-kikimr_settings0-client0] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_s3_0.py::TestS3::test_csv[v2-true-client0] [GOOD] >> test_s3_0.py::TestS3::test_inference[v2-client0] >> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.tsv-tsv_with_names] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> 
test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.json-json_each_row] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest >> HugeBlobOnlineSizeChange::Compaction 2025-06-30 09:28:50,407 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-30 09:28:50,551 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 129868 47.4M 46.9M 24.0M test_tool run_ut @/home/runner/.ya/build/build_root/cl3w/0048bb/ydb/core/blobstorage/ut_blobstorage/ut_huge/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/te 131310 166M 164M 152M └─ ydb-core-blobstorage-ut_blobstorage-ut_huge --trace-path-append /home/runner/.ya/build/build_root/cl3w/0048bb/ydb/core/blobstorage/ut_blobstorage/ut_huge/test-results/un Test command err: fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 0 targetHuge2# 1 targetHuge3# 1 RandomSeed# 5191612657573421766 fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 0 targetHuge2# 1 targetHuge3# 0 fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 0 targetHuge2# 0 targetHuge3# 1 fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 0 targetHuge2# 0 targetHuge3# 0 fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 1 targetHuge2# 1 targetHuge3# 1 small blob# 1 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 1 targetHuge2# 1 targetHuge3# 0 small blob# 1 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 1 targetHuge2# 0 targetHuge3# 1 small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 
RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 1 targetHuge2# 0 targetHuge3# 0 small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 0 targetHuge2# 1 targetHuge3# 1 small blob# 0 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 0 targetHuge2# 1 targetHuge3# 0 small blob# 0 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 0 targetHuge2# 0 targetHuge3# 1 small blob# 0 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 1 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 1 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 2 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 0 targetHuge2# 0 targetHuge3# 0 small blob# 0 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 1 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# 
[1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 1 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 1 targetHuge2# 1 targetHuge3# 1 small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 1 targetHuge2# 1 targetHuge3# 0 small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 1 targetHuge2# 0 targetHuge3# 1 small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 1 targetHuge2# 0 targetHuge3# 0 small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 2 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 2 huge3# 0 
targetHuge2# 1 targetHuge3# 1 small blob# 0 writing partIdx# 1 to 6 small blob# 1 compacting fresh checking parts in place mask# 2 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 2 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 2 huge3# 0 targetHuge2# 1 targetHuge3# 0 small blob# 0 writing partIdx# 1 to 6 small blob# 1 compacting fresh checking parts in place mask# 2 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 2 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 2 huge3# 0 targetHuge2# 0 targetHuge3# 1 small blob# 0 writing partIdx# 1 to 6 small blob# 0 compacting fresh checking parts in place mask# 2 {Location# {ChunkIdx: 1 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 2 {Location# {ChunkIdx: 1 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 2 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 2 huge3# 0 targetHuge2# 0 targetHuge3# 0 small blob# 0 writing partIdx# 1 to 6 small blob# 0 compacting fresh checking parts in place mask# 2 {Location# {ChunkIdx: 1 Offset: 12288 Size: 140} Database# 0 RecordType# 2 Blo ... 
Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 3 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 2 Level# 0} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 2 Level# 0} small blob# 0 compacting levels checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} fresh1# 6 fresh2# 1 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 1 targetHuge2# 0 targetHuge3# 1 small blob# 0 writing partIdx# 1 to 6 writing partIdx# 2 to 6 writing partIdx# 0 to 6 checking parts in place mask# 7 small blob# 1 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 3 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 2 Level# 0} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 2 Level# 0} small blob# 1 compacting levels checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 
RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} fresh1# 6 fresh2# 1 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 1 targetHuge2# 0 targetHuge3# 0 small blob# 0 writing partIdx# 1 to 6 writing partIdx# 2 to 6 writing partIdx# 0 to 6 checking parts in place mask# 7 small blob# 1 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 3 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 2 Level# 0} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 2 Level# 0} small blob# 0 compacting levels checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} fresh1# 6 fresh2# 1 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 0 targetHuge2# 1 targetHuge3# 1 small blob# 0 writing partIdx# 1 to 6 writing partIdx# 2 to 6 writing partIdx# 0 to 6 checking parts in place mask# 7 small blob# 1 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# 
[0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 3 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 2 Level# 0} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 2 Level# 0} small blob# 1 compacting levels checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} fresh1# 6 fresh2# 1 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 0 targetHuge2# 1 targetHuge3# 0 small blob# 0 writing partIdx# 1 to 6 writing partIdx# 2 to 6 writing partIdx# 0 to 6 checking parts in place mask# 7 small blob# 1 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 3 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 2 Level# 0} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 2 Level# 0} small blob# 0 compacting levels checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} fresh1# 6 fresh2# 1 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 0 targetHuge2# 0 targetHuge3# 1 Traceback (most recent call last): File 
"library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/cl3w/0048bb/ydb/core/blobstorage/ut_blobstorage/ut_huge/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/cl3w/0048bb/ydb/core/blobstorage/ut_blobstorage/ut_huge/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) >> test_statistics.py::TestS3::test_egress[v1-client0-parquet] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-json_list] >> test_postgres.py::TestPGSQL::test_sql_suite[results-select.test] [GOOD] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-15.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-2.test] |86.0%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/test-results/unittest/{meta.json ... results_accumulator.log} >> test_yq_v2.py::TestS3::test_removed_database_path[v2-client0] [GOOD] |86.0%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_yq_v2.py::TestS3::test_query_parameters[v2-client0] >> test_bindings_1.py::TestBindings::test_s3_insert[v1-kikimr_settings0-client0] [GOOD] >> test_s3_0.py::TestS3::test_inference[v2-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_s3_format_mismatch[v2-client0] >> test_s3_0.py::TestS3::test_inference_null_column[v2-client0] >> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.json-json_each_row] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.parquet-parquet] >> test_yq_v2.py::TestS3::test_query_parameters[v2-client0] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] [GOOD] >> test_push_down.py::TestS3PushDown::test_simple_case[v1-client0] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-json_list] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-json_each_row] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |86.0%| [TA] $(B)/ydb/tests/functional/rename/test-results/py3test/{meta.json ... results_accumulator.log} >> test_s3_0.py::TestS3::test_inference_null_column[v2-client0] [GOOD] |86.0%| [TA] {RESULT} $(B)/ydb/tests/functional/rename/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_s3_0.py::TestS3::test_inference_optional_types[v2-client0] >> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.csv-csv_with_names] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.csv-csv_with_names] [GOOD] >> test_s3_0.py::TestS3::test_inference_optional_types[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.tsv-tsv_with_names] >> test_s3_0.py::TestS3::test_inference_multiple_files[v2-client0] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-json_each_row] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-csv_with_names] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-5.test] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.json-json_each_row] >> test_bindings_0.py::TestBindings::test_binding_operations[v1-kikimr_settings0-client0] [GOOD] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v2-kikimr_settings0-client0] >> test_s3_0.py::TestS3::test_inference_multiple_files[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_inference_file_error[v2-client0] >> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.json-json_each_row] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-4.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-5.test] >> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.parquet-parquet] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_s3_0.py::TestS3::test_inference_file_error[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_inference_parameters[v2-client0] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-parquet] >> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.csv-csv_with_names] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_public_metrics.py::TestPublicMetrics::test_public_metrics[v1-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004752/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_public_metrics/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004752/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_public_metrics/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=690338) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 695248 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[results-select.test] [GOOD] >> test_push_down.py::TestS3PushDown::test_simple_case[v1-client0] [GOOD] >> test_push_down.py::TestS3PushDown::test_simple_case[v2-client0] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-1.test] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_s3_0.py::TestS3::test_inference_parameters[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_inference_timestamp[v2-client0] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_bindings_1.py::TestBindings::test_s3_format_mismatch[v2-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_s3_format_mismatch[v1-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is 
deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-parquet] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-json_list] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_early_finish.py::TestEarlyFinish::test_early_finish[v1-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004742/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_early_finish/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004742/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_early_finish/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=696678) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 701958 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_yq_v2.py::TestS3::test_query_parameters[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00473d/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_yq_v2/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00473d/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_yq_v2/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=700072) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 704859 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.tsv-tsv_with_names] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-5.test] [GOOD] >> test_push_down.py::TestS3PushDown::test_simple_case[v2-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_s3_format_mismatch[v1-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_pg_binding[v2-client0] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_streaming_join.py::TestStreamingJoin::test_grace_join[v1-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_pg_binding[v2-client0] [GOOD] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_bindings_1.py::TestBindings::test_pg_binding[v1-client0] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.tsv-tsv_with_names] [GOOD] >> 
test_s3_0.py::TestS3::test_inference_timestamp[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.json-json_each_row] >> test_s3_0.py::TestS3::test_inference_projection[v2-client0] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_insert.py::TestInsert::test[read_data_during_bulk_upsert] [GOOD] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v2-kikimr_settings0-client0] [SKIPPED] >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v1-kikimr_settings0-client0] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v1-kikimr_settings0-client0] [SKIPPED] >> test_bindings_0.py::TestBindings::test_binding_operations[v2-kikimr_settings1-client0] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-json_list] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-json_each_row] >> test_bindings_1.py::TestBindings::test_pg_binding[v1-client0] [GOOD] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v2-yql_syntax-client0] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_formats.py::TestS3Formats::test_format[v2-test.csv-csv_with_names-kikimr_settings0] >> test_s3_0.py::TestS3::test_inference_projection[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_inference_null_column_name[v2-client0] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.gz-gzip] >> test_insert.py::TestS3::test_insert[v1-false-client0-json_list-dataset] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v2-yql_syntax-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v2-pg_syntax-client0] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.parquet-parquet] >> test_s3_0.py::TestS3::test_inference_null_column_name[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_inference_unsupported_types[v2-client0] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-1.test] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-json_each_row] [GOOD] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v2-pg_syntax-client0] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> 
test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-csv_with_names] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v1-yql_syntax-client0] >> test_s3_0.py::TestS3::test_inference_unsupported_types[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_json_list_formats[v2-client0] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_s3_1.py::TestS3::test_write_result[v1-kikimr_params0-client0] >> test_test_connection.py::TestConnection::test_test_s3_connection[v1-client0] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.parquet-parquet] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-6.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-7.test] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v1-yql_syntax-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.csv-csv_with_names] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v1-pg_syntax-client0] |86.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_s3_0.py::TestS3::test_json_list_formats[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_csv_with_hopping[v1-client0] >> test_ydb_over_fq.py::TestYdbOverFq::test_list_directory_v2[v2-client0] |86.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-5.test] [GOOD] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v1-pg_syntax-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_ast_in_failed_query_compilation[v2-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_push_down.py::TestS3PushDown::test_simple_case[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004733/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_push_down/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004733/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_push_down/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=731720) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 735734 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client0-year Int32-False] >> test_s3_0.py::TestS3::test_csv_with_hopping[v1-client0] [GOOD] >> test_s3_0.py::TestS3::test_csv_with_hopping[v2-client0] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.tsv-tsv_with_names] >> test_bindings_1.py::TestBindings::test_ast_in_failed_query_compilation[v2-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_ast_in_failed_query_compilation[v1-client0] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-parquet] >> test_sql.py::TestCanonicalFolder1::test_case[compute/scheduler.sql-plan] >> test_s3_0.py::TestS3::test_csv_with_hopping[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_raw[v1-false-client0] >> test_formats.py::TestS3Formats::test_format[v2-test.csv-csv_with_names-kikimr_settings0] [GOOD] >> test_formats.py::TestS3Formats::test_format[v2-test.tsv-tsv_with_names-kikimr_settings0] |86.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] 
[GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v1-false-client0] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.tsv-tsv_with_names] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.json-json_each_row] >> test_bindings_1.py::TestBindings::test_ast_in_failed_query_compilation[v1-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_raw_empty_schema_binding[v2-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead 
contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_bindings_1.py::TestBindings::test_raw_empty_schema_binding[v2-client0] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.gz-gzip] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.lz4-lz4] >> test_bindings_1.py::TestBindings::test_raw_empty_schema_binding[v1-client0] >> test_insert.py::TestS3::test_insert[v1-false-client0-json_list-dataset] [GOOD] >> test_insert.py::TestS3::test_insert[v1-false-client0-json_list-dataにちは% set] >> test_bindings_1.py::TestBindings::test_raw_empty_schema_binding[v1-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_binding_with_backslash_in_location[v2-client0] >> test_s3_0.py::TestS3::test_raw[v1-false-client0] [GOOD] >> test_test_connection.py::TestConnection::test_test_s3_connection[v1-client0] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_test_connection.py::TestConnection::test_test_s3_connection[v2-client0] >> test_s3_0.py::TestS3::test_raw[v1-true-client0] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_formats.py::TestS3Formats::test_format[v2-test.tsv-tsv_with_names-kikimr_settings0] [GOOD] >> test_test_connection.py::TestConnection::test_test_s3_connection[v2-client0] [GOOD] >> test_formats.py::TestS3Formats::test_format[v2-test_each_row.json-json_each_row-kikimr_settings0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params0-false] >> test_test_connection.py::TestConnection::test_test_s3_connection_uri[v1-client0] >> test_test_connection.py::TestConnection::test_test_s3_connection_uri[v1-client0] [GOOD] >> test_test_connection.py::TestConnection::test_test_s3_connection_uri[v2-client0] >> test_test_connection.py::TestConnection::test_test_s3_connection_uri[v2-client0] [GOOD] >> test_test_connection.py::TestConnection::test_test_s3_connection_error[v1-client0] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefonly.sql-plan] >> test_test_connection.py::TestConnection::test_test_s3_connection_error[v1-client0] [GOOD] >> test_test_connection.py::TestConnection::test_test_s3_connection_error[v2-client0] >> test_test_connection.py::TestConnection::test_test_s3_connection_error[v2-client0] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.lz4-lz4] [GOOD] >> test_bindings_1.py::TestBindings::test_binding_with_backslash_in_location[v2-client0] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.br-brotli] >> test_bindings_1.py::TestBindings::test_binding_with_backslash_in_location[v1-client0] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q21.sql-result_sets] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-parquet] [GOOD] >> test_s3_0.py::TestS3::test_raw[v1-true-client0] [GOOD] >> test_s3_0.py::TestS3::test_raw[v2-false-client0] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-json_list] >> test_formats.py::TestS3Formats::test_format[v2-test_each_row.json-json_each_row-kikimr_settings0] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.json-json_each_row] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/null_select.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_streaming_join.py::TestStreamingJoin::test_grace_join[v1-client0] [GOOD] >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00474b/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_streaming_join/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00474b/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_streaming_join/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=693398) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 697982 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_formats.py::TestS3Formats::test_format[v2-test_list.json-json_list-kikimr_settings0] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.parquet-parquet] >> test_inflight.py::TestS3::test_data_inflight[v1-client0-kikimr_params0] >> test_insert.py::TestS3::test_insert[v1-false-client0-json_list-dataにちは% set] [GOOD] >> test_insert.py::TestS3::test_insert[v1-false-client0-json_each_row-dataset] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases_2.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[compute/scheduler.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[compute/scheduler.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.br-brotli] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.bz2-bzip2] |86.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_bindings_1.py::TestBindings::test_binding_with_backslash_in_location[v1-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_decimal_binding[v2-client0] >> test_s3_0.py::TestS3::test_raw[v2-false-client0] [GOOD] >> test_s3_0.py::TestS3::test_raw[v2-true-client0] >> test_formats.py::TestS3Formats::test_format[v2-test_list.json-json_list-kikimr_settings0] [GOOD] >> test_formats.py::TestS3Formats::test_format[v2-test.parquet-parquet-kikimr_settings0] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.csv-csv_with_names] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[compute/scheduler.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dt.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_pk.sql-result_sets] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.bz2-bzip2] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.zst-zstd] >> test_bindings_1.py::TestBindings::test_decimal_binding[v2-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_decimal_binding[v1-client0] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_3.sql-plan] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefonly.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefonly.sql-result_sets] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client0-year Int32-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client1-year Int32 NOT NULL-False] >> 
test_insert.py::TestS3::test_insert[v1-false-client0-json_each_row-dataset] [GOOD] >> test_s3_0.py::TestS3::test_raw[v2-true-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in.sql-plan] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_insert.py::TestS3::test_insert[v1-false-client0-json_each_row-dataにちは% set] >> test_s3_0.py::TestS3::test_limit[v1-false-kikimr_params0-client0] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q21.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q22.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[dt.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dt.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[simple/null_select.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q1.sql-plan] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.csv-csv_with_names] [GOOD] >> test_formats.py::TestS3Formats::test_format[v2-test.parquet-parquet-kikimr_settings0] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.zst-zstd] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.tsv-tsv_with_names] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.xz-xz] >> test_formats.py::TestS3Formats::test_format[v1-test.csv-csv_with_names-kikimr_settings0] >> test_bindings_1.py::TestBindings::test_decimal_binding[v1-client0] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-json_list] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases_2.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases_2.sql-result_sets] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-json_each_row] >> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v1-false-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v1-true-client0] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefonly.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefsemi.sql-plan] >> test_validation.py::TestS3::test_empty[v2-client0] [GOOD] >> test_validation.py::TestS3::test_empty[v1-client0] >> test_bindings_0.py::TestBindings::test_binding_operations[v2-kikimr_settings1-client0] [GOOD] >> test_insert.py::TestS3::test_insert[v1-false-client0-json_each_row-dataにちは% set] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q22.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q22.sql-result_sets] >> test_insert.py::TestS3::test_insert[v1-false-client0-csv_with_names-dataset] >> test_bindings_0.py::TestBindings::test_binding_operations[v1-kikimr_settings1-client0] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.xz-xz] [GOOD] >> 
test_sql.py::TestCanonicalFolder1::test_case[json/insert_params.sql-result_sets] >> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.gz-gzip] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q1.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q1.sql-result_sets] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client1-year Int32 NOT NULL-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client2-year Uint32-False] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases_2.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index_predicate_point.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[dt.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_from_table.sql-plan] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_1.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_pk.sql-result_sets] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.json-json_each_row] >> test_sql.py::TestCanonicalFolder1::test_case[udfs/math.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefsemi.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefsemi.sql-result_sets] >> test_formats.py::TestS3Formats::test_format[v1-test.csv-csv_with_names-kikimr_settings0] [GOOD] >> test_formats.py::TestS3Formats::test_format[v1-test.tsv-tsv_with_names-kikimr_settings0] >> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v1-true-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v2-false-client0] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q22.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q3.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q10.sql-plan] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-10.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-11.test] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_3.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_3.sql-result_sets] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params0-false] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index_predicate_point.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index_predicate_point.sql-result_sets] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params0-true] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.json-json_each_row] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_from_table.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_from_table.sql-result_sets] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.parquet-parquet] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client2-year Uint32-False] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[udfs/math.sql-plan] [GOOD] >> 
test_sql.py::TestCanonicalFolder1::test_case[udfs/math.sql-result_sets] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client3-year Uint32 NOT NULL-True] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in.sql-result_sets] |86.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefsemi.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_left.sql-plan] >> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.gz-gzip] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.lz4-lz4] >> test_insert.py::TestS3::test_insert[v1-false-client0-csv_with_names-dataset] [GOOD] >> test_insert.py::TestS3::test_insert[v1-false-client0-csv_with_names-dataにちは% set] >> test_formats.py::TestS3Formats::test_format[v1-test.tsv-tsv_with_names-kikimr_settings0] [GOOD] >> test_formats.py::TestS3Formats::test_format[v1-test_each_row.json-json_each_row-kikimr_settings0] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q3.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q3.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q10.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q10.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_3.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_4.sql-plan] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params0-true] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_from_table.sql-result_sets] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-json_each_row] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index_predicate_point.sql-result_sets] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params0-false] >> test_sql.py::TestCanonicalFolder1::test_case[join/group_by_lookup.script-script] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_literal.sql-plan] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-csv_with_names] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.csv-csv_with_names] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_range.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[udfs/math.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_by_pk.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_params.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/json_query.sql-plan] >> test_formats.py::TestS3Formats::test_format[v1-test_each_row.json-json_each_row-kikimr_settings0] [GOOD] >> 
test_formats.py::TestS3Formats::test_format[v1-test_list.json-json_list-kikimr_settings0] >> test_s3_1.py::TestS3::test_write_result[v1-kikimr_params0-client0] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.lz4-lz4] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_left.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_left.sql-result_sets] >> test_s3_1.py::TestS3::test_top_level_listing_2[v1-kikimr_params0-false-client0] >> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.br-brotli] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client3-year Uint32 NOT NULL-True] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_4.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_4.sql-result_sets] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client4-year Int64-False] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q10.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q11.sql-plan] >> test_insert.py::TestS3::test_insert[v1-false-client0-csv_with_names-dataにちは% set] [GOOD] >> test_insert.py::TestS3::test_insert[v1-false-client0-parquet-dataset] >> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v2-false-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q3.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q4.sql-plan] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params0-false] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_test_connection.py::TestConnection::test_test_s3_connection_error[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004704/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_test_connection/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004704/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_test_connection/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=771720) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 776944 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v2-true-client0] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params0-true] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_literal.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_literal.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int.sql-plan] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_sql.py::TestCanonicalFolder1::test_case[join/group_by_lookup.script-script] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_double_lookup.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_range.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_range.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_by_pk.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/json_query.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/json_query.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_by_pk.sql-result_sets] >> test_formats.py::TestS3Formats::test_format[v1-test_list.json-json_list-kikimr_settings0] [GOOD] >> test_formats.py::TestS3Formats::test_format[v1-test.parquet-parquet-kikimr_settings0] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.tsv-tsv_with_names] >> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.br-brotli] [GOOD] >> 
test_compressions.py::TestS3Compressions::test_compression[v1-test.json.bz2-bzip2] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client4-year Int64-False] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_left.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_left_null.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_4.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_5.sql-plan] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client5-year Int64 NOT NULL-False] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q11.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q11.sql-result_sets] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params0-true] [GOOD] >> test_inflight.py::TestS3::test_data_inflight[v1-client0-kikimr_params0] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params0-false] >> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v2-true-client0] [GOOD] >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params1] >> test_explicit_partitioning_0.py::TestS3::test_projection[v1-false-client0] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q4.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q4.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_literal.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_params.sql-plan] >> test_formats.py::TestS3Formats::test_format[v1-test.parquet-parquet-kikimr_settings0] [GOOD] >> test_insert.py::TestS3::test_insert[v1-false-client0-parquet-dataset] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_double_lookup.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_double_lookup.sql-result_sets] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_bindings_0.py::TestBindings::test_binding_operations[v1-kikimr_settings1-client0] [GOOD] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.csv-csv_with_names] >> test_insert.py::TestS3::test_insert[v1-false-client0-parquet-dataにちは% set] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.json-json_each_row] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v2-kikimr_settings1-client0] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_range.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_rp.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_by_pk.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_on.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[json/json_query.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_from_table.sql-plan] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client5-year Int64 NOT NULL-False] [GOOD] >> 
test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_left_null.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_left_null.sql-result_sets] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client6-year Uint64-False] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_5.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_5.sql-result_sets] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-parquet] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q11.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q12.sql-plan] >> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.bz2-bzip2] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.zst-zstd] >> test_explicit_partitioning_0.py::TestS3::test_projection[v1-false-client0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_params.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_params.sql-result_sets] >> test_explicit_partitioning_0.py::TestS3::test_projection[v1-true-client0] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int_1.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q4.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q5.sql-plan] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params0-false] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is 
deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.json-json_each_row] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params0-true] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.parquet-parquet] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_on.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_on.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_double_lookup.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_dup_column_right.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_rp.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_rp.sql-result_sets] >> test_s3_0.py::TestS3::test_limit[v1-false-kikimr_params0-client0] [GOOD] >> test_s3_0.py::TestS3::test_limit[v1-true-kikimr_params0-client0] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_from_table.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_from_table.sql-result_sets] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client6-year Uint64-False] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.zst-zstd] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client7-year Uint64 NOT NULL-False] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_left_null.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right.sql-plan] >> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.xz-xz] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q12.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q12.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_params.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_from_table.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_5.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_6.sql-plan] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.parquet-parquet] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int_1.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int_1.sql-result_sets] >> 
test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.csv-csv_with_names] >> test_insert.py::TestS3::test_insert[v1-false-client0-parquet-dataにちは% set] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params0-true] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q5.sql-plan] [GOOD] >> test_insert.py::TestS3::test_insert[v1-true-client0-json_list-dataset] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params0-false] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_on.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_same.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q5.sql-result_sets] >> test_insert.py::TestS3::test_insert[v1-true-client0-json_list-dataset] [SKIPPED] >> test_insert.py::TestS3::test_insert[v1-true-client0-json_list-dataにちは% set] >> test_insert.py::TestS3::test_insert[v1-true-client0-json_list-dataにちは% set] [SKIPPED] >> test_insert.py::TestS3::test_insert[v1-true-client0-json_each_row-dataset] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_dup_column_right.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_dup_column_right.sql-result_sets] >> test_insert.py::TestS3::test_insert[v1-true-client0-json_each_row-dataset] [SKIPPED] >> test_explicit_partitioning_0.py::TestS3::test_projection[v1-true-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection[v2-false-client0] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_rp.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null.sql-plan] >> test_insert.py::TestS3::test_insert[v1-true-client0-json_each_row-dataにちは% set] [SKIPPED] >> test_insert.py::TestS3::test_insert[v1-true-client0-csv_with_names-dataset] >> test_insert.py::TestS3::test_insert[v1-true-client0-csv_with_names-dataset] [SKIPPED] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_from_table.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_literal.sql-plan] >> test_insert.py::TestS3::test_insert[v1-true-client0-csv_with_names-dataにちは% set] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q12.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q13.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right.sql-result_sets] >> test_insert.py::TestS3::test_insert[v1-true-client0-csv_with_names-dataにちは% set] [SKIPPED] >> test_insert.py::TestS3::test_insert[v1-true-client0-parquet-dataset] >> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.xz-xz] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.gz-gzip] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client7-year Uint64 NOT NULL-False] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.csv-csv_with_names] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_from_table.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_from_table.sql-result_sets] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client8-year String NOT NULL-True] >> 
test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.tsv-tsv_with_names] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_6.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_6.sql-result_sets] >> test_dml.py::TestDML::test_dml[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int_1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_same.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_same.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_sql.py::TestCanonicalFolder1::test_case[simple/q5.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q6.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_dup_column_right.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_range_right.sql-plan] >> test_s3_0.py::TestS3::test_limit[v1-true-kikimr_params0-client0] [GOOD] >> test_s3_0.py::TestS3::test_limit[v2-false-kikimr_params0-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
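Note on the ydb/types.py warning that closes the block above: datetime.datetime.utcfromtimestamp() is deprecated since Python 3.12 in favour of timezone-aware construction. A minimal sketch of the replacement the message suggests (illustrative only):

from datetime import datetime, timezone

ts = 1_700_000_000
# timezone.utc is the same object as datetime.UTC on Python 3.11+.
aware = datetime.fromtimestamp(ts, timezone.utc)  # timezone-aware UTC datetime
naive = aware.replace(tzinfo=None)                # equals the old utcfromtimestamp(ts)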
>> test_sql.py::TestCanonicalFolder1::test_case[json/select_literal.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_literal.sql-result_sets] >> test_explicit_partitioning_0.py::TestS3::test_projection[v2-false-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q13.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q13.sql-result_sets] >> test_explicit_partitioning_0.py::TestS3::test_projection[v2-true-client0] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right_2.sql-plan] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params0-false] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-parquet] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params0-true] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_from_table.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_literal.sql-plan] >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test[read_update_write_load] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-json_list] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client8-year String NOT NULL-True] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.json-json_each_row] >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test_multi[read_update_write_load] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client9-year String-False] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.gz-gzip] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_bindings_1.py::TestBindings::test_decimal_binding[v1-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00473c/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_bindings_1/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00473c/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_bindings_1/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=700298) is multi-threaded, use of fork() may lead to deadlocks in the child. 
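Note on the popen_fork.py warning ending the block above: Python 3.12+ warns when multiprocessing fork()s a process that already has running threads, because locks held by other threads can deadlock the child. A common mitigation is to select a non-fork start method early; a sketch, not the tests' actual bootstrap:

import multiprocessing as mp

def main():
    with mp.Pool(2) as pool:
        print(pool.map(abs, [-1, -2, -3]))

if __name__ == "__main__":
    # "spawn" launches a fresh interpreter instead of fork()ing the
    # (possibly multi-threaded) parent, avoiding the deadlock risk.
    mp.set_start_method("spawn")
    main()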
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 705521 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_6.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_7.sql-plan] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.lz4-lz4] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_same.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/insert.sql-plan] >> test_insert.py::TestS3::test_insert[v1-true-client0-parquet-dataset] [GOOD] |86.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_insert.py::TestS3::test_insert[v1-true-client0-parquet-dataにちは% set] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null_1.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q6.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q6.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_range_right.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_literal.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_params.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_range_right.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q13.sql-result_sets] [GOOD] >> test_dml.py::TestDML::test_dml[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q14.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right_2.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right_2.sql-result_sets] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params0-true] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params0-false] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_literal.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_literal.sql-result_sets] >> test_s3_1.py::TestS3::test_top_level_listing_2[v1-kikimr_params0-false-client0] [GOOD] >> 
test_s3_1.py::TestS3::test_top_level_listing_2[v1-kikimr_params0-true-client0] >> test_dml.py::TestDML::test_dml[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_explicit_partitioning_0.py::TestS3::test_projection[v2-true-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v1-false-client0] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_7.sql-plan] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client9-year String-False] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/insert.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/insert.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_7.sql-result_sets] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-2.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-3.test] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client10-year Utf8-False] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp_1.sql-plan] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.json-json_each_row] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
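Note on the Flask/Werkzeug deprecation blocks repeated throughout this section: they name two Python 3.14 removals, pkgutil.find_loader (replace with importlib.util.find_spec) and ast.Str with its .s attribute (replace with ast.Constant and .value). A minimal sketch of both replacements, illustrative only since the real fixes belong in the vendored libraries:

import ast
import importlib.util

# pkgutil.find_loader("json")  ->  importlib.util.find_spec("json")
module_exists = importlib.util.find_spec("json") is not None

# ast.Str / node.s  ->  ast.Constant / node.value
node = ast.parse("'hello'").body[0].value
if isinstance(node, ast.Constant) and isinstance(node.value, str):
    text = node.value  # formerly node.s on ast.Str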
>> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.lz4-lz4] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.parquet-parquet] >> test_dml.py::TestDML::test_dml[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.br-brotli] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_params.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_params.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null_1.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null_1.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q14.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q14.sql-result_sets] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q6.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q7.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_range_right.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_literal.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_params.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right_2.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_range_left.sql-plan] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v1-false-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v1-true-client0] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params0-false] [GOOD] >> test_dml.py::TestDML::test_dml[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params0-true] >> test_insert.py::TestS3::test_insert[v1-true-client0-parquet-dataにちは% set] [GOOD] >> test_insert.py::TestS3::test_insert[v2-false-client0-json_list-dataset] >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-json_list] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client10-year Utf8-False] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_7.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_8.sql-plan] >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-json_each_row] >> test_sql.py::TestCanonicalFolder1::test_case[write/insert.sql-result_sets] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client11-year Utf8 NOT NULL-True] >> test_sql.py::TestCanonicalFolder1::test_case[write/insert_revert.sql-plan] >> test_s3_1.py::TestS3::test_top_level_listing_2[v1-kikimr_params0-true-client0] [GOOD] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v2-kikimr_settings1-client0] [SKIPPED] >> test_s3_1.py::TestS3::test_top_level_listing_2[v2-kikimr_params0-false-client0] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v1-kikimr_settings1-client0] >> 
test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp_1.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp_1.sql-result_sets] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v1-kikimr_settings1-client0] [SKIPPED] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.csv-csv_with_names] [GOOD] >> test_bindings_0.py::TestBindings::test_name_uniqueness_constraint[v2-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.parquet-parquet] [GOOD] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.tsv-tsv_with_names] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.csv-csv_with_names] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null_1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point.sql-plan] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.br-brotli] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q14.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q15.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_params.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q7.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q7.sql-result_sets] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.bz2-bzip2] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_params.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_params.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup.sql-result_sets] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params0-true] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params0-false] >> test_s3_0.py::TestS3::test_limit[v2-false-kikimr_params0-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v1-true-client0] [GOOD] >> test_s3_0.py::TestS3::test_limit[v2-true-kikimr_params0-client0] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v2-false-client0] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_range_left.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_range_left.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_8.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_8.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[write/insert_revert.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/insert_revert.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: 
DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client11-year Utf8 NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client12-year Date-False] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.bz2-bzip2] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp_1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse.sql-plan] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.tsv-tsv_with_names] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.zst-zstd] >> test_formats.py::TestS3Formats::test_format_inference[v2-test_each_row.json-json_each_row] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.csv-csv_with_names] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk.sql-result_sets] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.tsv-tsv_with_names] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q15.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q15.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_params.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[explain.script-script] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q7.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q8.sql-plan] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v2-false-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_range_left.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_right_key_range.sql-plan] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params0-false] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params0-true] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v2-true-client0] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_8.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_9.sql-plan] >> 
test_sql.py::TestCanonicalFolder1::test_case[write/insert_revert.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/multi_usage.script-script] >> test_formats.py::TestS3Formats::test_format_inference[v2-test_each_row.json-json_each_row] [GOOD] >> test_formats.py::TestS3Formats::test_format_inference[v2-test_list.json-json_list] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client12-year Date-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client13-year Date NOT NULL-True] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.json-json_each_row] >> test_insert.py::TestS3::test_insert[v2-false-client0-json_list-dataset] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-csv_with_names] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse.sql-result_sets] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.zst-zstd] [GOOD] >> test_insert.py::TestS3::test_insert[v2-false-client0-json_list-dataにちは% set] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point_range_rp.sql-plan] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.xz-xz] >> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk_composite.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q15.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q16.sql-plan] >> test_s3_1.py::TestS3::test_top_level_listing_2[v2-kikimr_params0-false-client0] [GOOD] >> test_s3_1.py::TestS3::test_top_level_listing_2[v2-kikimr_params0-true-client0] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast.sql-result_sets] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params0-true] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q8.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q8.sql-result_sets] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params1-false] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead 
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_right_key_range.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_right_key_range.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[explain.script-script] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index.sql-plan] >> test_s3_0.py::TestS3::test_limit[v2-true-kikimr_params0-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_9.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_9.sql-result_sets] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.parquet-parquet] >> test_formats.py::TestS3Formats::test_format_inference[v2-test_list.json-json_list] [GOOD] >> test_s3_0.py::TestS3::test_bad_format[v1-false-client0] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.parquet-parquet] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v2-true-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point_range_rp.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point_range_rp.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse_1.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[write/multi_usage.script-script] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_validation[v1-client0] >> test_sql.py::TestCanonicalFolder1::test_case[write/multi_usage_key.script-script] >> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk_composite.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk_composite.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q16.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q16.sql-result_sets] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client13-year Date NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_validation[v1-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_validation[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client14-year Datetime-False] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.xz-xz] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast2.sql-plan] |86.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> 
test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.gz-gzip] >> test_s3_1.py::TestS3::test_top_level_listing_2[v2-kikimr_params0-true-client0] [GOOD] >> test_insert.py::TestS3::test_insert[v2-false-client0-json_list-dataにちは% set] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index.sql-result_sets] >> test_explicit_partitioning_0.py::TestS3::test_validation[v2-client0] [GOOD] >> test_s3_1.py::TestS3::test_precompute[v1-false-client0] >> test_insert.py::TestS3::test_insert[v2-false-client0-json_each_row-dataset] >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v1-false-client0] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_right_key_range.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_rightsemi.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q8.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q9.sql-plan] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.parquet-parquet] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_9.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[range_skip_take.sql-plan] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.csv-csv_with_names] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point_range_rp.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse_1.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse_1.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk_composite.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_bool.sql-plan] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.parquet-parquet] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/multi_usage_key.script-script] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/multi_write.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q16.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q17.sql-plan] >> test_formats.py::TestS3Formats::test_btc[v2] >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast2.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast2.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index_only.sql-plan] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client14-year Datetime-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v1-false-client0] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> 
test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v1-true-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client15-year Datetime NOT NULL-True] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q9.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q9.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_rightsemi.sql-plan] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.gz-gzip] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_rightsemi.sql-result_sets] >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-parquet] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.lz4-lz4] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[range_skip_take.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[range_skip_take.sql-result_sets] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.tsv-tsv_with_names] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_bool.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_bool.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
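Note on the "Enable tracemalloc to get the object allocation traceback" suffix carried by every ResourceWarning in this log: it refers to running the tests with PYTHONTRACEMALLOC=1, or calling tracemalloc.start() early, after which Python appends the leaked object's allocation traceback to the warning. A minimal in-process sketch:

import tracemalloc

tracemalloc.start(10)  # record up to 10 frames per allocation site

# ... exercise the leaking code; subsequent ResourceWarnings will now
# include where the unclosed object was allocated.  The same data is
# reachable directly for a live object:
# tb = tracemalloc.get_object_traceback(obj)
# print("\n".join(tb.format()))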
>> test_sql.py::TestCanonicalFolder1::test_case[write/multi_write.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/multi_write.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse_1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_subquery.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q17.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q17.sql-result_sets] >> test_bindings_0.py::TestBindings::test_name_uniqueness_constraint[v2-client0] [GOOD] >> test_bindings_0.py::TestBindings::test_name_uniqueness_constraint[v1-client0] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index_only.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index_only.sql-result_sets] >> test_insert.py::TestS3::test_insert[v2-false-client0-json_each_row-dataset] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast2.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_dup_c_left.sql-plan] >> test_formats.py::TestS3Formats::test_btc[v2] [GOOD] >> test_insert.py::TestS3::test_insert[v2-false-client0-json_each_row-dataにちは% set] >> test_formats.py::TestS3Formats::test_btc[v1] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q9.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/script_params.script-script] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client15-year Datetime NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v1-true-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client0-year Int32-False] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_rightsemi.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_simple_c.sql-plan] >> 
test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v2-false-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.lz4-lz4] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.json-json_each_row] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.br-brotli] >> test_bindings_0.py::TestBindings::test_name_uniqueness_constraint[v1-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/multi_write.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/replace.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_subquery.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_subquery.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[range_skip_take.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[select_result_limit.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars_ranges.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_bool.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_comparison_empty_string.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q17.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q18.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index_only.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_on_top_of_apply.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_dup_c_left.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_dup_c_left.sql-result_sets] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client0-year Int32-False] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/script_params.script-script] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[table_types.script-script] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client1-year Int32 NOT NULL-False] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_simple_c.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_simple_c.sql-result_sets] >> test_insert.py::TestS3::test_insert[v2-false-client0-json_each_row-dataにちは% set] [GOOD] >> test_insert.py::TestS3::test_insert[v2-false-client0-csv_with_names-dataset] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.br-brotli] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v2-false-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/replace.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/replace.sql-result_sets] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.bz2-bzip2] >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v2-true-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test 
command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_sql.py::TestCanonicalFolder1::test_case[simple/q18.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q18.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_comparison_empty_string.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_comparison_empty_string.sql-result_sets] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.json-json_each_row] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_subquery.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_utf8.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[select_result_limit.sql-plan] [GOOD] >> test_formats.py::TestS3Formats::test_btc[v1] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.parquet-parquet] >> test_sql.py::TestCanonicalFolder1::test_case[select_result_limit.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_on_top_of_apply.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_on_top_of_apply.sql-result_sets] >> test_formats.py::TestS3Formats::test_invalid_format[v2-client0] >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_dup_c_left.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_equi.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars_ranges.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars_ranges.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead 
contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client1-year Int32 NOT NULL-False] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-parquet] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client2-year Uint32-False] >> test_statistics.py::TestS3::test_convert[v1-client0-json_list-json_list] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_simple_c.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_using_index.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[table_types.script-script] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort.sql-plan] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-2.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-3.test] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q18.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q19.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[write/replace.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/update.sql-plan] >> test_formats.py::TestS3Formats::test_invalid_format[v2-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_utf8.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_utf8.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test_multi[read_update_write_load] [GOOD] Test command err: Was written: 0.0 MiB, Speed: 0.0 MiB/s Step 1. 
only write Write: 10% 1797 30% 1797 50% 1797 90% 1797 99% 1797 ms Write: 10% 3322 30% 3322 50% 3322 90% 3322 99% 3322 ms Write: 10% 4233 30% 4233 50% 4233 90% 4233 99% 4233 ms Write: 10% 7302 30% 7302 50% 7302 90% 7302 99% 7302 ms Write: 10% 9974 30% 9974 50% 9974 90% 9974 99% 9974 ms Write: 10% 11361 30% 11361 50% 11361 90% 11361 99% 11361 ms Write: 10% 13936 30% 13936 50% 13936 90% 13936 99% 13936 ms Write: 10% 12938 30% 12938 50% 12938 90% 12938 99% 12938 ms Write: 10% 13739 30% 13739 50% 13739 90% 13739 99% 13739 ms Write: 10% 14578 30% 14578 50% 14578 90% 14578 99% 14578 ms Write: 10% 14314 30% 14314 50% 14314 90% 14314 99% 14314 ms Write: 10% 13486 30% 13486 50% 13486 90% 13486 99% 13486 ms Write: 10% 13620 30% 13620 50% 13620 90% 13620 99% 13620 ms Write: 10% 14066 30% 14066 50% 14066 90% 14066 99% 14066 ms Write: 10% 14211 30% 14211 50% 14211 90% 14211 99% 14211 ms Write: 10% 13775 30% 13775 50% 13775 90% 13775 99% 13775 ms Write: 10% 14119 30% 14119 50% 14119 90% 14119 99% 14119 ms Write: 10% 8745 30% 8745 50% 8745 90% 8745 99% 8745 ms Write: 10% 8566 30% 8566 50% 8566 90% 8566 99% 8566 ms Write: 10% 12326 30% 12326 50% 12326 90% 12326 99% 12326 ms Write: 10% 6866 30% 6866 50% 6866 90% 6866 99% 6866 ms Write: 10% 13143 30% 13143 50% 13143 90% 13143 99% 13143 ms Write: 10% 13459 30% 13459 50% 13459 90% 13459 99% 13459 ms Write: 10% 3999 30% 3999 50% 3999 90% 3999 99% 3999 ms Write: 10% 10032 30% 10032 50% 10032 90% 10032 99% 10032 ms Write: 10% 11261 30% 11261 50% 11261 90% 11261 99% 11261 ms Write: 10% 6397 30% 6397 50% 6397 90% 6397 99% 6397 ms Write: 10% 12192 30% 12192 50% 12192 90% 12192 99% 12192 ms Write: 10% 12142 30% 12142 50% 12142 90% 12142 99% 12142 ms Write: 10% 4915 30% 4915 50% 4915 90% 4915 99% 4915 ms Write: 10% 12072 30% 12072 50% 12072 90% 12072 99% 12072 ms Write: 10% 9649 30% 9649 50% 9649 90% 9649 99% 9649 ms Write: 10% 3246 30% 3246 50% 3246 90% 3246 99% 3246 ms Write: 10% 3750 30% 3750 50% 3750 90% 3750 99% 3750 ms Write: 10% 10698 30% 10698 50% 10698 90% 10698 99% 10698 ms Write: 10% 10579 30% 10579 50% 10579 90% 10579 99% 10579 ms Write: 10% 9418 30% 9418 50% 9418 90% 9418 99% 9418 ms Write: 10% 8239 30% 8239 50% 8239 90% 8239 99% 8239 ms Write: 10% 5384 30% 5384 50% 5384 90% 5384 99% 5384 ms Write: 10% 2482 30% 2482 50% 2482 90% 2482 99% 2482 ms Write: 10% 2041 30% 2041 50% 2041 90% 2041 99% 2041 ms Write: 10% 2221 30% 2221 50% 2221 90% 2221 99% 2221 ms Write: 10% 5542 30% 5542 50% 5542 90% 5542 99% 5542 ms Write: 10% 2185 30% 2185 50% 2185 90% 2185 99% 2185 ms Write: 10% 4109 30% 4109 50% 4109 90% 4109 99% 4109 ms Write: 10% 9046 30% 9046 50% 9046 90% 9046 99% 9046 ms Write: 10% 4264 30% 4264 50% 4264 90% 4264 99% 4264 ms Write: 10% 10523 30% 10523 50% 10523 90% 10523 99% 10523 ms Write: 10% 7537 30% 7537 50% 7537 90% 7537 99% 7537 ms Write: 10% 3212 30% 3212 50% 3212 90% 3212 99% 3212 ms Write: 10% 13628 30% 13628 50% 13628 90% 13628 99% 13628 ms Write: 10% 6971 30% 6971 50% 6971 90% 6971 99% 6971 ms Write: 10% 4555 30% 4555 50% 4555 90% 4555 99% 4555 ms Write: 10% 12529 30% 12529 50% 12529 90% 12529 99% 12529 ms Write: 10% 3541 30% 3541 50% 3541 90% 3541 99% 3541 ms Write: 10% 3805 30% 3805 50% 3805 90% 3805 99% 3805 ms Write: 10% 3420 30% 3420 50% 3420 90% 3420 99% 3420 ms Write: 10% 1922 30% 1922 50% 1922 90% 1922 99% 1922 ms Write: 10% 3234 30% 3234 50% 3234 90% 3234 99% 3234 ms Write: 10% 4885 30% 4885 50% 4885 90% 4885 99% 4885 ms Write: 10% 2077 30% 2077 50% 2077 90% 2077 99% 2077 ms Write: 10% 1930 30% 1930 50% 1930 90% 1930 99% 1930 ms Write: 
10% 3226 30% 3226 50% 3226 90% 3226 99% 3226 ms Write: 10% 2168 30% 2168 50% 2168 90% 2168 99% 2168 ms Step 2. read write Write: 10% 5235 30% 5235 50% 5235 90% 5235 99% 5235 ms Write: 10% 8919 30% 8919 50% 8919 90% 8919 99% 8919 ms Write: 10% 8467 30% 8467 50% 8467 90% 8467 99% 8467 ms Write: 10% 11446 30% 11446 50% 11446 90% 11446 99% 11446 ms Write: 10% 11916 30% 11916 50% 11916 90% 11916 99% 11916 ms Write: 10% 14169 30% 14169 50% 14169 90% 14169 99% 14169 ms Write: 10% 13378 30% 13378 50% 13378 90% 13378 99% 13378 ms Write: 10% 14527 30% 14527 50% 14527 90% 14527 99% 14527 ms Write: 10% 15008 30% 15008 50% 15008 90% 15008 99% 15008 ms Write: 10% 12575 30% 12575 50% 12575 90% 12575 99% 12575 ms Write: 10% 15283 30% 15283 50% 15283 90% 15283 99% 15283 ms Write: 10% 14321 30% 14321 50% 14321 90% 14321 99% 14321 ms Write: 10% 11256 30% 11256 50% 11256 90% 11256 99% 11256 ms Write: 10% 14705 30% 14705 50% 14705 90% 14705 99% 14705 ms Write: 10% 12751 30% 12751 50% 12751 90% 12751 99% 12751 ms Write: 10% 10990 30% 10990 50% 10990 90% 10990 99% 10990 ms Write: 10% 16439 30% 16439 50% 16439 90% 16439 99% 16439 ms Write: 10% 14949 30% 14949 50% 14949 90% 14949 99% 14949 ms Write: 10% 12764 30% 12764 50% 12764 90% 12764 99% 12764 ms Write: 10% 4018 30% 4018 50% 4018 90% 4018 99% 4018 ms Write: 10% 13487 30% 13487 50% 13487 90% 13487 99% 13487 ms Write: 10% 12636 30% 12636 50% 12636 90% 12636 99% 12636 ms Write: 10% 14182 30% 14182 50% 14182 90% 14182 99% 14182 ms Write: 10% 3608 30% 3608 50% 3608 90% 3608 99% 3608 ms Write: 10% 5499 30% 5499 50% 5499 90% 5499 99% 5499 ms Write: 10% 11473 30% 11473 50% 11473 90% 11473 99% 11473 ms Write: 10% 13947 30% 13947 50% 13947 90% 13947 99% 13947 ms Write: 10% 9105 30% 9105 50% 9105 90% 9105 99% 9105 ms Write: 10% 9258 30% 9258 50% 9258 90% 9258 99% 9258 ms Write: 10% 12927 30% 12927 50% 12927 90% 12927 99% 12927 ms Write: 10% 12583 30% 12583 50% 12583 90% 12583 99% 12583 ms Write: 10% 4281 30% 4281 50% 4281 90% 4281 99% 4281 ms Write: 10% 7815 30% 7815 50% 7815 90% 7815 99% 7815 ms Write: 10% 7584 30% 7584 50% 7584 90% 7584 99% 7584 ms Write: 10% 7650 30% 7650 50% 7650 90% 7650 99% 7650 ms Write: 10% 2745 30% 2745 50% 2745 90% 2745 99% 2745 ms Write: 10% 6638 30% 6638 50% 6638 90% 6638 99% 6638 ms Write: 10% 11400 30% 11400 50% 11400 90% 11400 99% 11400 ms Write: 10% 8863 30% 8863 50% 8863 90% 8863 99% 8863 ms Write: 10% 3720 30% 3720 50% 3720 90% 3720 99% 3720 ms Write: 10% 6604 30% 6604 50% 6604 90% 6604 99% 6604 ms Write: 10% 7294 30% 7294 50% 7294 90% 7294 99% 7294 ms Write: 10% 5938 30% 5938 50% 5938 90% 5938 99% 5938 ms Write: 10% 6295 30% 6295 50% 6295 90% 6295 99% 6295 ms Write: 10% 4400 30% 4400 50% 4400 90% 4400 99% 4400 ms Write: 10% 2252 30% 2252 50% 2252 90% 2252 99% 2252 ms Write: 10% 6432 30% 6432 50% 6432 90% 6432 99% 6432 ms Write: 10% 2187 30% 2187 50% 2187 90% 2187 99% 2187 ms Write: 10% 5025 30% 5025 50% 5025 90% 5025 99% 5025 ms Write: 10% 4347 30% 4347 50% 4347 90% 4347 99% 4347 ms Write: 10% 2006 30% 2006 50% 2006 90% 2006 99% 2006 ms Write: 10% 3406 30% 3406 50% 3406 90% 3406 99% 3406 ms Write: 10% 6573 30% 6573 50% 6573 90% 6573 99% 6573 ms Write: 10% 2601 30% 2601 50% 2601 90% 2601 99% 2601 ms Write: 10% 2884 30% 2884 50% 2884 90% 2884 99% 2884 ms Write: 10% 6919 30% 6919 50% 6919 90% 6919 99% 6919 ms Write: 10% 6602 30% 6602 50% 6602 90% 6602 99% 6602 ms Write: 10% 3825 30% 3825 50% 3825 90% 3825 99% 3825 ms Write: 10% 1768 30% 1768 50% 1768 90% 1768 99% 1768 ms Write: 10% 2559 30% 2559 50% 2559 90% 2559 99% 2559 ms Write: 10% 
2027 30% 2027 50% 2027 90% 2027 99% 2027 ms Write: 10% 4895 30% 4895 50% 4895 90% 4895 99% 4895 ms Write: 10% 3600 30% 3600 50% 3600 90% 3600 99% 3600 ms Write: 10% 3915 30% 3915 50% 3915 90% 3915 99% 3915 ms Read: 10% 3294 30% 3587 50% 3880 90% 15960 99% 18678 ms Step 3. write modify Write: 10% 2891 30% 2891 50% 2891 90% 2891 99% 2891 ms Write: 10% 2807 30% 2807 50% 2807 90% 2807 99% 2807 ms Write: 10% 8351 30% 8351 50% 8351 90% 8351 99% 8351 ms Write: 10% 8937 30% 8937 50% 8937 90% 8937 99% 8937 ms Write: 10% 10210 30% 10210 50% 10210 90% 10210 99% 10210 ms Write: 10% 14450 30% 14450 50% 14450 90% 14450 99% 14450 ms Was written: 18.75 MiB, Speed: 0.3125 MiB/s Write: 10% 16042 30% 16042 50% 16042 90% 16042 99% 16042 ms Write: 10% 15685 30% 15685 50% 15685 90% 15685 99% 15685 ms Write: 10% 15176 30% 15176 50% 15176 90% 15176 99% 15176 ms Write: 10% 15008 30% 15008 50% 15008 90% 15008 99% 15008 ms Write: 10% 14614 30% 14614 50% 14614 90% 14614 99% 14614 ms Write: 10% 14894 30% 14894 50% 14894 90% 14894 99% 14894 ms Write: 10% 10466 30% 10466 50% 10466 90% 10466 99% 10466 ms Write: 10% 15222 30% 15222 50% 15222 90% 15222 99% 15222 ms Write: 10% 12013 30% 12013 50% 12013 90% 12013 99% 12013 ms Write: 10% 11633 30% 11633 50% 11633 90% 11633 99% 11633 ms Write: 10% 14442 30% 14442 50% 14442 90% 14442 99% 14442 ms Write: 10% 14342 30% 14342 50% 14342 90% 14342 99% 14342 ms Write: 10% 14204 30% 14204 50% 14204 90% 14204 99% 14204 ms Write: 10% 11466 30% 11466 50% 11466 90% 11466 99% 11466 ms Write: 10% 9922 30% 9922 50% 9922 90% 9922 99% 9922 ms Write: 10% 12913 30% 12913 50% 12913 90% 12913 99% 12913 ms Write: 10% 12749 30% 12749 50% 12749 90% 12749 99% 12749 ms Write: 10% 7675 30% 7675 50% 7675 90% 7675 99% 7675 ms Write: 10% 7044 30% 7044 50% 7044 90% 7044 99% 7044 ms Write: 10% 6235 30% 6235 50% 6235 90% 6235 99% 6235 ms Write: 10% 5895 30% 5895 50% 5895 90% 5895 99% 5895 ms Write: 10% 10977 30% 10977 50% 10977 90% 10977 99% 10977 ms Write: 10% 13313 30% 13313 50% 13313 90% 13313 99% 13313 ms Write: 10% 11131 30% 11131 50% 11131 90% 11131 99% 11131 ms Write: 10% 5023 30% 5023 50% 5023 90% 5023 99% 5023 ms Write: 10% 5372 30% 5372 50% 5372 90% 5372 99% 5372 ms Write: 10% 11661 30% 11661 50% 11661 90% 11661 99% 11661 ms Write: 10% 11970 30% 11970 50% 11970 90% 11970 99% 11970 ms Write: 10% 11035 30% 11035 50% 11035 90% 11035 99% 11035 ms Write: 10% 15018 30% 15018 50% 15018 90% 15018 99% 15018 ms Write: 10% 7442 30% 7442 50% 7442 90% 7442 99% 7442 ms Write: 10% 5772 30% 5772 50% 5772 90% 5772 99% 5772 ms Write: 10% 4958 30% 4958 50% 4958 90% 4958 99% 4958 ms Write: 10% 9899 30% 9899 50% 9899 90% 9899 99% 9899 ms Write: 10% 10559 30% 10559 50% 10559 90% 10559 99% 10559 ms Write: 10% 12283 30% 12283 50% 12283 90% 12283 99% 12283 ms Write: 10% 6001 30% 6001 50% 6001 90% 6001 99% 6001 ms Write: 10% 5113 30% 5113 50% 5113 90% 5113 99% 5113 ms Write: 10% 3644 30% 3644 50% 3644 90% 3644 99% 3644 ms Write: 10% 6093 30% 6093 50% 6093 90% 6093 99% 6093 ms Write: 10% 3990 30% 3990 50% 3990 90% 3990 99% 3990 ms Write: 10% 4472 30% 4472 50% 4472 90% 4472 99% 4472 ms Write: 10% 4491 30% 4491 50% 4491 90% 4491 99% 4491 ms Write: 10% 4805 30% 4805 50% 4805 90% 4805 99% 4805 ms Write: 10% 4058 30% 4058 50% 4058 90% 4058 99% 4058 ms Write: 10% 4523 30% 4523 50% 4523 90% 4523 99% 4523 ms Write: 10% 3143 30% 3143 50% 3143 90% 3143 99% 3143 ms Write: 10% 6439 30% 6439 50% 6439 90% 6439 99% 6439 ms Write: 10% 3481 30% 3481 50% 3481 90% 3481 99% 3481 ms Write: 10% 5432 30% 5432 50% 5432 90% 5432 99% 5432 ms Write: 
10% 4820 30% 4820 50% 4820 90% 4820 99% 4820 ms Write: 10% 3083 30% 3083 50% 3083 90% 3083 99% 3083 ms Write: 10% 4432 30% 4432 50% 4432 90% 4432 99% 4432 ms Write: 10% 5336 30% 5336 50% 5336 90% 5336 99% 5336 ms Write: 10% 5704 30% 5704 50% 5704 90% 5704 99% 5704 ms Write: 10% 5928 30% 5928 50% 5928 90% 5928 99% 5928 ms Write: 10% 4862 30% 4862 50% 4862 90% 4862 99% 4862 ms Write: 10% 10125 30% 10125 50% 10125 90% 10125 99% 10125 ms Update: 10% 1972 30% 1972 50% 1972 90% 1972 99% 1972 ms Step 4. read modify write Write: 10% 2153 30% 2153 50% 2153 90% 2153 99% 2153 ms Write: 10% 1574 30% 1574 50% 1574 90% 1574 99% 1574 ms Write: 10% 1976 30% 1976 50% 1976 90% 1976 99% 1976 ms Write: 10% 13068 30% 13068 50% 13068 90% 13068 99% 13068 ms Write: 10% 11285 30% 11285 50% 11285 90% 11285 99% 11285 ms Write: 10% 15335 30% 15335 50% 15335 90% 15335 99% 15335 ms Write: 10% 14746 30% 14746 50% 14746 90% 14746 99% 14746 ms Write: 10% 14402 30% 14402 50% 14402 90% 14402 99% 14402 ms Write: 10% 14669 30% 14669 50% 14669 90% 14669 99% 14669 ms Write: 10% 16127 30% 16127 50% 16127 90% 16127 99% 16127 ms Write: 10% 16457 30% 16457 50% 16457 90% 16457 99% 16457 ms Write: 10% 13903 30% 13903 50% 13903 90% 13903 99% 13903 ms Write: 10% 12796 30% 12796 50% 12796 90% 12796 99% 12796 ms Write: 10% 14358 30% 14358 50% 14358 90% 14358 99% 14358 ms Write: 10% 14798 30% 14798 50% 14798 90% 14798 99% 14798 ms Write: 10% 11410 30% 11410 50% 11410 90% 11410 99% 11410 ms Write: 10% 14339 30% 14339 50% 14339 90% 14339 99% 14339 ms Write: 10% 14449 30% 14449 50% 14449 90% 14449 99% 14449 ms Write: 10% 10259 30% 10259 50% 10259 90% 10259 99% 10259 ms Write: 10% 14711 30% 14711 50% 14711 90% 14711 99% 14711 ms Write: 10% 9337 30% 9337 50% 9337 90% 9337 99% 9337 ms Write: 10% 9276 30% 9276 50% 9276 90% 9276 99% 9276 ms Write: 10% 8574 30% 8574 50% 8574 90% 8574 99% 8574 ms Write: 10% 9857 30% 9857 50% 9857 90% 9857 99% 9857 ms Write: 10% 13852 30% 13852 50% 13852 90% 13852 99% 13852 ms Write: 10% 9109 30% 9109 50% 9109 90% 9109 99% 9109 ms Write: 10% 7155 30% 7155 50% 7155 90% 7155 99% 7155 ms Write: 10% 7861 30% 7861 50% 7861 90% 7861 99% 7861 ms Write: 10% 7582 30% 7582 50% 7582 90% 7582 99% 7582 ms Write: 10% 12342 30% 12342 50% 12342 90% 12342 99% 12342 ms Write: 10% 12780 30% 12780 50% 12780 90% 12780 99% 12780 ms Write: 10% 11680 30% 11680 50% 11680 90% 11680 99% 11680 ms Write: 10% 13841 30% 13841 50% 13841 90% 13841 99% 13841 ms Write: 10% 4965 30% 4965 50% 4965 90% 4965 99% 4965 ms Write: 10% 10608 30% 10608 50% 10608 90% 10608 99% 10608 ms Write: 10% 4697 30% 4697 50% 4697 90% 4697 99% 4697 ms Write: 10% 4924 30% 4924 50% 4924 90% 4924 99% 4924 ms Write: 10% 4550 30% 4550 50% 4550 90% 4550 99% 4550 ms Write: 10% 3485 30% 3485 50% 3485 90% 3485 99% 3485 ms Write: 10% 9699 30% 9699 50% 9699 90% 9699 99% 9699 ms Write: 10% 8009 30% 8009 50% 8009 90% 8009 99% 8009 ms Write: 10% 8719 30% 8719 50% 8719 90% 8719 99% 8719 ms Write: 10% 7971 30% 7971 50% 7971 90% 7971 99% 7971 ms Write: 10% 7307 30% 7307 50% 7307 90% 7307 99% 7307 ms Write: 10% 6919 30% 6919 50% 6919 90% 6919 99% 6919 ms Write: 10% 11593 30% 11593 50% 11593 90% 11593 99% 11593 ms Write: 10% 5545 30% 5545 50% 5545 90% 5545 99% 5545 ms Write: 10% 4530 30% 4530 50% 4530 90% 4530 99% 4530 ms Write: 10% 8825 30% 8825 50% 8825 90% 8825 99% 8825 ms Write: 10% 3513 30% 3513 50% 3513 90% 3513 99% 3513 ms Write: 10% 8482 30% 8482 50% 8482 90% 8482 99% 8482 ms Write: 10% 7212 30% 7212 50% 7212 90% 7212 99% 7212 ms Write: 10% 6501 30% 6501 50% 6501 90% 6501 99% 6501 ms 
Write: 10% 6167 30% 6167 50% 6167 90% 6167 99% 6167 ms Write: 10% 5776 30% 5776 50% 5776 90% 5776 99% 5776 ms Write: 10% 4684 30% 4684 50% 4684 90% 4684 99% 4684 ms Write: 10% 4029 30% 4029 50% 4029 90% 4029 99% 4029 ms Write: 10% 3882 30% 3882 50% 3882 90% 3882 99% 3882 ms Write: 10% 4502 30% 4502 50% 4502 90% 4502 99% 4502 ms Write: 10% 6429 30% 6429 50% 6429 90% 6429 99% 6429 ms Write: 10% 3887 30% 3887 50% 3887 90% 3887 99% 3887 ms Write: 10% 4865 30% 4865 50% 4865 90% 4865 99% 4865 ms Write: 10% 3285 30% 3285 50% 3285 90% 3285 99% 3285 ms Write: 10% 7133 30% 7133 50% 7133 90% 7133 99% 7133 ms Read: 10% 3325 30% 3623 50% 3920 90% 16951 99% 19883 ms Update: 10% 1898 30% 1898 50% 1898 90% 1898 99% 1898 ms >> test_formats.py::TestS3Formats::test_invalid_format[v1-client0] >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v2-true-client0] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.bz2-bzip2] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.parquet-parquet] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_equi.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_equi.sql-result_sets] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.zst-zstd] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.csv-csv_with_names] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_comparison_empty_string.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_composite.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_on_top_of_apply.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_aliases_and_apply.sql-plan] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v1-false-client0] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-2.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-3.test] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars_ranges.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[select_result_limit.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/ct.script-script] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client2-year Uint32-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client3-year Uint32 NOT NULL-True] >> test_insert.py::TestS3::test_insert[v2-false-client0-csv_with_names-dataset] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.tsv-tsv_with_names] >> test_insert.py::TestS3::test_insert[v2-false-client0-csv_with_names-dataにちは% set] >> test_s3_1.py::TestS3::test_precompute[v1-false-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_using_index.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_using_index.sql-result_sets] >> test_s3_1.py::TestS3::test_precompute[v1-true-client0] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q19.sql-plan] 
[GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q19.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[write/update.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/update.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_utf8.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_1.sql-plan] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params1-false] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_equi.sql-result_sets] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_format[v1-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_inner.sql-plan] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v1-false-client0] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params1-true] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_aliases_and_apply.sql-plan] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v1-true-client0] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-7.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-8.test] >> test_formats.py::TestS3Formats::test_invalid_input_compression[v2-client0] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_aliases_and_apply.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_composite.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_composite.sql-result_sets] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.zst-zstd] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range.sql-result_sets] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.xz-xz] >> test_disk.py::TestSafeDiskBreak::test_erase_method >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_dml.py::TestDML::test_dml[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead 
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.json-json_each_row] >> test_sql.py::TestCanonicalFolder1::test_case[simple/ct.script-script] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_dict_select.sql-plan] >> test_s3_1.py::TestS3::test_precompute[v1-true-client0] [GOOD] >> test_s3_1.py::TestS3::test_precompute[v2-false-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client3-year Uint32 NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client4-year Int64-False] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params1-true] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params1-false] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range.sql-plan] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_formats.py::TestS3Formats::test_invalid_input_compression[v2-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q19.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q2.sql-plan] >> test_formats.py::TestS3Formats::test_invalid_input_compression[v1-client0] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_using_index.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_with_agg.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_1.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_1.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[write/update.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/update_on.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_aliases_and_apply.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_nonselector_aliases.sql-plan] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v1-true-client0] [GOOD] >> 
test_explicit_partitioning_0.py::TestS3::test_projection_date[v2-false-client0]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_inner.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_inner.sql-result_sets]
>> test_statistics.py::TestS3::test_convert[v1-client0-json_list-json_list] [GOOD]
>> test_statistics.py::TestS3::test_convert[v1-client0-json_list-json_each_row]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_composite.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_nonkey_rp.sql-plan]
>> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.json-json_each_row] [GOOD]
>> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.xz-xz] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.parquet-parquet]
>> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.gz-gzip]
>> test_s3_0.py::TestS3::test_bad_format[v1-false-client0] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD]
Test command err:
contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC).
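Note: the DeprecationWarning above, emitted from the bundled ydb Python SDK (contrib/python/ydb/py3/ydb/types.py:59), recurs in every datashard test block in this run. A minimal sketch of the substitution the warning asks for, assuming Python 3.11+ where datetime.UTC exists (older interpreters can use datetime.timezone.utc); the variable names are illustrative and this is not the SDK's actual code:

    import datetime

    ts = 1_700_000_000  # an arbitrary epoch timestamp for illustration

    # Deprecated call named in the warning: returns a naive datetime.
    naive = datetime.datetime.utcfromtimestamp(ts)

    # Recommended replacement: a timezone-aware datetime in UTC.
    aware = datetime.datetime.fromtimestamp(ts, datetime.UTC)

    # Both represent the same instant; only the tzinfo differs.
    assert aware.tzinfo is datetime.UTC
    assert aware.replace(tzinfo=None) == naive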
>> test_s3_0.py::TestS3::test_bad_format[v1-true-client0] >> test_insert.py::TestS3::test_insert[v2-false-client0-csv_with_names-dataにちは% set] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_dict_select.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_dict_select.sql-result_sets] >> test_insert.py::TestS3::test_insert[v2-false-client0-parquet-dataset] >> test_dml.py::TestDML::test_dml[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q2.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q2.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range.sql-result_sets] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client4-year Int64-False] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_10.sql-plan] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client5-year Int64 NOT NULL-False] >> test_sql.py::TestCanonicalFolder1::test_case[write/update_on.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/update_on.sql-result_sets] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params1-false] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_nonselector_aliases.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_nonselector_aliases.sql-result_sets] >> test_formats.py::TestS3Formats::test_invalid_input_compression[v1-client0] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params1-true] >> test_formats.py::TestS3Formats::test_invalid_output_compression[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v2-false-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_with_agg.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_with_agg.sql-result_sets] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v2-true-client0] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_inner.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftonly.sql-plan] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.parquet-parquet] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent.sql-result_sets] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.csv-csv_with_names] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead 
contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_dml.py::TestDML::test_dml[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_nonkey_rp.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_nonkey_rp.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_dict_select.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_list_select.sql-plan] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.gz-gzip] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.lz4-lz4] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_pk.sql-plan] >> test_s3_0.py::TestS3::test_bad_format[v1-true-client0] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_output_compression[v2-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q2.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q20.sql-plan] >> test_s3_0.py::TestS3::test_bad_format[v2-false-client0] >> test_alter_tiering.py::TestAlterTiering::test[many_tables] [FAIL] >> test_formats.py::TestS3Formats::test_invalid_output_compression[v1-client0] >> test_sql.py::TestCanonicalFolder1::test_case[write/update_on.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_nonselector_aliases.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases.sql-plan] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client5-year Int64 NOT NULL-False] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftonly.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftonly.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_10.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_10.sql-result_sets] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params1-true] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client6-year Uint64-False] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params1-false] >> test_dml.py::TestDML::test_dml[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_s3_1.py::TestS3::test_precompute[v2-false-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v2-true-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_with_agg.sql-result_sets] [GOOD] >> 
test_sql.py::TestCanonicalFolder1::test_case[json/insert_from_table.sql-plan] >> test_s3_1.py::TestS3::test_precompute[v2-true-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_validate_columns[v1-client0] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent_nopush.sql-plan] >> test_explicit_partitioning_0.py::TestS3::test_projection_validate_columns[v1-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_validate_columns[v2-client0] >> test_insert.py::TestS3::test_insert[v2-false-client0-parquet-dataset] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_list_select.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_list_select.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_nonkey_rp.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp.sql-plan] >> test_insert.py::TestS3::test_insert[v2-false-client0-parquet-dataにちは% set] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_list-json_each_row] [GOOD] >> test_alter_tiering.py::TestAlterTiering::test_multi[many_tables] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_validate_columns[v2-client0] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_list-csv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v1-false-client0] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q20.sql-plan] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.lz4-lz4] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.tsv-tsv_with_names] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_pk.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_pk.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q20.sql-result_sets] >> test_formats.py::TestS3Formats::test_invalid_output_compression[v1-client0] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.br-brotli] >> test_formats.py::TestS3Formats::test_custom_csv_delimiter_format[v2-client0] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftonly.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftsemi.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases.sql-result_sets] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params1-false] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params1-true] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_10.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_11.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_from_table.sql-plan] [GOOD] >> 
test_sql.py::TestCanonicalFolder1::test_case[json/insert_from_table.sql-result_sets]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client6-year Uint64-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client7-year Uint64 NOT NULL-False]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp.sql-result_sets]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test
Test command err:
contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_bindings_0.py::TestBindings::test_name_uniqueness_constraint[v1-client0] [GOOD]
Test command err:
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00475b/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_bindings_0/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00475b/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_bindings_0/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=685042) is multi-threaded, use of fork() may lead to deadlocks in the child.
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 692105 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent_nopush.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent_nopush.sql-result_sets] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v1-false-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v1-true-client0] >> test_formats.py::TestS3Formats::test_custom_csv_delimiter_format[v2-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_list_select.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q20.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_select.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q21.sql-plan] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.br-brotli] [GOOD] >> test_formats.py::TestS3Formats::test_custom_csv_delimiter_format[v1-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.json-json_each_row] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.bz2-bzip2] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_pk.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_skip.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftsemi.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftsemi.sql-result_sets] >> test_s3_1.py::TestS3::test_precompute[v2-true-client0] [GOOD] >> 
test_s3_1.py::TestS3::test_failed_precompute[v1-false-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert.sql-result_sets] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert_cast.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_11.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_11.sql-result_sets] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params1-true] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_from_table.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_literal.sql-plan] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params1-false] >> test_s3_0.py::TestS3::test_bad_format[v2-false-client0] [GOOD] >> test_s3_0.py::TestS3::test_bad_format[v2-true-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client7-year Uint64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client8-year String NOT NULL-True] >> test_insert.py::TestS3::test_insert[v2-false-client0-parquet-dataにちは% set] [GOOD] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_list-dataset] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_list-dataset] [SKIPPED] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q21.sql-plan] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.bz2-bzip2] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp_1.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent_nopush.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_select.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_select.sql-result_sets] >> test_statistics.py::TestS3::test_convert[v1-client0-json_list-csv_with_names] [GOOD] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_list-dataにちは% set] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v1-true-client0] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_list-parquet] >> test_validation.py::TestS3::test_empty[v1-client0] [GOOD] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_list-dataにちは% set] [SKIPPED] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v2-false-client0] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_skip.sql-plan] [GOOD] >> 
test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_skip.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftsemi.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_inner.sql-plan] >> test_validation.py::TestS3::test_nested_issues[v2-client0] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_each_row-dataset] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.zst-zstd] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_each_row-dataset] [SKIPPED] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_each_row-dataにちは% set] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert_cast.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert_cast.sql-result_sets] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_each_row-dataにちは% set] [SKIPPED] >> test_formats.py::TestS3Formats::test_custom_csv_delimiter_format[v1-client0] [GOOD] >> test_insert.py::TestS3::test_insert[v2-true-client0-csv_with_names-dataset] >> test_formats.py::TestS3Formats::test_no_not_nullable_column[v2-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.json-json_each_row] [GOOD] >> test_dml.py::TestDML::test_dml[table_all_types-pk_types12-all_types12-index12---] >> test_insert.py::TestS3::test_insert[v2-true-client0-csv_with_names-dataset] [SKIPPED] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.parquet-parquet] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_literal.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_literal.sql-result_sets] >> test_insert.py::TestS3::test_insert[v2-true-client0-csv_with_names-dataにちは% set] >> test_insert.py::TestS3::test_insert[v2-true-client0-csv_with_names-dataにちは% set] [SKIPPED] >> test_insert.py::TestS3::test_insert[v2-true-client0-parquet-dataset] >> test_dml.py::TestDML::test_dml[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_11.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_12.sql-plan] >> LabeledDbCounters::TwoTabletsKillOneTablet [GOOD] >> ShowCreateView::Basic >> test_s3_1.py::TestS3::test_failed_precompute[v1-false-client0] [GOOD] >> test_s3_1.py::TestS3::test_failed_precompute[v1-true-client0] >> test_s3_0.py::TestS3::test_bad_format[v2-true-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_inner.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_inner.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp_1.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp_1.sql-result_sets] >> test_s3_0.py::TestS3::test_bad_request_on_invalid_parquet[v2-client0] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_select.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[simple/multi_select.sql-plan] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client8-year String NOT NULL-True] 
[GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params1-false] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params1-true] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_skip.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_pk.sql-plan] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client9-year String-False] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v2-false-client0] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.zst-zstd] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v2-true-client0] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.xz-xz] >> ttl_unavailable_s3.py::TestUnavailableS3::test >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert_cast.sql-result_sets] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.parquet-parquet] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_literal.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_params.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[write/write_group_by.script-script] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.csv-csv_with_names] >> test_formats.py::TestS3Formats::test_no_not_nullable_column[v2-client0] [GOOD] >> test_formats.py::TestS3Formats::test_no_not_nullable_column[v1-client0] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_12.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_12.sql-result_sets] |86.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-11.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-12.test] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params1-true] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params1-false] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp_1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_single_rp.sql-plan] >> test_s3_0.py::TestS3::test_bad_request_on_invalid_parquet[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_bad_request_on_compression[v2-client0] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_inner.sql-result_sets] [GOOD] >> test_validation.py::TestS3::test_nested_issues[v2-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_pk.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/multi_select.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/multi_select.sql-result_sets] >> test_validation.py::TestS3::test_nested_issues[v1-client0] >> test_insert.py::TestS3::test_insert[v2-true-client0-parquet-dataset] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp.sql-plan] >> test_insert.py::TestS3::test_insert[v2-true-client0-parquet-dataにちは% set] >> 
test_statistics.py::TestS3::test_convert[v1-client0-json_list-parquet] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_params.sql-plan] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client9-year String-False] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-json_list] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.tsv-tsv_with_names] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.xz-xz] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client10-year Utf8-False] >> test_compressions.py::TestS3Compressions::test_invalid_compression[v2-client0] >> test_s3_1.py::TestS3::test_failed_precompute[v1-true-client0] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params1-false] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/write_group_by.script-script] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_s3_1.py::TestS3::test_failed_precompute[v2-false-client0] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params1-true] >> test_formats.py::TestS3Formats::test_no_not_nullable_column[v1-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v2-true-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_12.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_13.sql-plan] >> test_formats.py::TestS3Formats::test_no_nullable_column[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client0-year Int32 NOT NULL-True] |86.1%| [TA] $(B)/ydb/tests/datashard/secondary_index/test-results/py3test/{meta.json ... results_accumulator.log} >> test_disk.py::TestSafeDiskBreak::test_erase_method [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_single_rp.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_single_rp.sql-result_sets] |86.2%| [TA] {RESULT} $(B)/ydb/tests/datashard/secondary_index/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_sql.py::TestCanonicalFolder1::test_case[simple/multi_select.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/null_select.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp.sql-result_sets] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_formats.py::TestS3Formats::test_no_nullable_column[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_bad_request_on_compression[v2-client0] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params1-true] [GOOD] >> test_formats.py::TestS3Formats::test_no_nullable_column[v1-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.json-json_each_row] >> test_s3_0.py::TestS3::test_checkpoints_on_join_s3_with_yds[v1-mvp_external_ydb_endpoint0-client0] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params1-false] >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client10-year Utf8-False] [GOOD] >> test_validation.py::TestS3::test_nested_issues[v1-client0] [GOOD] >> test_validation.py::TestS3::test_nested_type[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client11-year Utf8 NOT NULL-True] >> test_compressions.py::TestS3Compressions::test_invalid_compression[v2-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_13.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_13.sql-result_sets] >> test_compressions.py::TestS3Compressions::test_invalid_compression[v1-client0] >> test_insert.py::TestS3::test_insert[v2-true-client0-parquet-dataにちは% set] [GOOD] >> test_insert.py::TestS3::test_big_json_list_insert[v1-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client0-year Int32 NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client1-year Uint32 NOT NULL-True] >> ShowCreateView::Basic [GOOD] >> ShowCreateView::FromTable >> test_dml.py::TestDML::test_dml[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_s3_1.py::TestS3::test_failed_precompute[v2-false-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/null_select.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp_1.sql-plan] >> test_s3_1.py::TestS3::test_failed_precompute[v2-true-client0] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_single_rp.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_1.sql-plan] >> test_formats.py::TestS3Formats::test_no_nullable_column[v1-client0] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_column_type_in_csv[v2-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.parquet-parquet] >> 
test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params1-false] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params1-true] >> test_compressions.py::TestS3Compressions::test_invalid_compression[v1-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client11-year Utf8 NOT NULL-True] [GOOD] >> test_compressions.py::TestS3Compressions::test_invalid_compression_inference[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client12-year Date-False] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_13.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_2.sql-plan] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases.sql-result_sets] [GOOD] >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-json_list] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-json_each_row] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client1-year Uint32 NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client2-year Uint64 NOT NULL-True] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params1-true] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_1.sql-plan] [GOOD] >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params1] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp_1.sql-plan] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params2-false] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp_1.sql-result_sets] >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params2] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[simple/q21.sql-plan] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_column_type_in_csv[v2-client0] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_2.sql-plan] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_column_type_in_csv[v1-client0] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_2.sql-result_sets] >> test_compressions.py::TestS3Compressions::test_invalid_compression_inference[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.csv-csv_with_names] >> test_insert.py::TestS3::test_big_json_list_insert[v1-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client2-year Uint64 NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client3-year Date NOT NULL-False] >> test_insert.py::TestS3::test_big_json_list_insert[v2-client0] >> test_dml.py::TestDML::test_dml[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client12-year Date-False] [GOOD] 
>> test_s3_1.py::TestS3::test_failed_precompute[v2-true-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client13-year Date NOT NULL-True] >> test_s3_1.py::TestS3::test_missed[v1-false-client0] >> ShowCreateView::FromTable [GOOD] >> ShowCreateView::WithPairedTablePathPrefix ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills [GOOD] Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp_1.sql-result_sets] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test >> test_disk.py::TestSafeDiskBreak::test_erase_method [GOOD] Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.csv-csv_with_names] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client3-year Date NOT NULL-False] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.tsv-tsv_with_names] >> test_dml.py::TestDML::test_dml[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> 
test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client4-year String NOT NULL-True] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_2.sql-result_sets] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_column_type_in_csv[v1-client0] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_column_in_parquet[v2-client0] |86.2%| [TA] $(B)/ydb/tests/tools/nemesis/ut/test-results/py3test/{meta.json ... results_accumulator.log} >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-json_each_row] [GOOD] |86.2%| [TA] {RESULT} $(B)/ydb/tests/tools/nemesis/ut/test-results/py3test/{meta.json ... results_accumulator.log} >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-csv_with_names] >> test_s3_1.py::TestS3::test_missed[v1-false-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client13-year Date NOT NULL-True] [GOOD] >> test_s3_1.py::TestS3::test_missed[v1-true-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client14-year Datetime-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client4-year String NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client5-year String-False] >> test_dml.py::TestDML::test_dml[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_dml.py::TestDML::test_dml[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_insert.py::TestS3::test_big_json_list_insert[v2-client0] [GOOD] >> test_insert.py::TestS3::test_insert_csv_delimiter[v1-client0] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_inner.sql-result_sets] [GOOD] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_pk.sql-plan] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.json-json_each_row] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_params.sql-plan] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_column_in_parquet[v2-client0] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_column_in_parquet[v1-client0] >> test_dml.py::TestDML::test_dml[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[write/write_group_by.script-script] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client5-year String-False] [GOOD] >> ShowCreateView::WithPairedTablePathPrefix [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client6-year Utf8 NOT NULL-True] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client14-year Datetime-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client15-year Datetime NOT NULL-True] >> test_s3_1.py::TestS3::test_missed[v1-true-client0] 
[GOOD] >> test_s3_1.py::TestS3::test_missed[v2-false-client0] >> test_insert.py::TestS3::test_insert_csv_delimiter[v1-client0] [GOOD] >> test_insert.py::TestS3::test_insert_csv_delimiter[v2-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_compressions.py::TestS3Compressions::test_invalid_compression_inference[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004716/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_compressions/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004716/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_compressions/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=765095) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 769019 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.parquet-parquet] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> ShowCreateView::WithPairedTablePathPrefix [GOOD] Test command err: 2025-06-30T09:22:17.870972Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521670301900117113:2217];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:22:17.913769Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003086/r3tmp/tmpk1eYcV/pdisk_1.dat 2025-06-30T09:22:17.954077Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:17.962421Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 12625, node 1 2025-06-30T09:22:17.967173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:17.967207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:17.970354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:17.975144Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:22:17.975156Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:22:17.975158Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:22:17.975227Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1701 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:22:18.013390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:22:18.019409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-30T09:22:18.028786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) waiting... 2025-06-30T09:22:18.042820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:18.042844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting waiting... 
2025-06-30T09:22:18.044649Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-30T09:22:18.045681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-30T09:22:18.064151Z node 3 :SYSTEM_VIEWS INFO: processor_impl.cpp:41: [72075186224037893] OnActivateExecutor 2025-06-30T09:22:18.064169Z node 3 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:15: [72075186224037893] TTxInitSchema::Execute 2025-06-30T09:22:18.071615Z node 3 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:778: Handle TEvSysView::TEvRegisterDbCounters: service id# [3:7521670304383982250:2078], path id# [OwnerId: 72057594046644480, LocalPathId: 2], service# 2 2025-06-30T09:22:18.071799Z node 3 :SYSTEM_VIEWS DEBUG: partition_stats.cpp:32: NSysView::TPartitionStatsCollector bootstrapped 2025-06-30T09:22:18.071880Z node 3 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:42: [72075186224037893] TTxInitSchema::Complete 2025-06-30T09:22:18.071888Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:136: [72075186224037893] TTxInit::Execute 2025-06-30T09:22:18.072034Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:257: [72075186224037893] Loading interval summaries: query count# 0, node ids count# 0, total count# 0 2025-06-30T09:22:18.072040Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:284: [72075186224037893] Loading interval metrics: query count# 0 2025-06-30T09:22:18.072047Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:362: [72075186224037893] Loading interval query tops: total query count# 0 2025-06-30T09:22:18.072052Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:408: [72075186224037893] Loading nodes to request: nodes count# 0, hashes count# 0 2025-06-30T09:22:18.072057Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 6, result count# 0 2025-06-30T09:22:18.072061Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 7, result count# 0 2025-06-30T09:22:18.072065Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 8, result count# 0 2025-06-30T09:22:18.072068Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 9, result count# 0 2025-06-30T09:22:18.072073Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 10, result count# 0 2025-06-30T09:22:18.072076Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 11, result count# 0 2025-06-30T09:22:18.072080Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 12, result count# 0 2025-06-30T09:22:18.072084Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 13, result count# 0 2025-06-30T09:22:18.072088Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 14, result count# 0 2025-06-30T09:22:18.072092Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 15, result count# 0 2025-06-30T09:22:18.072096Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:129: [72075186224037893] Loading results: table# 16, partCount count# 0 2025-06-30T09:22:18.072100Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:129: [72075186224037893] Loading results: table# 19, partCount count# 0 2025-06-30T09:22:18.072106Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 17, result count# 0 2025-06-30T09:22:18.072110Z node 3 :SYSTEM_VIEWS DEBUG: 
tx_init.cpp:82: [72075186224037893] Loading results: table# 18, result count# 0 2025-06-30T09:22:18.072114Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 20, result count# 0 2025-06-30T09:22:18.072119Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 21, result count# 0 2025-06-30T09:22:18.072138Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:333: [72075186224037893] Reset: interval end# 2025-06-30T09:22:18.000000Z 2025-06-30T09:22:18.074114Z node 3 :SYSTEM_VIEWS INFO: sysview_service.cpp:860: Navigate by path id succeeded: service id# [3:7521670304383982250:2078], path id# [OwnerId: 72057594046644480, LocalPathId: 2], database# /Root/Database1 2025-06-30T09:22:18.075254Z node 3 :SYSTEM_VIEWS INFO: sysview_service.cpp:886: Navigate by database succeeded: service id# [3:7521670304383982250:2078], database# /Root/Database1, no sysview processor 2025-06-30T09:22:18.075442Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:18.078534Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:488: [72075186224037893] TTxInit::Complete 2025-06-30T09:22:18.097179Z node 3 :SYSTEM_VIEWS DEBUG: tx_configure.cpp:20: [72075186224037893] TTxConfigure::Execute: database# /Root/Database1 2025-06-30T09:22:18.098831Z node 3 :SYSTEM_VIEWS INFO: partition_stats.cpp:522: NSysView::TPartitionStatsCollector initialized: domain key# [OwnerId: 72057594046644480, LocalPathId: 2], sysview processor id# 72075186224037893 2025-06-30T09:22:18.101195Z node 3 :SYSTEM_VIEWS DEBUG: tx_configure.cpp:30: [72075186224037893] TTxConfigure::Complete 2025-06-30T09:22:18.111538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) waiting... 2025-06-30T09:22:18.130761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:22:18.130802Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:22:18.135502Z node 1 :HIVE WARN: hive_impl.cpp:785: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-30T09:22:18.135924Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-30T09:22:18.166628Z node 2 :SYSTEM_VIEWS INFO: processor_impl.cpp:41: [72075186224037899] OnActivateExecutor 2025-06-30T09:22:18.166648Z node 2 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:15: [72075186224037899] TTxInitSchema::Execute 2025-06-30T09:22:18.167162Z node 2 :SYSTEM_VIEWS DEBUG: partition_stats.cpp:32: NSysView::TPartitionStatsCollector bootstrapped 2025-06-30T09:22:18.167486Z node 2 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:778: Handle TEvSysView::TEvRegisterDbCounters: service id# [2:7521670303911754875:2074], path id# [OwnerId: 72057594046644480, LocalPathId: 3], service# 2 2025-06-30T09:22:18.167568Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:22:18.167876Z node 2 :SYSTEM_VIEWS INFO: sysview_service.cpp:860: Navigate by path id succeeded: service id# [2:7521670303911754875:2074], path id# [OwnerId: 72057594046644480, LocalPathId: 3], database# /Root/Database2 2025-06-30T09:22:18.167965Z node 2 :SYSTEM_VIEWS INFO: sysview_service.cpp:886: Navigate by database succeeded: service id# [2:7521670303911754875:2074], database# /Root/Database2, no sysview processor 2025-06-30T09:22:18.169110Z node 2 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:42: [72075186224037899] TTxInitSchema::Complete 2025-06-30T09:22:18.169126Z node 2 :SYSTEM_VIEWS DEBUG: tx_init.cpp:136: [72075186224037899] TTxInit::Execute 2025-06-30T09:22:18.16921 ... 1526121:2873], owner: [24:7521672534471526117:2871], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } 2025-06-30T09:30:57.790250Z node 24 :SYSTEM_VIEWS INFO: show_create.cpp:107: Scan prepared, actor: [24:7521672534471526121:2873] 2025-06-30T09:30:57.797483Z node 24 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [24:7521672534471526121:2873], row count: 1, finished: 1 2025-06-30T09:30:57.797555Z node 24 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [24:7521672534471526121:2873], owner: [24:7521672534471526117:2871], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } 2025-06-30T09:30:59.280117Z node 29 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[29:7521672543712390948:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:30:59.280194Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003086/r3tmp/tmpfgXTw5/pdisk_1.dat 2025-06-30T09:30:59.330702Z node 29 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:30:59.348330Z node 29 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 29 Type# 268639257 TServer::EnableGrpc on GrpcPort 17074, node 29 2025-06-30T09:30:59.367055Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:30:59.367070Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:30:59.367072Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:30:59.367124Z node 29 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:30:59.382144Z node 29 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:30:59.382174Z node 29 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:30:59.386684Z node 29 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63917 TClient is connected to server localhost:63917 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:30:59.431087Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-30T09:30:59.822151Z node 29 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [29:7521672543712391843:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:30:59.822191Z node 29 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:30:59.822380Z node 29 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [29:7521672543712391855:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:30:59.823490Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:30:59.840811Z node 29 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [29:7521672543712391857:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-30T09:30:59.931017Z node 29 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [29:7521672543712391932:2715] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:31:00.282958Z node 29 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-30T09:31:00.288319Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:31:00.544343Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:31:00.660142Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:31:00.852901Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:31:00.985951Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:31:01.109478Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-30T09:31:01.228999Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:31:01.264640Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-30T09:31:02.036157Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715697:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:31:02.701884Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715714:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2025-06-30T09:31:02.732899Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715715:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-30T09:31:02.859793Z node 29 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [29:7521672556597296483:2869], owner: [29:7521672556597296480:2867], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } 2025-06-30T09:31:02.861068Z node 29 :SYSTEM_VIEWS INFO: show_create.cpp:107: Scan prepared, actor: [29:7521672556597296483:2869] 2025-06-30T09:31:02.880126Z node 29 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [29:7521672556597296483:2869], row count: 1, finished: 1 2025-06-30T09:31:02.880186Z node 29 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [29:7521672556597296483:2869], owner: [29:7521672556597296480:2867], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client6-year Utf8 NOT NULL-True] [GOOD] >> test_dml.py::TestDML::test_dml[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client7-year Utf8-False] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client15-year Datetime NOT NULL-True] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_alter_tiering.py::TestAlterTiering::test_multi[many_tables] [GOOD] Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead 
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). Thread 0 failed contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). 
contrib/tools/python3/Lib/logging/__init__.py:1759: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/0047e0/ydb/tests/olap/scenario/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff/run.log' mode='a' encoding='utf-8'> ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client0-year Int32-False] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_formats.py::TestS3Formats::test_invalid_column_in_parquet[v1-client0] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-parquet] >> test_formats.py::TestS3Formats::test_simple_pg_types[v2-client0] >> test_s3_1.py::TestS3::test_missed[v2-false-client0] [GOOD] >> test_s3_1.py::TestS3::test_missed[v2-true-client0] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.parquet-parquet] [GOOD] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.csv-csv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client7-year Utf8-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client8-year Int32-False] >> test_insert.py::TestS3::test_insert_csv_delimiter[v2-client0] [GOOD] >> test_insert.py::TestS3::test_append[v1-client0] >> data_correctness.py::TestDataCorrectness::test >> test_dml.py::TestDML::test_dml[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> tier_delete.py::TestTierDelete::test_delete_s3_ttl >> test_formats.py::TestS3Formats::test_simple_pg_types[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client0-year Int32-False] [GOOD] >> test_formats.py::TestS3Formats::test_simple_pg_types[v1-client0] >> test_s3_1.py::TestS3::test_missed[v2-true-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client1-year Int32 NOT NULL-False] >> test_s3_1.py::TestS3::test_simple_hits_47[v1-false-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client8-year Int32-False] [GOOD] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_1.sql-plan] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client9-year Uint32-False] >> test_validation.py::TestS3::test_nested_type[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.csv-csv_with_names] [GOOD] >> test_validation.py::TestS3::test_nested_type[v1-client0] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.tsv-tsv_with_names] >> ttl_delete_s3.py::TestDeleteS3Ttl::test_data_unchanged_after_ttl_change >> test_formats.py::TestS3Formats::test_simple_pg_types[v1-client0] [GOOD] >> 
test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client1-year Int32 NOT NULL-False] [GOOD] >> test_formats.py::TestS3Formats::test_precompute[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client2-year Uint32-False] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_s3_1.py::TestS3::test_simple_hits_47[v1-false-client0] [GOOD] >> test_s3_1.py::TestS3::test_simple_hits_47[v1-true-client0] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_insert.py::TestS3::test_append[v1-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.json-json_each_row] >> test_insert.py::TestS3::test_append[v2-client0] |86.2%| [TA] $(B)/ydb/core/sys_view/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[simple/null_select.sql-plan] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-parquet] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-json_list] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client9-year Uint32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client10-year Int64 NOT NULL-True] >> test_dml.py::TestDML::test_dml[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp_1.sql-result_sets] [GOOD] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_2.sql-result_sets] [GOOD] >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete >> test_s3_1.py::TestS3::test_simple_hits_47[v1-true-client0] [GOOD] >> test_s3_1.py::TestS3::test_simple_hits_47[v2-false-client0] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.json-json_each_row] [GOOD] >> test_formats.py::TestS3Formats::test_precompute[v2-client0] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params2-false] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.parquet-parquet] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client2-year Uint32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client10-year Int64 NOT NULL-True] [GOOD] >> test_formats.py::TestS3Formats::test_precompute[v1-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client11-year Int64-False] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params2-true] >> test_dml.py::TestDML::test_dml[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client3-year Uint32 NOT NULL-True] |86.2%| [TA] {RESULT} $(B)/ydb/core/sys_view/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.csv-csv_with_names] >> test_formats.py::TestS3Formats::test_precompute[v1-client0] [GOOD] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> test_formats.py::TestS3Formats::test_raw_empty_schema_query[v2-client0] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params2-true] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params2-false] >> test_insert.py::TestS3::test_append[v2-client0] [GOOD] >> test_s3_1.py::TestS3::test_simple_hits_47[v2-false-client0] [GOOD] >> test_s3_1.py::TestS3::test_simple_hits_47[v2-true-client0] >> test_insert.py::TestS3::test_part_split[v1-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client11-year Int64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client12-year Uint64-False] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client3-year Uint32 NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client4-year Int64-False] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-json_list] [GOOD] |86.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-json_each_row] |86.2%| [TA] $(B)/ydb/tests/functional/canonical/test-results/py3test/{meta.json ... results_accumulator.log} >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params2-false] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params2-true] |86.2%| [TA] {RESULT} $(B)/ydb/tests/functional/canonical/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.csv-csv_with_names] [GOOD] >> test_dml.py::TestDML::test_dml[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.tsv-tsv_with_names] >> test_s3_1.py::TestS3::test_simple_hits_47[v2-true-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-exact_file-True-client0] >> test_formats.py::TestS3Formats::test_raw_empty_schema_query[v2-client0] [GOOD] >> test_formats.py::TestS3Formats::test_raw_empty_schema_query[v1-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client12-year Uint64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client13-year Date-False] >> unstable_connection.py::TestUnstableConnection::test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client4-year Int64-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client5-year Int64 NOT NULL-False] >> test_insert.py::TestS3::test_part_split[v1-client0] [GOOD] >> test_insert.py::TestS3::test_part_split[v2-client0] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params2-true] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params2-false] >> test_tenants.py::TestTenants::test_when_deactivate_fat_tenant_creation_another_tenant_is_ok[enable_alter_database_create_hive_first--true] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-exact_file-True-client0] [GOOD] >> test_db_counters.py::TestKqpCounters::test_case >> test_dynamic_tenants.py::test_create_and_drop_tenants[enable_alter_database_create_hive_first--true] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-exact_file-False-client0] >> test_formats.py::TestS3Formats::test_raw_empty_schema_query[v1-client0] [GOOD] >> test_formats.py::TestS3Formats::test_with_infer_and_unsupported_option[v2-client0] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.json-json_each_row] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client5-year Int64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client6-year Uint64-False] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
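(A note on the DeprecationWarning that ends the test command output above: datetime.datetime.utcfromtimestamp() is deprecated because it returns a naive datetime. A minimal sketch of the migration the warning itself names, assuming Python 3.11+ where datetime.UTC is available; the timestamp value is illustrative only. The datetime.datetime.utcnow() warnings elsewhere in this run have the analogous fix, datetime.datetime.now(datetime.UTC).

import datetime

ts = 1_700_000_000  # illustrative Unix timestamp, not from the log

# Deprecated: returns a naive datetime that is implicitly UTC.
#   datetime.datetime.utcfromtimestamp(ts)

# Timezone-aware replacement named by the warning:
dt = datetime.datetime.fromtimestamp(ts, datetime.UTC)
print(dt.isoformat())  # 2023-11-14T22:13:20+00:00
)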
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client0-year Int32 NOT NULL-True] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-json_each_row] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-exact_file-False-client0] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params2-false] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-csv_with_names] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-directory_scan-True-client0] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params2-true] >> test_formats.py::TestS3Formats::test_with_infer_and_unsupported_option[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.parquet-parquet] >> test_validation.py::TestS3::test_nested_type[v1-client0] [GOOD] >> test_insert.py::TestS3::test_part_split[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client6-year Uint64-False] [GOOD] >> test_insert.py::TestS3::test_part_merge[v1-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client7-year Uint64 NOT NULL-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client0-year Int32 NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client1-year Uint32 NOT NULL-True] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-directory_scan-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-directory_scan-False-client0] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params2-true] [GOOD] >> test_dml.py::TestDML::test_dml[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params2-false] >> test_storage_config.py::TestStorageConfig::test_cases[case_0] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.csv-csv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client7-year Uint64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client8-year String NOT NULL-True] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-directory_scan-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-exact_file-True-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client1-year Uint32 NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client2-year Uint64 NOT NULL-True] >> test_dml.py::TestDML::test_dml[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_insert.py::TestS3::test_part_merge[v1-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client8-year String NOT NULL-True] [GOOD] 
>> test_insert.py::TestS3::test_part_merge[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client9-year String-False] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params2-false] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params2-true] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.csv-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.tsv-tsv_with_names] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-exact_file-True-client0] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-parquet] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-exact_file-False-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client2-year Uint64 NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client3-year Date NOT NULL-False] >> test_ydb_over_fq.py::TestYdbOverFq::test_list_directory_v2[v2-client0] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_list_directory_v1[v1-client0] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params2-true] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params2-false] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client9-year String-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client10-year Utf8-False] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-exact_file-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-directory_scan-True-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client3-year Date NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client4-year String NOT NULL-True] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.json-json_each_row] >> test_insert.py::TestS3::test_part_merge[v2-client0] [GOOD] >> test_insert.py::TestS3::test_part_binding[v1-client0-json_list] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params2-false] [GOOD] >> test_insert.py::TestS3::test_part_binding[v1-client0-json_list] [SKIPPED] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params2-true] >> test_insert.py::TestS3::test_part_binding[v1-client0-json_each_row] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client10-year Utf8-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client11-year Utf8 NOT NULL-True] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-directory_scan-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-directory_scan-False-client0] >> test_dml.py::TestDML::test_dml[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client4-year String NOT NULL-True] [GOOD] >> 
test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client5-year String-False] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params2-true] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params2-false] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.parquet-parquet] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client11-year Utf8 NOT NULL-True] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-parquet] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client12-year Date-False] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-json_list] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-directory_scan-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-exact_file-True-client0] >> test_insert.py::TestS3::test_part_binding[v1-client0-json_each_row] [GOOD] >> test_insert.py::TestS3::test_part_binding[v1-client0-csv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client5-year String-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client6-year Utf8 NOT NULL-True] >> test_s3_0.py::TestS3::test_checkpoints_on_join_s3_with_yds[v1-mvp_external_ydb_endpoint0-client0] [GOOD] >> test_s3_0.py::TestS3::test_double_optional_types_validation[v2-client0] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params2-false] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params2-true] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client12-year Date-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client13-year Date NOT NULL-True] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.csv-csv_with_names] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-exact_file-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-exact_file-False-client0] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params2-true] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client6-year Utf8 NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client13-year Date NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client7-year Utf8-False] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client14-year Datetime-False] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_formats.py::TestS3Formats::test_with_infer_and_unsupported_option[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004719/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_formats/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004719/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_formats/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=761987) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 766759 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-exact_file-False-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-directory_scan-True-client0] >> test_insert.py::TestS3::test_part_binding[v1-client0-csv_with_names] [GOOD] >> test_insert.py::TestS3::test_part_binding[v2-client0-json_list] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.tsv-tsv_with_names] >> test_insert.py::TestS3::test_part_binding[v2-client0-json_list] [SKIPPED] >> test_insert.py::TestS3::test_part_binding[v2-client0-json_each_row] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client7-year Utf8-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client8-year Int32-False] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] Test 
command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-json_list] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-json_each_row] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client14-year Datetime-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client15-year Datetime NOT NULL-True] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-other-admin] >> test_storage_config.py::TestStorageConfig::test_cases[case_0] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_10] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-directory_scan-True-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-directory_scan-False-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.json-json_each_row] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client8-year Int32-False] [GOOD] >> test_user_administration.py::test_database_admin_can_create_user >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client9-year Uint32-False] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client15-year Datetime NOT NULL-True] [GOOD] >> test_insert.py::TestS3::test_part_binding[v2-client0-json_each_row] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client0-year Int32-False] >> test_insert.py::TestS3::test_part_binding[v2-client0-csv_with_names] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-3.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-4.test] >> test_tenants.py::TestTenants::test_create_create_table[enable_alter_database_create_hive_first--true] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-directory_scan-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-exact_file-True-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.parquet-parquet] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-json_each_row] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-csv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client9-year Uint32-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client0-year Int32-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client1-year Int32 NOT NULL-False] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: 
datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client10-year Int64 NOT NULL-True] >> test_dml.py::TestDML::test_dml[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_dml.py::TestDML::test_dml[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_insert.py::TestS3::test_part_binding[v2-client0-csv_with_names] [GOOD] >> test_insert.py::TestS3::test_error[v1-client0-json_each_row] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-exact_file-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-exact_file-False-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client10-year Int64 NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client11-year Int64-False] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.csv-csv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client1-year Int32 NOT NULL-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client2-year Uint32-False] >> test_storage_config.py::TestStorageConfig::test_cases[case_10] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_11] >> test_dml.py::TestDML::test_dml[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_9] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-exact_file-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-directory_scan-True-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client11-year Int64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client12-year Uint64-False] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.tsv-tsv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client2-year Uint32-False] [GOOD] >> test_tenants.py::TestTenants::test_create_create_table[enable_alter_database_create_hive_first--true] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client3-year Uint32 NOT NULL-True] >> test_insert.py::TestS3::test_error[v1-client0-json_each_row] [GOOD] >> test_insert.py::TestS3::test_error[v1-client0-csv_with_names] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-parquet] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client12-year Uint64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client13-year Date-False] >> 
test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.json-json_each_row] >> test_tenants.py::TestTenants::test_create_drop_create_table2[enable_alter_database_create_hive_first--false] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-directory_scan-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-directory_scan-False-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client3-year Uint32 NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client4-year Int64-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client0-year Int32 NOT NULL-True] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.parquet-parquet] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-directory_scan-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-hive-False-client0] >> test_s3_0.py::TestS3::test_double_optional_types_validation[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_json_list_validation[v1-client0] >> test_insert.py::TestS3::test_error[v1-client0-csv_with_names] [GOOD] >> test_insert.py::TestS3::test_error[v1-client0-tsv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client4-year Int64-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client5-year Int64 NOT NULL-False] >> test_storage_config.py::TestStorageConfig::test_cases[case_11] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_12] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-3.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-4.test] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.csv-csv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client0-year Int32 NOT NULL-True] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-parquet] [GOOD] >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params2] [GOOD] >> test_statistics.py::TestS3::test_precompute[v2-client0] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-hive-False-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client1-year Uint32 NOT NULL-True] >> 
test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-hive-True-client0] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-8.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-9.test] >> test_auth_system_views.py::test_tenant_auth_groups_access[clusteradmin-True] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client5-year Int64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client6-year Uint64-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client1-year Uint32 NOT NULL-True] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client2-year Uint64 NOT NULL-True] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.tsv-tsv_with_names] >> test_insert.py::TestS3::test_error[v1-client0-tsv_with_names] [GOOD] >> test_insert.py::TestS3::test_error[v1-client0-parquet] >> test_insert.py::TestS3::test_error[v1-client0-parquet] [SKIPPED] >> test_insert.py::TestS3::test_insert_empty_object[v1] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-hive-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-projection-False-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
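
The ResourceWarnings around "process = subprocess.Popen(" in the moto_server blocks above report log-file handles (moto_server.out.log / moto_server.err.log) that the parent opens for the child and never closes. A hedged sketch of the usual remedy, assuming the test recipe owns both the handles and the process (start_with_logs/stop are illustrative names, not the actual recipe API):

    import subprocess

    def start_with_logs(cmd, out_path, err_path):
        # The parent owns these handles; if they are dropped without close(),
        # interpreter shutdown emits exactly the ResourceWarnings seen above.
        out = open(out_path, "w", encoding="utf-8")
        err = open(err_path, "w", encoding="utf-8")
        proc = subprocess.Popen(cmd, stdout=out, stderr=err)
        return proc, out, err

    def stop(proc, out, err, timeout=10):
        proc.terminate()
        try:
            proc.wait(timeout=timeout)  # reaping also avoids "subprocess ... is still running"
        finally:
            out.close()
            err.close()
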
>> test_dml.py::TestDML::test_dml[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client2-year Uint64 NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client3-year Date NOT NULL-False] >> test_s3_0.py::TestS3::test_json_list_validation[v1-client0] [GOOD] >> test_s3_0.py::TestS3::test_json_list_validation[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client6-year Uint64-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client7-year Uint64 NOT NULL-False] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-projection-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-projection-True-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.json-json_each_row] >> test_statistics.py::TestS3::test_precompute[v2-client0] [GOOD] >> test_statistics.py::TestS3::test_precompute[v1-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_insert.py::TestS3::test_insert_empty_object[v1] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_9] [GOOD] >> test_storage_config.py::TestStorageConfig::test_create_tablet >> test_insert.py::TestS3::test_insert_empty_object[v2] >> test_storage_config.py::TestStorageConfig::test_create_tablet [GOOD] >> overlapping_portions.py::TestOverlappingPortions::test [GOOD] >> test_users_groups_with_acl.py::test_query_create_group_by_domain_admin[domain_login_only--true-YDB] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client3-year Date NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client4-year String NOT NULL-True] >> test_storage_config.py::TestStorageConfig::test_cases[case_12] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_1] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params2-true] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/0046fc/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_size_limit/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/0046fc/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_size_limit/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable 
tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=785592) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 790818 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client7-year Uint64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client8-year String NOT NULL-True] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-projection-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-hive-False-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.parquet-parquet] >> test_s3_0.py::TestS3::test_json_list_validation[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_schema_validation[v1-client0] >> test_insert.py::TestS3::test_insert_empty_object[v2] [GOOD] >> test_insert.py::TestS3::test_insert_without_format_error[v1-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client8-year String NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client9-year String-False] >> test_tenants.py::TestTenants::test_create_drop_create_table[enable_alter_database_create_hive_first--true] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client4-year String NOT NULL-True] [GOOD] >> test_tenants.py::TestTenants::test_create_drop_create_table2[enable_alter_database_create_hive_first--false] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client5-year String-False] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-hive-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-hive-True-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.parquet-parquet] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params2] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper 
name='/home/runner/.ya/build/build_root/cl3w/004739/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_inflight/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004739/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_inflight/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=705317) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 710120 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_tenants.py::TestTenants::test_resolve_nodes[enable_alter_database_create_hive_first--false] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.csv-csv_with_names] >> test_statistics.py::TestS3::test_precompute[v1-client0] [GOOD] >> test_statistics.py::TestS3::test_sum[v2-client0] >> test_tenants.py::TestTenants::test_create_drop_create_table2[enable_alter_database_create_hive_first--true] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
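
Every ResourceWarning above ends with the same hint: "Enable tracemalloc to get the object allocation traceback". With tracemalloc active, CPython attaches the allocation site of the leaked object to the warning, turning these anonymous "unclosed file" reports into actionable stack traces. A minimal sketch (the frame depth of 25 is an arbitrary choice):

    import tracemalloc
    import warnings

    tracemalloc.start(25)  # record up to 25 frames per allocation
    warnings.simplefilter("always", ResourceWarning)

    # ... run the leaking code; each ResourceWarning now carries the
    # traceback of the allocation instead of the bare tracemalloc hint.

The same effect is available without code changes via the PYTHONTRACEMALLOC=1 environment variable or the -X tracemalloc interpreter flag.
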
>> test_insert.py::TestS3::test_insert_without_format_error[v1-client0] [GOOD] >> test_insert.py::TestS3::test_insert_without_format_error[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client9-year String-False] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-hive-True-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client5-year String-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client10-year Utf8-False] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-projection-False-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client6-year Utf8 NOT NULL-True] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.tsv-tsv_with_names] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_validation.py::TestS3::test_nested_type[v1-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00473b/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_validation/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00473b/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_validation/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=701785) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 706724 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_storage_config.py::TestStorageConfig::test_cases[case_1] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_2] >> test_system_views.py::TestPartitionStats::test_case >> test_s3_0.py::TestS3::test_schema_validation[v1-client0] [GOOD] >> test_s3_0.py::TestS3::test_schema_validation[v2-client0] >> test_tenants.py::TestTenants::test_create_tables[enable_alter_database_create_hive_first--false] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-projection-False-client0] [GOOD] >> test_insert.py::TestS3::test_insert_without_format_error[v2-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-projection-True-client0] >> test_insert.py::TestS3::test_raw_format_validation[v1-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.json-json_each_row] >> test_tenants.py::TestTenants::test_list_database_above[enable_alter_database_create_hive_first--true] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client10-year Utf8-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client11-year Utf8 NOT NULL-True] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client6-year Utf8 NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client7-year Utf8-False] >> test_tenants.py::TestTenants::test_create_drop_create_table[enable_alter_database_create_hive_first--true] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_user[unblock] >> test_dml.py::TestDML::test_dml[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client11-year Utf8 NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client12-year Date-False] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-projection-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-hive-False-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.json-json_each_row] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-3.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-4.test] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client7-year Utf8-False] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.parquet-parquet] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join0.test] [GOOD] >> test_dml.py::TestDML::test_dml[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> 
test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client8-year Int32-False] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join1.test] >> test_tenants.py::TestTenants::test_create_remove_database[enable_alter_database_create_hive_first--false] >> test_dml.py::TestDML::test_dml[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-12.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-13.test] >> test_s3_0.py::TestS3::test_schema_validation[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.parquet-parquet] [GOOD] >> test_users_groups_with_acl.py::test_query_create_group_by_domain_admin[domain_login_only--true-YDB] [GOOD] >> test_users_groups_with_acl.py::test_query_create_group_by_tenant_admin[domain_login_only--false-YDB] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_SECONDS] >> test_insert.py::TestS3::test_raw_format_validation[v1-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client12-year Date-False] [GOOD] >> test_insert.py::TestS3::test_raw_format_validation[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client13-year Date NOT NULL-True] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client8-year Int32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client9-year Uint32-False] >> test_system_views.py::TestPartitionStats::test_case [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_2] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-hive-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-hive-True-client0] >> test_dynamic_tenants.py::test_create_and_drop_tenants[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_create_and_drop_the_same_tenant2[enable_alter_database_create_hive_first--false] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client13-year Date NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client14-year Datetime-False] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_SECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MICROSECONDS] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join1.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join2.test] >> test_tenants.py::TestTenants::test_list_database_above[enable_alter_database_create_hive_first--true] [FAIL] >> test_ydb_over_fq.py::TestYdbOverFq::test_list_directory_v1[v1-client0] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_list_without_streams[v1-client0] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-hive-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-projection-False-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client9-year Uint32-False] [GOOD] >> 
test_ydb_over_fq.py::TestYdbOverFq::test_list_without_streams[v1-client0] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_list_without_streams[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client10-year Int64 NOT NULL-True] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MICROSECONDS] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_list_without_streams[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MILLISECONDS] >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query[v1-client0] >> test_tenants.py::TestTenants::test_progress_when_tenant_tablets_run_on_dynamic_nodes[enable_alter_database_create_hive_first--false] >> test_system_views.py::TestQueryMetrics::test_case >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client14-year Datetime-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client15-year Datetime NOT NULL-True] >> test_insert.py::TestS3::test_raw_format_validation[v2-client0] [GOOD] >> test_insert.py::TestS3::test_block_insert_enable[v1-client0] >> test_dml.py::TestDML::test_dml[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-projection-False-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client10-year Int64 NOT NULL-True] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-projection-True-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client11-year Int64-False] >> test_tenants.py::TestTenants::test_create_tables[enable_alter_database_create_hive_first--false] [GOOD] >> test_tenants.py::TestTenants::test_create_drop_create_table2[enable_alter_database_create_hive_first--true] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MILLISECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_SECONDS] >> test_alter_compression.py::TestAlterCompression::test[alter_compression] [GOOD] >> test_alter_compression.py::TestAlterCompression::test_multi[alter_compression] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join2.test] [GOOD] >> test_tenants.py::TestTenants::test_resolve_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_tenants.py::TestTenants::test_resolve_nodes[enable_alter_database_create_hive_first--true] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
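
The popen_fork.py:66 DeprecationWarning in the stderr blocks above fires because a multi-threaded parent creates multiprocessing children via fork(), which can deadlock the child on locks held by threads that do not survive the fork. A sketch of the conventional fix, assuming the test code controls how workers are started: use a "spawn" context so each child begins from a fresh interpreter.

    import multiprocessing as mp

    def worker(n):
        return n * n

    if __name__ == "__main__":
        # "spawn" re-imports the module in the child instead of fork()-ing
        # the (multi-threaded) parent, which is what the warning is about.
        ctx = mp.get_context("spawn")
        with ctx.Pool(2) as pool:
            print(pool.map(worker, range(4)))  # [0, 1, 4, 9]

A context is preferable to the global mp.set_start_method("spawn") when a shared test harness may already have set the start method.
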
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client15-year Datetime NOT NULL-True] [GOOD] >> test_tenants.py::TestTenants::test_create_tables[enable_alter_database_create_hive_first--true] >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query[v1-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client0-column_type0-True] >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client11-year Int64-False] [GOOD] >> test_tenants.py::TestTenants::test_create_drop_create_table3[enable_alter_database_create_hive_first--false] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client12-year Uint64-False] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client0-column_type0-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client1-column_type1-True] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_SECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MICROSECONDS] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-projection-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-hive-False-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client1-column_type1-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client2-column_type2-True] >> test_insert.py::TestS3::test_block_insert_enable[v1-client0] [GOOD] >> test_insert.py::TestS3::test_block_insert_enable[v2-client0] |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_storage_config.py::TestStorageConfig::test_cases[case_2] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client2-column_type2-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client3-column_type3-False] >> test_statistics.py::TestS3::test_sum[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client3-column_type3-False] [GOOD] >> test_statistics.py::TestS3::test_sum[v1-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client4-column_type4-True] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client12-year Uint64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client13-year Date-False] >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql[enable_alter_database_create_hive_first--true] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client4-column_type4-True] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MICROSECONDS] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-hive-False-client0] [GOOD] >> 
test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client5-column_type5-True] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MILLISECONDS] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-hive-True-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client5-column_type5-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client6-column_type6-True] |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join2.test] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client6-column_type6-True] [GOOD] >> test_stream_query.py::TestStreamQuery::test_sql_suite[plan-window.test] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client7-column_type7-False] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
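
The "subprocess NNN is still running" ResourceWarnings, as in the size_limit and inflight blocks above, mean a Popen object was garbage-collected before anyone waited on the child. Where the test owns the child directly, the smallest fix is Popen's context-manager form, which always reaps on exit (the echo command is a stand-in, not the tests' actual child process):

    import subprocess

    # On exiting the with-block, Popen closes its pipes and wait()s for the
    # child, so no "still running" warning can fire at garbage collection.
    with subprocess.Popen(["echo", "done"], stdout=subprocess.PIPE) as proc:
        output, _ = proc.communicate(timeout=30)

    print(output.decode().strip())  # done
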
>> test_tenants.py::TestTenants::test_create_remove_database[enable_alter_database_create_hive_first--false] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client7-column_type7-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client8-column_type8-False] >> test_dynamic_tenants.py::test_custom_coordinator_options[enable_alter_database_create_hive_first--true] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client0-year Int32 NOT NULL-True] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client8-column_type8-False] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MILLISECONDS] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client9-column_type9-False] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_SECONDS] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-hive-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-projection-False-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client9-column_type9-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client10-column_type10-False] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client10-column_type10-False] [GOOD] >> test_insert.py::TestS3::test_block_insert_enable[v2-client0] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client11-column_type11-False] >> test_insert.py::TestS3::test_block_insert_value[v1-client0] >> test_ydb_over_fq.py::TestYdbOverFq::test_stream_execute_scan_query[v1-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_s3_0.py::TestS3::test_schema_validation[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004744/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_0/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004744/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_0/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=694741) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004744/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_0/testing_out_stuff/test_s3_0.py.TestS3.test_csv.v1-false-client0/default/node_1/metering.bill' mode='r' encoding='utf-8'>
  meterings_loaded = sum(1 for _ in open(bill_fname))
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 701326 is still running
ResourceWarning: Enable tracemalloc to get the object allocation traceback
ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004744/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_0/testing_out_stuff/test_s3_0.py.TestS3.test_csv.v1-false-client0/cp/node_1/metering.bill' mode='r' encoding='utf-8'>
  meterings_loaded = sum(1 for _ in open(bill_fname))
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
::1 - - [30/Jun/2025 09:31:08] send response localhost:22593/?database=local
::1 - - [30/Jun/2025 09:31:08] "GET /database?databaseId=FakeDatabaseId HTTP/1.1" 200 -
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
>> test_tenants.py::TestTenants::test_create_remove_database[enable_alter_database_create_hive_first--true]
>> test_storage_config.py::TestStorageConfig::test_cases[case_3]
>> test_dml.py::TestDML::test_dml[table_all_types-pk_types12-all_types12-index12---] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client11-column_type11-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client12-column_type12-False]
>> test_users_groups_with_acl.py::test_yql_create_group_by_tenant_admin[domain_login_only--true-YDB]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client12-column_type12-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client13-column_type13-False]
>> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-projection-False-client0] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_SECONDS] [GOOD]
>> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-projection-True-client0]
>> test_users_groups_with_acl.py::test_query_create_user_by_tenant_admin[domain_login_only--false-YDB]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MICROSECONDS]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client13-column_type13-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client0-year Int32 NOT NULL-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client0-column_type0-True]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client1-year Uint32 NOT NULL-True]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/oom/py3test >> overlapping_portions.py::TestOverlappingPortions::test [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client0-column_type0-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client1-column_type1-True]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client1-column_type1-True] [GOOD]
>> test_users_groups_with_acl.py::test_query_create_group_by_tenant_admin[domain_login_only--false-YDB] [GOOD]
>> test_users_groups_with_acl.py::test_query_create_group_by_tenant_admin[domain_login_only--true-YDB]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client2-column_type2-True]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client2-column_type2-True] [GOOD]
>> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-projection-True-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client3-column_type3-False]
>> test_s3_1.py::TestS3::test_huge_source[v1-false-client0]
>> test_tenants.py::TestTenants::test_create_tables[enable_alter_database_create_hive_first--true] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MICROSECONDS] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MILLISECONDS]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client3-column_type3-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client4-column_type4-True]
>> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-4.test] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client1-year Uint32 NOT NULL-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client4-column_type4-True] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client2-year Uint64 NOT NULL-True]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client5-column_type5-True]
>> test_tenants.py::TestTenants::test_progress_when_tenant_tablets_run_on_dynamic_nodes[enable_alter_database_create_hive_first--false] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client5-column_type5-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client6-column_type6-True]
>> test_tenants.py::TestTenants::test_when_deactivate_fat_tenant_creation_another_tenant_is_ok[enable_alter_database_create_hive_first--true] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client6-column_type6-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client7-column_type7-False]
>> test_tenants.py::TestTenants::test_yql_operations_over_dynamic_nodes[enable_alter_database_create_hive_first--false]
>> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test [GOOD]
>> test_tenants.py::TestTenants::test_force_delete_tenant_when_table_has_been_deleted[enable_alter_database_create_hive_first--false]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client7-column_type7-False] [GOOD]
>> test_ydb_over_fq.py::TestYdbOverFq::test_stream_execute_scan_query[v1-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client8-column_type8-False]
>> test_ydb_over_fq.py::TestYdbOverFq::test_stream_execute_scan_query[v2-client0]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MILLISECONDS] [GOOD]
>> test_insert.py::TestS3::test_block_insert_value[v1-client0] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_SECONDS]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client8-column_type8-False] [GOOD]
>> test_insert.py::TestS3::test_block_insert_value[v2-client0]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client2-year Uint64 NOT NULL-True] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD]
Test command err:
contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC).
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client9-column_type9-False]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client3-year Date NOT NULL-False]
>> test_tenants.py::TestTenants::test_progress_when_tenant_tablets_run_on_dynamic_nodes[enable_alter_database_create_hive_first--true]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client9-column_type9-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client10-column_type10-False]
>> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--false]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client10-column_type10-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client11-column_type11-False]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client11-column_type11-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client12-column_type12-False]
>> ttl_unavailable_s3.py::TestUnavailableS3::test [GOOD]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_stream_query.py::TestStreamQuery::test_sql_suite[plan-window.test] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client12-column_type12-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client13-column_type13-False]
>> ttl_delete_s3.py::TestDeleteS3Ttl::test_data_unchanged_after_ttl_change [GOOD]
>> ttl_delete_s3.py::TestDeleteS3Ttl::test_delete_s3_tiering
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client13-column_type13-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client0-column_type0-False]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_SECONDS] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MICROSECONDS]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client0-column_type0-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client1-column_type1-False]
>> TStateStorageTest::ShouldDeleteNoCheckpoints
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client3-year Date NOT NULL-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client1-column_type1-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client4-year String NOT NULL-True]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_all_types-pk_types12-all_types12-index12---] [GOOD]
Test command err:
contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC).
>> test_dynamic_tenants.py::test_create_tenant_then_exec_yql[enable_alter_database_create_hive_first--true] [GOOD]
>> test_dynamic_tenants.py::test_create_tenant_then_exec_yql_empty_database_header[enable_alter_database_create_hive_first--false]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client2-column_type2-False]
>> test_users_groups_with_acl.py::test_yql_create_group_by_tenant_admin[domain_login_only--true-YDB] [GOOD]
>> test_users_groups_with_acl.py::test_yql_create_user_by_domain_admin[domain_login_only--false-YDB]
>> test_s3_1.py::TestS3::test_huge_source[v1-false-client0] [GOOD]
>> TStateStorageTest::ShouldDeleteNoCheckpoints [GOOD]
>> TStateStorageTest::ShouldDeleteNoCheckpoints2
>> test_s3_1.py::TestS3::test_huge_source[v1-true-client0]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client2-column_type2-False] [GOOD]
>> test_tenants.py::TestTenants::test_create_drop_create_table3[enable_alter_database_create_hive_first--false] [XFAIL]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client3-column_type3-False]
>> TStateStorageTest::ShouldDeleteNoCheckpoints2 [GOOD]
>> TStateStorageTest::ShouldDeleteCheckpoints
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client3-column_type3-False] [GOOD]
>> test_dynamic_tenants.py::test_create_and_drop_the_same_tenant2[enable_alter_database_create_hive_first--false] [FAIL]
>> test_dynamic_tenants.py::test_create_and_drop_the_same_tenant2[enable_alter_database_create_hive_first--true]
>> test_ydb_over_fq.py::TestYdbOverFq::test_stream_execute_scan_query[v2-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client4-column_type4-False]
>> TStateStorageTest::ShouldDeleteCheckpoints [GOOD]
>> TStateStorageTest::ShouldDeleteGraph
>> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_results[v1-client0]
>> TStateStorageTest::ShouldDeleteGraph [GOOD]
>> TStateStorageTest::ShouldGetMultipleStates
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MICROSECONDS] [GOOD]
>> test_users_groups_with_acl.py::test_query_create_user_by_tenant_admin[domain_login_only--false-YDB] [GOOD]
>> test_users_groups_with_acl.py::test_query_create_user_by_tenant_admin[domain_login_only--true-YDB]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MILLISECONDS]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client4-column_type4-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client5-column_type5-True]
>> test_dynamic_tenants.py::test_custom_coordinator_options[enable_alter_database_create_hive_first--true] [GOOD]
>> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_complete[enable_alter_database_create_hive_first--false]
>> TStateStorageTest::ShouldGetMultipleStates [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client5-column_type5-True] [GOOD]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client6-column_type6-False]
>> test_tenants.py::TestTenants::test_create_drop_create_table3[enable_alter_database_create_hive_first--true]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client6-column_type6-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client4-year String NOT NULL-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client7-column_type7-False]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client5-year String-False]
>> test_storage_config.py::TestStorageConfig::test_cases[case_3] [GOOD]
>> test_storage_config.py::TestStorageConfig::test_cases[case_4]
>> test_insert.py::TestS3::test_block_insert_value[v2-client0] [GOOD]
>> test_tenants.py::TestTenants::test_create_remove_database[enable_alter_database_create_hive_first--true] [GOOD]
>> test_insert.py::TestS3::test_insert_deadlock[v1-false-client0]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client7-column_type7-False] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MILLISECONDS] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client8-column_type8-False]
>> test_statistics.py::TestS3::test_sum[v1-client0] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_SECONDS]
>> test_statistics.py::TestS3::test_aborted_by_user[v2-client0]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client8-column_type8-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client9-column_type9-False]
>> TCheckpointStorageTest::ShouldRegisterCoordinator
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client9-column_type9-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client10-column_type10-False]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-4.test] [GOOD]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldGetMultipleStates [GOOD]
>> TCheckpointStorageTest::ShouldRegisterCoordinator [GOOD]
>> TCheckpointStorageTest::ShouldGetCoordinators
>> TCheckpointStorageTest::ShouldGetCoordinators [GOOD]
>> TCheckpointStorageTest::ShouldMarkCheckpointsGc
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client10-column_type10-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client11-column_type11-False]
>> test_tenants.py::TestTenants::test_create_remove_database_wait[enable_alter_database_create_hive_first--false]
>> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--true]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client5-year String-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client11-column_type11-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client6-year Utf8 NOT NULL-True]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client12-column_type12-False]
>> TCheckpointStorageTest::ShouldMarkCheckpointsGc [GOOD]
>> TCheckpointStorageTest::ShouldNotDeleteUnmarkedCheckpoints
>> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_results[v1-client0] [GOOD]
>> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_results[v2-client0]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client12-column_type12-False] [GOOD]
>> test_insert.py::TestS3::test_insert_deadlock[v1-false-client0] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_SECONDS] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client13-column_type13-False]
>> TCheckpointStorageTest::ShouldNotDeleteUnmarkedCheckpoints [GOOD]
>> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId
>> test_insert.py::TestS3::test_insert_deadlock[v1-true-client0]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MICROSECONDS]
>> test_s3_1.py::TestS3::test_huge_source[v1-true-client0] [GOOD]
>> test_tenants.py::TestTenants::test_yql_operations_over_dynamic_nodes[enable_alter_database_create_hive_first--false] [GOOD]
>> test_s3_1.py::TestS3::test_huge_source[v2-false-client0]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client13-column_type13-False] [GOOD]
>> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client0-column_type0-False]
>> TStorageServiceTest::ShouldNotRegisterPrevGeneration
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test [GOOD]
Test command err:
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00457c/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00457c/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 887993 is still running
ResourceWarning: Enable tracemalloc to get the object allocation traceback
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client6-year Utf8 NOT NULL-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client0-column_type0-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client7-year Utf8-False]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client1-column_type1-False]
>> test_tenants.py::TestTenants::test_resolve_nodes[enable_alter_database_create_hive_first--true] [GOOD]
>> test_tenants.py::TestTenants::test_stop_start[enable_alter_database_create_hive_first--false]
>> TStorageServiceTest::ShouldNotRegisterPrevGeneration [GOOD]
>> TStorageServiceTest::ShouldNotCreateCheckpointWhenUnregistered
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client1-column_type1-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client2-column_type2-False]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MICROSECONDS] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client2-column_type2-False] [GOOD]
>> test_tenants.py::TestTenants::test_yql_operations_over_dynamic_nodes[enable_alter_database_create_hive_first--true]
>> TStorageServiceTest::ShouldNotCreateCheckpointWhenUnregistered [GOOD]
>> TStorageServiceTest::ShouldNotCreateCheckpointTwice
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MILLISECONDS]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client3-column_type3-False]
>> test_insert.py::TestS3::test_insert_deadlock[v1-true-client0] [GOOD]
>> test_insert.py::TestS3::test_insert_deadlock[v2-false-client0]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client3-column_type3-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client4-column_type4-False]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> ttl_unavailable_s3.py::TestUnavailableS3::test [GOOD]
Test command err:
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004581/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk5/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004581/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk5/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
!!! simulating S3 hang up -- sending SIGSTOP !!!
simulating S3 recovery -- sending SIGCONT
contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 882543 is still running
ResourceWarning: Enable tracemalloc to get the object allocation traceback
>> TStorageServiceTest::ShouldNotCreateCheckpointTwice [GOOD]
>> TStorageServiceTest::ShouldNotPendingCheckpointWithoutCreation
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client4-column_type4-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client5-column_type5-True]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client5-column_type5-True] [GOOD]
>> test_storage_config.py::TestStorageConfig::test_cases[case_4] [GOOD]
>> test_storage_config.py::TestStorageConfig::test_cases[case_5]
>> TStorageServiceTest::ShouldNotPendingCheckpointWithoutCreation [GOOD]
>> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client6-column_type6-False]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId [GOOD]
>> test_users_groups_with_acl.py::test_yql_create_user_by_domain_admin[domain_login_only--false-YDB] [GOOD]
>> test_users_groups_with_acl.py::test_yql_create_user_by_domain_admin[domain_login_only--true-YDB]
>> test_tenants.py::TestTenants::test_progress_when_tenant_tablets_run_on_dynamic_nodes[enable_alter_database_create_hive_first--true] [GOOD]
>> test_statistics.py::TestS3::test_aborted_by_user[v2-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client6-column_type6-False] [GOOD]
>> test_statistics.py::TestS3::test_aborted_by_user[v1-client0]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client7-column_type7-False]
>> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged [GOOD]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_alter_compression.py::TestAlterCompression::test_multi[alter_compression] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client7-column_type7-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client7-year Utf8-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client8-column_type8-False]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client8-year Int32-False]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test
>> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_results[v2-client0] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MILLISECONDS] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client8-column_type8-False] [GOOD]
>> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_error[v1-client0]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_SECONDS]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client9-column_type9-False]
>> test_s3_1.py::TestS3::test_huge_source[v2-false-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client9-column_type9-False] [GOOD]
>> test_tenants.py::TestTenants::test_force_delete_tenant_when_table_has_been_deleted[enable_alter_database_create_hive_first--false] [GOOD]
>> test_s3_1.py::TestS3::test_huge_source[v2-true-client0]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client10-column_type10-False]
>> test_users_groups_with_acl.py::test_query_create_group_by_tenant_admin[domain_login_only--true-YDB] [GOOD]
>> test_users_groups_with_acl.py::test_query_create_user_by_domain_admin[domain_login_only--false-YDB]
>> test_tenants.py::TestTenants::test_register_tenant_and_force_drop_with_table[enable_alter_database_create_hive_first--false]
>> test_dynamic_tenants.py::test_create_tenant_then_exec_yql_empty_database_header[enable_alter_database_create_hive_first--false] [GOOD]
>> test_dynamic_tenants.py::test_create_tenant_then_exec_yql_empty_database_header[enable_alter_database_create_hive_first--true]
>> TCheckpointStorageTest::ShouldCreateCheckpoint
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client10-column_type10-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client11-column_type11-False]
>> TCheckpointStorageTest::ShouldCreateCheckpoint [GOOD]
>> TCheckpointStorageTest::ShouldCreateGetCheckpoints
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client11-column_type11-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client12-column_type12-False]
>> test_ttl.py::TestTTLDefaultEnv::test_case
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged [GOOD]
Test command err:
2025-06-30T09:32:42.378981Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7521672983551245492:2048] with connection to localhost:14934:local
2025-06-30T09:32:42.379035Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest
2025-06-30T09:32:42.550884Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered
2025-06-30T09:32:42.550905Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse
2025-06-30T09:32:42.551043Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.16] Got TEvRegisterCoordinatorRequest
2025-06-30T09:32:42.567489Z node 1 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:197: [graph_graphich.16] Failed to register graph:
: Warning: Table: local/TStorageServiceTestShouldNotRegisterPrevGeneration/coordinators_sync, pk: graph_graphich, current generation: 17, expected/new generation: 16, operation: RegisterCheck, code: 400130
2025-06-30T09:32:42.567512Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.16] Send TEvRegisterCoordinatorResponse
2025-06-30T09:32:43.017730Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7521672986722244495:2048] with connection to localhost:14934:local
2025-06-30T09:32:43.017799Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest
2025-06-30T09:32:43.126606Z node 2 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:1] Failed to create checkpoint:
: Warning: Table: local/TStorageServiceTestShouldNotCreateCheckpointWhenUnregistered/coordinators_sync, pk: graph_graphich, current generation: 0, expected/new generation: 17, operation: Check, code: 400130
2025-06-30T09:32:43.126628Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse
2025-06-30T09:32:43.466444Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7521672989999781118:2048] with connection to localhost:14934:local
2025-06-30T09:32:43.466523Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest
2025-06-30T09:32:43.497793Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered
2025-06-30T09:32:43.497816Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse
2025-06-30T09:32:43.497956Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest
2025-06-30T09:32:43.603836Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created
2025-06-30T09:32:43.603855Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse
2025-06-30T09:32:43.604004Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest
2025-06-30T09:32:43.618165Z node 3 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:1] Failed to create checkpoint:
: Error: Constraint violated. Table: `local/TStorageServiceTestShouldNotCreateCheckpointTwice/checkpoints_metadata`., code: 2012
: Error: Conflict with existing key., code: 2012
2025-06-30T09:32:43.618183Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse
2025-06-30T09:32:44.006992Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7521672989670416726:2048] with connection to localhost:14934:local
2025-06-30T09:32:44.007073Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest
2025-06-30T09:32:44.049307Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered
2025-06-30T09:32:44.049341Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse
2025-06-30T09:32:44.049566Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest
2025-06-30T09:32:44.085103Z node 4 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:274: [graph_graphich.17] [17:1] Failed to set 'PendingCommit' status:
: Warning: Failed to select checkpoint '17:1', code: 400080
2025-06-30T09:32:44.085125Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse
2025-06-30T09:32:44.457487Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7521672992596906885:2048] with connection to localhost:14934:local
2025-06-30T09:32:44.457552Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest
2025-06-30T09:32:44.508007Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered
2025-06-30T09:32:44.508029Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse
2025-06-30T09:32:44.508197Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest
2025-06-30T09:32:44.662618Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created
2025-06-30T09:32:44.662645Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse
2025-06-30T09:32:44.662800Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest
2025-06-30T09:32:44.676165Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered
2025-06-30T09:32:44.676183Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse
2025-06-30T09:32:44.676454Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest
2025-06-30T09:32:44.681212Z node 5 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:274: [graph_graphich.17] [17:1] Failed to set 'PendingCommit' status:
: Warning: Table: local/TStorageServiceTestShouldNotPendingCheckpointGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130
2025-06-30T09:32:44.681231Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client12-column_type12-False] [GOOD]
>> TCheckpointStorageTest::ShouldCreateGetCheckpoints [GOOD]
>> TCheckpointStorageTest::ShouldGetCheckpointsEmpty
>> test_insert.py::TestS3::test_insert_deadlock[v2-false-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client13-column_type13-False]
>> test_tenants.py::TestTenants::test_force_delete_tenant_when_table_has_been_deleted[enable_alter_database_create_hive_first--true]
>> test_insert.py::TestS3::test_insert_deadlock[v2-true-client0]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_SECONDS] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MICROSECONDS]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client13-column_type13-False] [GOOD]
>> TCheckpointStorageTest::ShouldGetCheckpointsEmpty [GOOD]
>> TCheckpointStorageTest::ShouldDeleteGraph
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client8-year Int32-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client0-column_type0-False]
>> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--false]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client9-year Uint32-False]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client0-column_type0-False] [GOOD]
>> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_complete[enable_alter_database_create_hive_first--false] [GOOD]
>> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_complete[enable_alter_database_create_hive_first--true]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client1-column_type1-True]
>> TCheckpointStorageTest::ShouldDeleteGraph [GOOD]
>> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints
>> test_statistics.py::TestS3::test_aborted_by_user[v1-client0] [GOOD]
>> test_postgres.py::TestPGSQL::test_sql_suite[plan-select_distinct.test] [GOOD]
>> test_postgres.py::TestPGSQL::test_sql_suite[plan-strings.test]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client1-column_type1-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client2-column_type2-False]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MICROSECONDS] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client2-column_type2-False] [GOOD]
>> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MILLISECONDS]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client3-column_type3-True]
>> test_insert.py::TestS3::test_insert_deadlock[v2-true-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client3-column_type3-True] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client9-year Uint32-False] [GOOD]
>> test_storage_config.py::TestStorageConfig::test_cases[case_5] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client10-year Int64 NOT NULL-True]
>> test_storage_config.py::TestStorageConfig::test_cases[case_6]
>> test_postgres.py::TestPGSQL::test_sql_suite[plan-strings.test] [GOOD]
>> test_postgres.py::TestPGSQL::test_sql_suite[plan-window.test]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client4-column_type4-True]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client4-column_type4-True] [GOOD]
>> TStorageServiceTest::ShouldNotCreateCheckpointAfterGenerationChanged
>> test_s3_1.py::TestS3::test_huge_source[v2-true-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client5-column_type5-False]
>> test_s3_1.py::TestS3::test_top_level_listing[v1-false-client0]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client5-column_type5-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client6-column_type6-True]
>> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--false]
>> TStorageServiceTest::ShouldNotCreateCheckpointAfterGenerationChanged [GOOD]
>> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutCreation
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MILLISECONDS] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_SECONDS]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client6-column_type6-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client7-column_type7-True]
>> test_postgres.py::TestPGSQL::test_sql_suite[plan-window.test] [GOOD]
|86.3%| [TA] $(B)/ydb/tests/olap/scenario/test-results/py3test/{meta.json ... results_accumulator.log}
>> test_tenants.py::TestTenants::test_create_drop_create_table3[enable_alter_database_create_hive_first--true] [XFAIL]
>> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_error[v1-client0] [GOOD]
>> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutCreation [GOOD]
>> TStorageServiceTest::ShouldNotAbortCheckpointWithoutCreation
>> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD]
>> test_s3_1.py::TestS3::test_top_level_listing[v1-false-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client7-column_type7-True] [GOOD]
>> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_error[v2-client0]
>> test_s3_1.py::TestS3::test_top_level_listing[v1-true-client0]
>> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--false]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client8-column_type8-False]
>> TStorageServiceTest::ShouldNotAbortCheckpointWithoutCreation [GOOD]
>> test_users_groups_with_acl.py::test_query_create_user_by_tenant_admin[domain_login_only--true-YDB] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client8-column_type8-False] [GOOD]
>> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutPending
>> test_users_groups_with_acl.py::test_yql_create_group_by_domain_admin[domain_login_only--false-YDB]
>> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutPending [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client10-year Int64 NOT NULL-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client9-column_type9-False]
>> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client11-year Int64-False]
>> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--false]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client9-column_type9-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client10-column_type10-False]
|86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints [GOOD]
>> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_SECONDS] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client10-column_type10-False] [GOOD]
>> test_tenants.py::TestTenants::test_create_drop_create_table[enable_alter_database_create_hive_first--false]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MICROSECONDS]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client11-column_type11-False]
>> test_tenants.py::TestTenants::test_create_remove_database_wait[enable_alter_database_create_hive_first--false] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client11-column_type11-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client12-column_type12-False]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client12-column_type12-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client11-year Int64-False] [GOOD]
>> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--false] [GOOD]
>> test_storage_config.py::TestStorageConfig::test_cases[case_6] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client12-year Uint64-False]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client13-column_type13-False]
>> test_s3_1.py::TestS3::test_top_level_listing[v1-true-client0] [GOOD]
>> test_storage_config.py::TestStorageConfig::test_cases[case_7]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client13-column_type13-False] [GOOD]
>> test_tenants.py::TestTenants::test_register_tenant_and_force_drop_with_table[enable_alter_database_create_hive_first--false] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client14-column_type14-False]
>> test_s3_1.py::TestS3::test_top_level_listing[v2-false-client0]
|86.3%| [TA] {RESULT} $(B)/ydb/tests/olap/scenario/test-results/py3test/{meta.json ... results_accumulator.log}
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client14-column_type14-False] [GOOD]
>> test_users_groups_with_acl.py::test_query_create_user_by_domain_admin[domain_login_only--false-YDB] [GOOD]
>> test_users_groups_with_acl.py::test_query_create_user_by_domain_admin[domain_login_only--true-YDB]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client15-column_type15-False]
>> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MICROSECONDS] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MILLISECONDS]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged [GOOD]
Test command err:
2025-06-30T09:32:50.709690Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7521673018629108295:2048] with connection to localhost:10589:local
2025-06-30T09:32:50.709753Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest
2025-06-30T09:32:50.889032Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered
2025-06-30T09:32:50.889058Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse
2025-06-30T09:32:50.889245Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest
2025-06-30T09:32:51.020200Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created
2025-06-30T09:32:51.020232Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse
2025-06-30T09:32:51.020395Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest
2025-06-30T09:32:51.031840Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered
2025-06-30T09:32:51.031863Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse
2025-06-30T09:32:51.032089Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest
2025-06-30T09:32:51.042586Z node 1 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:2] Failed to create checkpoint:
: Warning: Table: local/TStorageServiceTestShouldNotCreateCheckpointAfterGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130
2025-06-30T09:32:51.042614Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse
2025-06-30T09:32:51.443940Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7521673025296560395:2048] with connection to localhost:10589:local
2025-06-30T09:32:51.443997Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest
2025-06-30T09:32:51.485396Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered
2025-06-30T09:32:51.485413Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse
2025-06-30T09:32:51.488728Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest
2025-06-30T09:32:51.522668Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Failed to select checkpoint '17:1', code: 400080
2025-06-30T09:32:51.522687Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse
2025-06-30T09:32:51.926905Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7521673025419499120:2048] with connection to localhost:10589:local
2025-06-30T09:32:51.926981Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest
2025-06-30T09:32:51.962953Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered
2025-06-30T09:32:51.962974Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse
2025-06-30T09:32:51.963298Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:1] Got TEvAbortCheckpointRequest
2025-06-30T09:32:51.992151Z node 3 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:331: [graph_graphich.17] [17:1] Failed to abort checkpoint:
: Warning: Failed to select checkpoint '17:1', code: 400080
2025-06-30T09:32:51.992174Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:1] Send TEvAbortCheckpointResponse
2025-06-30T09:32:52.429807Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7521673026818382822:2048] with connection to localhost:10589:local
2025-06-30T09:32:52.429895Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest
2025-06-30T09:32:52.481613Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered
2025-06-30T09:32:52.481646Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse
2025-06-30T09:32:52.481846Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest
2025-06-30T09:32:52.603275Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created
2025-06-30T09:32:52.603296Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse
2025-06-30T09:32:52.603432Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest
2025-06-30T09:32:52.631771Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Selected checkpoint '17:1' with status Pending, while expected PendingCommit, code: 400080 2025-06-30T09:32:52.631794Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-06-30T09:32:52.847110Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7521673026361491222:2048] with connection to localhost:10589:local 2025-06-30T09:32:52.847198Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-30T09:32:52.881426Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-30T09:32:52.881449Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-30T09:32:52.882917Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-30T09:32:52.989740Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-30T09:32:52.989761Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-30T09:32:52.989973Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-30T09:32:53.041700Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-30T09:32:53.041718Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-30T09:32:53.047434Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-30T09:32:53.059195Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-30T09:32:53.059216Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-30T09:32:53.059432Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-30T09:32:53.064928Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Table: local/TStorageServiceTestShouldNotPendingCheckpointGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-06-30T09:32:53.064952Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client15-column_type15-False] [GOOD] >> test_ttl.py::TestTTLAlterSettings::test_case >> test_tenants.py::TestTenants::test_create_remove_database_wait[enable_alter_database_create_hive_first--true] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client0-column_type0-False] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client0-column_type0-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client1-column_type1-True] >> test_tenants.py::TestTenants::test_yql_operations_over_dynamic_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client1-column_type1-True] [GOOD] >> TStateStorageTest::ShouldIssueErrorOnWrongGetStateParams >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client2-column_type2-False] >> test_dynamic_tenants.py::test_create_and_drop_the_same_tenant2[enable_alter_database_create_hive_first--true] [FAIL] >> test_dynamic_tenants.py::test_create_tenant_no_cpu[enable_alter_database_create_hive_first--false] >> TStateStorageTest::ShouldIssueErrorOnWrongGetStateParams [GOOD] >> TStateStorageTest::ShouldIssueErrorOnNonExistentState >> test_tenants.py::TestTenants::test_register_tenant_and_force_drop_with_table[enable_alter_database_create_hive_first--true] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client2-column_type2-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client3-column_type3-True] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client12-year Uint64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client13-year Date-False] >> TStateStorageTest::ShouldIssueErrorOnNonExistentState [GOOD] >> TStateStorageTest::ShouldLoadLastSnapshot >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client3-column_type3-True] [GOOD] >> test_s3_1.py::TestS3::test_top_level_listing[v2-false-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client4-column_type4-True] >> test_s3_1.py::TestS3::test_top_level_listing[v2-true-client0] >> TStateStorageTest::ShouldLoadLastSnapshot [GOOD] >> TStateStorageTest::ShouldNotGetNonExistendSnaphotState >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client4-column_type4-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client5-column_type5-False] >> TStateStorageTest::ShouldNotGetNonExistendSnaphotState [GOOD] >> TStateStorageTest::ShouldLoadIncrementSnapshot >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MILLISECONDS] [GOOD] >> 
test_tenants.py::test_operation_with_locks[enable_alter_database_create_hive_first--false] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_SECONDS] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client5-column_type5-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client6-column_type6-True] >> TStateStorageTest::ShouldLoadIncrementSnapshot [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join0.test] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client6-column_type6-True] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join1.test] >> test_ttl.py::TestTTLAlterSettings::test_case [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client7-column_type7-True] >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql_empty_database_header[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_with_cpu[enable_alter_database_create_hive_first--false] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client7-column_type7-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client8-column_type8-False] |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client13-year Date-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client8-column_type8-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client0-year Int32 NOT NULL-False] >> test_users_groups_with_acl.py::test_yql_create_user_by_domain_admin[domain_login_only--true-YDB] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_user_by_tenant_admin[domain_login_only--false-YDB] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client9-column_type9-False] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_SECONDS] [GOOD] >> test_s3_1.py::TestS3::test_top_level_listing[v2-true-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client9-column_type9-False] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MICROSECONDS] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client10-column_type10-False] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client10-column_type10-False] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_error[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client11-column_type11-False] >> test_ydb_over_fq.py::TestYdbOverFq::test_explain_data_query[v1-client0] >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--false] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client11-column_type11-False] [GOOD] >> 
test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client12-column_type12-False] >> test_storage_config.py::TestStorageConfig::test_cases[case_7] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_8] |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldLoadIncrementSnapshot [GOOD] >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client12-column_type12-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client13-column_type13-False] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client13-column_type13-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client14-column_type14-False] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client14-column_type14-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client15-column_type15-False] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client15-column_type15-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v2-false-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client0-year Int32 NOT NULL-False] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MICROSECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MILLISECONDS] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client1-year Uint32 NOT NULL-False] |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_ydb_over_fq.py::TestYdbOverFq::test_explain_data_query[v1-client0] [GOOD] |86.3%| [TA] $(B)/ydb/tests/olap/oom/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--false] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v2-false-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client1-year Uint32 NOT NULL-False] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join1.test] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MILLISECONDS] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-select_distinct.test] [GOOD] >> test_tenants.py::TestTenants::test_force_delete_tenant_when_table_has_been_deleted[enable_alter_database_create_hive_first--true] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_8] [GOOD] >> test_dml.py::TestDML::test_dml[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client2-year Uint64 NOT NULL-False] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join2.test] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.csv-csv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v2-true-client0] >> test_ydb_over_fq.py::TestYdbOverFq::test_explain_data_query[v2-client0] >> test_postgres.py::TestPGSQL::test_sql_suite[results-strings.test] >> test_tenants.py::TestTenants::test_list_database_above[enable_alter_database_create_hive_first--false] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.csv-csv_with_names] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client2-year Uint64 NOT NULL-False] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_group_by_domain_admin[domain_login_only--false-YDB] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-13.test] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_no_cpu[enable_alter_database_create_hive_first--false] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_explain_data_query[v2-client0] [GOOD] >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_complete[enable_alter_database_create_hive_first--true] [GOOD] >> test_tenants.py::TestTenants::test_create_remove_database_wait[enable_alter_database_create_hive_first--true] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-strings.test] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v2-true-client0] [GOOD] >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_continue[enable_alter_database_create_hive_first--false] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.tsv-tsv_with_names] >> test_users_groups_with_acl.py::test_yql_create_group_by_domain_admin[domain_login_only--true-YDB] >> test_dynamic_tenants.py::test_create_tenant_no_cpu[enable_alter_database_create_hive_first--true] >> test_postgres.py::TestPGSQL::test_sql_suite[results-window.test] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.tsv-tsv_with_names] [GOOD] |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> 
test_postgres.py::TestPGSQL::test_sql_suite[plan-window.test] [GOOD] |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLAlterSettings::test_case [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_statistics.py::TestS3::test_aborted_by_user[v1-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00475e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_statistics/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00475e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_statistics/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=684960) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00475e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_statistics/testing_out_stuff/test_statistics.py.TestS3.test_egress.v2-client0-json_list/cp/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00475e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_statistics/testing_out_stuff/test_statistics.py.TestS3.test_egress.v2-client0-json_list/default/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 692149 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_insert.py::TestS3::test_insert_deadlock[v2-true-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00470a/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_insert/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00470a/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_insert/testing_out_stuff/moto_server.err.log' 
mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=765053) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00470a/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_insert/testing_out_stuff/test_insert.py.TestS3.test_insert.v1-false-client0-json_list-dataset/default/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 769421 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00470a/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_insert/testing_out_stuff/test_insert.py.TestS3.test_insert.v1-false-client0-json_list-dataset/cp/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client3-year Date NOT NULL-False] >> test_ydb_over_fq.py::TestYdbOverFq::test_describe_table[v1-client0] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v1-false-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.json-json_each_row] >> test_dynamic_tenants.py::test_create_tenant_with_cpu[enable_alter_database_create_hive_first--false] [GOOD] >> test_tenants.py::TestTenants::test_register_tenant_and_force_drop_with_table[enable_alter_database_create_hive_first--true] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join2.test] [GOOD] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--false] [GOOD] >> test_users_groups_with_acl.py::test_query_create_user_by_domain_admin[domain_login_only--true-YDB] [GOOD] >> test_tenants.py::TestTenants::test_create_drop_create_table[enable_alter_database_create_hive_first--false] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_describe_table[v1-client0] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_describe_table[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client3-year Date NOT NULL-False] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_with_cpu[enable_alter_database_create_hive_first--true] >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--false] >> test_ydb_over_fq.py::TestYdbOverFq::test_describe_table[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.json-json_each_row] [GOOD] >> 
test_explicit_partitioning_1.py::TestS3::test_raw_format[v1-false-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client4-year Utf8 NOT NULL-False] >> test_tenants.py::test_operation_with_locks[enable_alter_database_create_hive_first--false] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.parquet-parquet] >> test_users_groups_with_acl.py::test_yql_create_user_by_tenant_admin[domain_login_only--false-YDB] [GOOD] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--true] >> test_stream_query.py::TestStreamQuery::test_sql_suite[results-window.test] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_insert_data_query[v1-client0] >> TStorageServiceTest::ShouldRegister >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-9.test] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v1-true-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client4-year Utf8 NOT NULL-False] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_user_by_tenant_admin[domain_login_only--true-YDB] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client5-year Int64 NOT NULL-False] >> test_tenants.py::test_operation_with_locks[enable_alter_database_create_hive_first--true] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.parquet-parquet] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v1-true-client0] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_insert_data_query[v1-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client5-year Int64 NOT NULL-False] [GOOD] >> TStorageServiceTest::ShouldRegister [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.csv-csv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v2-false-client0] >> test_ydb_over_fq.py::TestYdbOverFq::test_insert_data_query[v2-client0] >> TStorageServiceTest::ShouldRegisterNextGeneration >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client6-year Int32-False] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.csv-csv_with_names] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v2-false-client0] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_insert_data_query[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.tsv-tsv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v2-true-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client6-year Int32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client7-year Uint32-False] |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_storage_config.py::TestStorageConfig::test_cases[case_8] [GOOD] |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> 
test_tenants.py::TestTenants::test_create_remove_database_wait[enable_alter_database_create_hive_first--true] [GOOD] |86.3%| [TA] {RESULT} $(B)/ydb/tests/olap/oom/test-results/py3test/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_s3_1.py::TestS3::test_top_level_listing[v2-true-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004707/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004707/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=771283) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004707/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/test_s3_1.py.TestS3.test_write_result.v1-kikimr_params0-client0/cp/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 776758 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004707/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/test_s3_1.py.TestS3.test_top_level_listing_2.v1-kikimr_params0-false-client0/default/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004707/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/test_s3_1.py.TestS3.test_top_level_listing_2.v1-kikimr_params0-false-client0/cp/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the 
object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004707/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/test_s3_1.py.TestS3.test_precompute.v1-false-client0/default/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004707/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/test_s3_1.py.TestS3.test_precompute.v1-false-client0/cp/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback >> TStorageServiceTest::ShouldRegisterNextGeneration [GOOD] >> TStorageServiceTest::ShouldPendingAndCompleteCheckpoint >> test_tenants.py::TestTenants::test_list_database_above[enable_alter_database_create_hive_first--false] [FAIL] >> test_postgres.py::TestPGSQL::test_sql_suite[results-window.test] [GOOD] >> TStorageServiceTest::ShouldPendingAndCompleteCheckpoint [GOOD] >> TStorageServiceTest::ShouldSaveState >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client7-year Uint32-False] [GOOD] >> TStorageServiceTest::ShouldSaveState [GOOD] >> TStorageServiceTest::ShouldUseGc >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client8-year Int64-False] |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::TestTenants::test_create_drop_create_table[enable_alter_database_create_hive_first--false] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.json-json_each_row] >> TStorageServiceTest::ShouldUseGc [GOOD] >> test_ttl.py::TestTTLOnIndexedTable::test_case >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--false] >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--false] >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case |86.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client8-year Int64-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v2-true-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client9-year Uint64-False] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_explicit_partitioning_1.py::TestS3::test_parquet[v1-false-client0] >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] [GOOD] >> 
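[editor's note] The ResourceWarning entries above all point at ydb/tests/tools/fq_runner/kikimr_runner.py:184, where the metering bill file is opened inline and never closed: meterings_loaded = sum(1 for _ in open(bill_fname)). A minimal sketch of a fix, assuming the surrounding code only needs the line count; the helper name count_meterings is hypothetical, not part of the repository:

import warnings

def count_meterings(bill_fname: str) -> int:
    # A context manager closes the file deterministically, which is what
    # the ResourceWarning above is complaining is missing.
    with open(bill_fname, encoding="utf-8") as bill:
        return sum(1 for _ in bill)

# Usage matching the flagged line:
# meterings_loaded = count_meterings(bill_fname)

The original expression relies on the garbage collector to close the file handle, which CPython only does at an unspecified time; under -W error or ResourceWarning filters this shows up exactly as in the log above.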
test_dynamic_tenants.py::test_create_tenant_no_cpu[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql[enable_alter_database_create_hive_first--false] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join2.test] [GOOD] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_users_groups_with_acl.py::test_query_create_user_by_domain_admin[domain_login_only--true-YDB] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.parquet-parquet] >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-4.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-5.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldUseGc [GOOD] Test command err: 2025-06-30T09:33:13.824003Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7521673118416553877:2048] with connection to localhost:12674:local 2025-06-30T09:33:13.824072Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-30T09:33:13.957514Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-30T09:33:13.957528Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-30T09:33:14.183839Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7521673120849189669:2048] with connection to localhost:12674:local 2025-06-30T09:33:14.183910Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-30T09:33:14.214805Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-30T09:33:14.214836Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-30T09:33:14.214995Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-30T09:33:14.220688Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-30T09:33:14.220713Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-30T09:33:14.220836Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-30T09:33:14.223142Z node 2 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:197: [graph_graphich.17] Failed to register graph:
: Warning: Table: local/TStorageServiceTestShouldRegisterNextGeneration/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: RegisterCheck, code: 400130 2025-06-30T09:33:14.223154Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-30T09:33:14.448418Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7521673122297482044:2048] with connection to localhost:12674:local 2025-06-30T09:33:14.448469Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-30T09:33:14.478671Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-30T09:33:14.478691Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-30T09:33:14.478883Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-30T09:33:14.578803Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-30T09:33:14.578833Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-30T09:33:14.578975Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-30T09:33:14.624148Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-30T09:33:14.624178Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-30T09:33:14.624340Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-30T09:33:14.642032Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-30T09:33:14.642052Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-30T09:33:14.642225Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-30T09:33:14.655498Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-06-30T09:33:14.655516Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-30T09:33:14.655715Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-06-30T09:33:14.666197Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-30T09:33:14.666222Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-30T09:33:14.666394Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-30T09:33:14.704439Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 
2025-06-30T09:33:14.893610Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7521673122519959427:2048] with connection to localhost:12674:local 2025-06-30T09:33:14.893671Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-30T09:33:14.923857Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-30T09:33:14.923878Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-30T09:33:14.924119Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-30T09:33:15.027422Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-30T09:33:15.027445Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-30T09:33:15.027699Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:365: [graph_graphich] [17:1] Got TEvSaveTaskState: task 1317 2025-06-30T09:33:15.044034Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:389: [graph_graphich] [17:1] TEvSaveTaskState Apply: task: 1317 2025-06-30T09:33:15.044054Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:404: [graph_graphich] [17:1] Send TEvSaveTaskStateResult: task: 1317 2025-06-30T09:33:15.344152Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7521673127995128949:2048] with connection to localhost:12674:local 2025-06-30T09:33:15.344167Z node 5 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [5:7521673127995129054:2132] 2025-06-30T09:33:15.344191Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-30T09:33:15.377842Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-30T09:33:15.377878Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-30T09:33:15.378043Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-30T09:33:15.485877Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-30T09:33:15.485895Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-30T09:33:15.486024Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-30T09:33:15.526671Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-30T09:33:15.526701Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-30T09:33:15.526834Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-30T09:33:15.535364Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:1] Status updated to 'Completed' 2025-06-30T09:33:15.535384Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: 
[graph_graphich.17] [17:1] Send TEvNewCheckpointSucceeded 2025-06-30T09:33:15.535399Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-06-30T09:33:15.535454Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:1 for graph 'graph_graphich' 2025-06-30T09:33:15.535526Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-30T09:33:15.548385Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-30T09:33:15.548403Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-30T09:33:15.548552Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-30T09:33:15.555777Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-06-30T09:33:15.555802Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-30T09:33:15.555948Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-06-30T09:33:15.563324Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-30T09:33:15.563340Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: [graph_graphich.17] [17:2] Send TEvNewCheckpointSucceeded 2025-06-30T09:33:15.563354Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-30T09:33:15.563381Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:2 for graph 'graph_graphich' 2025-06-30T09:33:15.563575Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:3] Got TEvCreateCheckpointRequest 2025-06-30T09:33:15.575699Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:3] Checkpoint created 2025-06-30T09:33:15.575719Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:3] Send TEvCreateCheckpointResponse 2025-06-30T09:33:15.575886Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:3] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-30T09:33:15.584044Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:3] Status updated to 'PendingCommit' 2025-06-30T09:33:15.584062Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:3] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-30T09:33:15.584238Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:3] Got TEvCompleteCheckpointRequest 2025-06-30T09:33:15.592071Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:3] Status updated to 'Completed' 2025-06-30T09:33:15.592093Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: [graph_graphich.17] [17:3] Send TEvNewCheckpointSucceeded 2025-06-30T09:33:15.592109Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:3] Send TEvCompleteCheckpointResponse 2025-06-30T09:33:15.592141Z node 5 
:STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:3 for graph 'graph_graphich' 2025-06-30T09:33:15.592232Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-30T09:33:15.612824Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:1 2025-06-30T09:33:15.612927Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:3 2025-06-30T09:33:15.613151Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:2 2025-06-30T09:33:15.619577Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-30T09:33:15.720970Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-30T09:33:15.723674Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-30T09:33:15.824011Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-30T09:33:15.826992Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-30T09:33:15.927337Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-30T09:33:15.930941Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_continue[enable_alter_database_create_hive_first--false] [GOOD] >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_continue[enable_alter_database_create_hive_first--true] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::TestTenants::test_register_tenant_and_force_drop_with_table[enable_alter_database_create_hive_first--true] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
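[editor's note] The DeprecationWarning above (contrib/python/ydb/py3/ydb/types.py:59) flags datetime.datetime.utcfromtimestamp(), deprecated since Python 3.12 because it returns a naive datetime. A minimal sketch of the replacement the warning text itself suggests; the timestamp value is an arbitrary example, not taken from the log:

import datetime

timestamp = 1751275971  # example epoch seconds

# Deprecated since Python 3.12: naive datetime, no tzinfo attached.
naive_utc = datetime.datetime.utcfromtimestamp(timestamp)

# Recommended: timezone-aware datetime in UTC. datetime.UTC is an alias
# for datetime.timezone.utc, available since Python 3.11.
aware_utc = datetime.datetime.fromtimestamp(timestamp, datetime.UTC)

Both calls yield the same wall-clock fields; the aware variant additionally carries tzinfo=UTC, so comparisons and arithmetic against other aware datetimes behave correctly.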
|86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client9-year Uint64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client10-year String NOT NULL-True] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.csv-csv_with_names] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_stream_query.py::TestStreamQuery::test_sql_suite[results-window.test] [GOOD] >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--false] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-13.test] [GOOD] >> TCheckpointStorageTest::ShouldUpdateCheckpointStatusForCheckpointsWithTheSameGenAndNo >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client10-year String NOT NULL-True] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_group_by_domain_admin[domain_login_only--true-YDB] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_group_by_tenant_admin[domain_login_only--false-YDB] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client11-year String-False] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_explicit_partitioning_1.py::TestS3::test_parquet[v1-false-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v1-true-client0] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.csv-csv_with_names] [GOOD] >> TCheckpointStorageTest::ShouldUpdateCheckpointStatusForCheckpointsWithTheSameGenAndNo [GOOD] >> TGcTest::ShouldRemovePreviousCheckpoints >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.tsv-tsv_with_names] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-9.test] [GOOD] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> TGcTest::ShouldRemovePreviousCheckpoints [GOOD] >> TGcTest::ShouldIgnoreIncrementCheckpoint >> TStateStorageTest::ShouldSaveGetOldSmallState2Tasks |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[results-window.test] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> TStateStorageTest::ShouldSaveGetOldSmallState2Tasks [GOOD] >> TStorageServiceTest::ShouldCreateCheckpoint >> 
test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.json-json_each_row] >> test_dynamic_tenants.py::test_create_tenant_with_cpu[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_custom_coordinator_options[enable_alter_database_create_hive_first--false] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_all_types-pk_types12-all_types12-index12---] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client11-year String-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client12-year Utf8-False] >> TStorageServiceTest::ShouldCreateCheckpoint [GOOD] >> TStorageServiceTest::ShouldGetCheckpoints >> tier_delete.py::TestTierDelete::test_delete_s3_ttl [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::TestTenants::test_list_database_above[enable_alter_database_create_hive_first--false] [FAIL] >> TStorageServiceTest::ShouldGetCheckpoints [GOOD] >> TStorageServiceTest::ShouldAbortCheckpoint >> TStateStorageTest::ShouldSaveGetOldSmallState >> TStateStorageTest::ShouldSaveGetOldSmallState [GOOD] >> TStateStorageTest::ShouldSaveGetOldBigState >> TStateStorageTest::ShouldSaveGetOldBigState [GOOD] >> TStateStorageTest::ShouldSaveGetIncrementSmallState >> TStorageServiceTest::ShouldAbortCheckpoint [GOOD] >> TStorageServiceTest::ShouldGetState >> TStateStorageTest::ShouldSaveGetIncrementSmallState [GOOD] >> TStateStorageTest::ShouldSaveGetIncrementBigState >> TStateStorageTest::ShouldSaveGetIncrementBigState [GOOD] >> TStateStorageTest::ShouldNotGetNonExistendState >> TStateStorageTest::ShouldNotGetNonExistendState [GOOD] >> TGcTest::ShouldIgnoreIncrementCheckpoint [GOOD] >> TStateStorageTest::ShouldCountStates |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> TStateStorageTest::ShouldCountStates [GOOD] >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client12-year Utf8-False] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.json-json_each_row] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v1-true-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client13-year Date-False] >> TStorageServiceTest::ShouldGetState [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.parquet-parquet] >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_valid_projected_column_values[v2-true-client0] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> 
test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_tenants.py::test_operation_with_locks[enable_alter_database_create_hive_first--true] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldNotGetNonExistendState [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client0-year Int32 NOT NULL-False] >> test_explicit_partitioning_1.py::TestS3::test_valid_projected_column_values[v2-true-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_valid_projected_column_values[v1-true-client0] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.csv-csv_with_names] >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint [GOOD] Test command err: 2025-06-30T09:33:21.612067Z node 1 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [1:38:2085] Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/TGcTestShouldRemovePreviousCheckpoints"); SELECT * FROM checkpoints_graphs_description; 2025-06-30T09:33:21.658174Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 11:3 for graph 'graph' 2025-06-30T09:33:21.754856Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph' up to 11:3 Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/TGcTestShouldRemovePreviousCheckpoints"); SELECT * FROM checkpoints_graphs_description; 2025-06-30T09:33:22.604265Z node 2 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [2:38:2085] Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/ShouldIgnoreIncrementCheckpoint"); SELECT * FROM checkpoints_graphs_description; 2025-06-30T09:33:22.652236Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 11:3 for graph 'graph' 2025-06-30T09:33:22.652266Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:96: GC skip increment checkpoint for graph 'graph' ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldGetState [GOOD] Test command err: 2025-06-30T09:33:23.134087Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7521673159289789630:2048] with connection to localhost:3318:local 2025-06-30T09:33:23.134141Z node 1 
:STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-30T09:33:23.166991Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-30T09:33:23.167008Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-30T09:33:23.167130Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-30T09:33:23.277484Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-30T09:33:23.277508Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-30T09:33:23.607347Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7521673162376701383:2048] with connection to localhost:3318:local 2025-06-30T09:33:23.607423Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-30T09:33:23.653479Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-30T09:33:23.653513Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-30T09:33:23.653671Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-30T09:33:23.764696Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-30T09:33:23.764715Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-30T09:33:23.764933Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-30T09:33:23.787893Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-30T09:33:23.787915Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-30T09:33:23.788092Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:3] Got TEvCreateCheckpointRequest 2025-06-30T09:33:23.806488Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:3] Checkpoint created 2025-06-30T09:33:23.806509Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:3] Send TEvCreateCheckpointResponse 2025-06-30T09:33:23.806686Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-30T09:33:23.846173Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-30T09:33:24.073723Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7521673160520918842:2048] with connection to localhost:3318:local 2025-06-30T09:33:24.073818Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-30T09:33:24.110638Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-30T09:33:24.110669Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: 
storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-30T09:33:24.110881Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-30T09:33:24.229735Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-30T09:33:24.229754Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-30T09:33:24.229899Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-30T09:33:24.278409Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-30T09:33:24.278436Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-30T09:33:24.278579Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-30T09:33:24.295625Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-30T09:33:24.295649Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-30T09:33:24.295807Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-30T09:33:24.307279Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-06-30T09:33:24.307313Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-30T09:33:24.307468Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-06-30T09:33:24.319315Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-30T09:33:24.319340Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-30T09:33:24.319513Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:1] Got TEvAbortCheckpointRequest 2025-06-30T09:33:24.331622Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:333: [graph_graphich.17] [17:1] Checkpoint aborted 2025-06-30T09:33:24.331647Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:1] Send TEvAbortCheckpointResponse 2025-06-30T09:33:24.331882Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:2] Got TEvAbortCheckpointRequest 2025-06-30T09:33:24.345006Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:333: [graph_graphich.17] [17:2] Checkpoint aborted 2025-06-30T09:33:24.345024Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:2] Send TEvAbortCheckpointResponse 2025-06-30T09:33:24.345205Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-30T09:33:24.384965Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-30T09:33:24.744234Z node 4 
:STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7521673167194335960:2048] with connection to localhost:3318:local 2025-06-30T09:33:24.744327Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-30T09:33:24.785277Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-30T09:33:24.785324Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-30T09:33:24.785532Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-30T09:33:24.917724Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-30T09:33:24.917762Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-30T09:33:24.917934Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:365: [graph_graphich] [17:1] Got TEvSaveTaskState: task 1317 2025-06-30T09:33:24.942552Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:389: [graph_graphich] [17:1] TEvSaveTaskState Apply: task: 1317 2025-06-30T09:33:24.942574Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:404: [graph_graphich] [17:1] Send TEvSaveTaskStateResult: task: 1317 2025-06-30T09:33:24.942710Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:413: [graph_graphich] [17:1] Got TEvGetTaskState: tasks {1317} 2025-06-30T09:33:24.942725Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:532: [graph_graphich] [17:1] GetState, tasks: 1317 2025-06-30T09:33:25.039360Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:667: [graph_graphich] [17:1] ListOfStates results: 2025-06-30T09:33:25.039412Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:688: [graph_graphich] [17:1] taskId 1317 checkpoint id: 17:1, rows count: 1 2025-06-30T09:33:25.039422Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:920: [graph_graphich] [17:1] SkipStatesInFuture, skip 0 checkpoints 2025-06-30T09:33:25.039588Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:812: [graph_graphich] [17:1] SelectState: task_id 1317, seq_no 1, blob_seq_num 0 2025-06-30T09:33:25.104268Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:423: [graph_graphich] [17:1] DeserializeState, task id 1317, blob size 49 2025-06-30T09:33:25.104307Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:979: [graph_graphich] [17:1] ApplyIncrements 2025-06-30T09:33:25.106442Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:432: [graph_graphich] [{ Id: 1 Generation: 17 }] Send TEvGetTaskStateResult: tasks: {1317} >> test_users_groups_with_acl.py::test_yql_create_user_by_tenant_admin[domain_login_only--true-YDB] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] |86.4%| [TA] $(B)/ydb/core/fq/libs/checkpoint_storage/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.4%| [TA] {RESULT} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client0-year Int32 NOT NULL-False] [GOOD] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client1-year Uint32 NOT NULL-False] >> test_explicit_partitioning_1.py::TestS3::test_valid_projected_column_values[v1-true-client0] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.tsv-tsv_with_names] >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-4.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-5.test] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_dml.py::TestDML::test_dml[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql[enable_alter_database_create_hive_first--false] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client1-year Uint32 NOT NULL-False] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client2-year Uint64 NOT NULL-False] >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.json-json_each_row] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> tier_delete.py::TestTierDelete::test_delete_s3_ttl [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004557/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk2/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper 
name='/home/runner/.ya/build/build_root/cl3w/004557/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk2/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 908169 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql[enable_alter_database_create_hive_first--false] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client2-year Uint64 NOT NULL-False] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.json-json_each_row] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client3-year Date NOT NULL-False] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.parquet-parquet] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_continue[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> unstable_connection.py::TestUnstableConnection::test [GOOD] >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-5.test] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client3-year Date NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client4-year Utf8 NOT NULL-False] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.parquet-parquet] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> 
test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-common/simple_posix/big.csv-csv_with_names-POSIX] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_users_groups_with_acl.py::test_yql_create_user_by_tenant_admin[domain_login_only--true-YDB] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_ydb_over_fq.py::TestYdbOverFq::test_insert_data_query[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004702/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_ydb_over_fq/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004702/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_ydb_over_fq/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=774421) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 779543 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::test_operation_with_locks[enable_alter_database_create_hive_first--true] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_dynamic_tenants.py::test_custom_coordinator_options[enable_alter_database_create_hive_first--false] [GOOD] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-common/simple_posix/big.csv-csv_with_names-POSIX] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-common/simple_format/big.csv-csv_with_names-%Y-%m-%d] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client4-year Utf8 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client5-year Int64 NOT NULL-False] 
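Note on the unclosed-file ResourceWarnings in the py3test error blocks above: library/recipes/common/__init__.py:29 opens the moto_server out/err log files and passes them to subprocess.Popen without ever closing the parent's handles, so CPython reports them as unclosed at garbage collection. A minimal sketch of one way to avoid this, assuming the recipe's general shape (the helper name and arguments here are illustrative, not the actual recipe API):

    import subprocess

    def start_logged_process(cmd, out_path, err_path):
        # Popen duplicates the file descriptors at spawn time, so the parent
        # can close its own handles immediately; the child keeps writing to
        # the redirected files.
        with open(out_path, "w", encoding="utf-8") as out, \
             open(err_path, "w", encoding="utf-8") as err:
            process = subprocess.Popen(cmd, stdout=out, stderr=err)
        return process

Closing the handles in the parent does not affect the child's redirected stdout/stderr; it only stops the ResourceWarning from firing when the wrapper objects are collected.
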
>> test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--false] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-5.test] [GOOD] >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_group_by_tenant_admin[domain_login_only--false-YDB] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-common/simple_format/big.csv-csv_with_names-%Y-%m-%d] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client5-year Int64 NOT NULL-False] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-date_time/simple_iso/big.csv-csv_with_names-ISO] >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client6-year Int32-False] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_all_types-pk_types12-all_types12-index12---] [GOOD] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_system_views.py::TestQueryMetrics::test_case [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_publish_into_schemeboard_with_common_ssring.py::TestOn3DC::test_create_dirs |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_dynamic_tenants.py::test_custom_coordinator_options[enable_alter_database_create_hive_first--false] [GOOD] >> 
test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-date_time/simple_iso/big.csv-csv_with_names-ISO] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-common/simple_posix/big.csv-csv_with_names-POSIX] >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client6-year Int32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client7-year Uint32-False] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_users_groups_with_acl.py::test_yql_create_group_by_tenant_admin[domain_login_only--false-YDB] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-common/simple_posix/big.csv-csv_with_names-POSIX] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-common/simple_format/big.csv-csv_with_names-%Y-%m-%d] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
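Note on the DeprecationWarning above: contrib/python/ydb/py3/ydb/types.py:59 still calls datetime.datetime.utcfromtimestamp(), which returns a naive datetime and is scheduled for removal. A minimal sketch of the replacement the warning itself recommends (datetime.UTC is the Python 3.11+ alias; datetime.timezone.utc works on older versions):

    import datetime

    ts = 1751275200  # epoch seconds; illustrative value

    # Deprecated: naive result, triggers the warning seen in the log.
    naive = datetime.datetime.utcfromtimestamp(ts)

    # Recommended: timezone-aware result.
    aware = datetime.datetime.fromtimestamp(ts, datetime.UTC)

    assert aware.tzinfo is datetime.timezone.utc
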
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Date-pk_types18-all_types18-index18-Date--] |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-5.test] [GOOD] >> test_publish_into_schemeboard_with_common_ssring.py::TestOn3DC::test_create_dirs [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_system_views.py::TestQueryMetricsUniqueQueries::test_case |86.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client7-year Uint32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client8-year Int64-False] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-common/simple_format/big.csv-csv_with_names-%Y-%m-%d] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-date_time/simple_iso/big.csv-csv_with_names-ISO] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client8-year Int64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client9-year Uint64-False] >> test_dml.py::TestDML::test_dml[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-date_time/simple_iso/big.csv-csv_with_names-ISO] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-yql_types-yql_syntax-client0] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-5.test] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> 
test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client9-year Uint64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client10-year String NOT NULL-True] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_explicit_partitioning_1.py::TestS3::test_valid_projected_column_values[v1-true-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004700/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_explicit_partitioning_1/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004700/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_explicit_partitioning_1/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=777408) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 782557 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-yql_types-yql_syntax-client0] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-yql_types-pg_syntax-client0] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-yql_types-pg_syntax-client0] [SKIPPED] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-pg_types-yql_syntax-client0] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client10-year String NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client11-year String-False] >> test_ttl.py::TestTTLDefaultEnv::test_case [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> 
test_dml.py::TestDML::test_dml[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--false] [FAIL] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-pg_types-yql_syntax-client0] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-pg_types-pg_syntax-client0] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_publish_into_schemeboard_with_common_ssring.py::TestOn3DC::test_create_dirs [GOOD] >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] >> test_dml.py::TestDML::test_dml[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client11-year String-False] [GOOD] |86.5%| [TA] $(B)/ydb/tests/functional/suite_tests/test-results/py3test/{meta.json ... results_accumulator.log} >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client12-year Utf8-False] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-pg_types-pg_syntax-client0] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-yql_types-yql_syntax-client0] |86.5%| [TA] {RESULT} $(B)/ydb/tests/functional/suite_tests/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client12-year Utf8-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client13-year Date-False] >> test_copy_table.py::TestCopyTable::test_copy_table[table_all_types-pk_types12-all_types12-index12---] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-yql_types-yql_syntax-client0] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-yql_types-pg_syntax-client0] [SKIPPED] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-pg_types-yql_syntax-client0] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client0-year Int32 NOT NULL-False] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-pg_types-yql_syntax-client0] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-pg_types-pg_syntax-client0] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> 
test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLDefaultEnv::test_case [GOOD] >> test_db_counters.py::TestKqpCounters::test_case [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] |86.5%| [TA] $(B)/ydb/tests/datashard/parametrized_queries/test-results/py3test/{meta.json ... results_accumulator.log} >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-pg_types-pg_syntax-client0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_completeness_iso[v2-timestamp/completeness_iso/test.csv-csv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client0-year Int32 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client1-year Uint32 NOT NULL-False] |86.5%| [TA] {RESULT} $(B)/ydb/tests/datashard/parametrized_queries/test-results/py3test/{meta.json ... results_accumulator.log} >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_completeness_iso[v2-timestamp/completeness_iso/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_completeness_iso[v1-timestamp/completeness_iso/test.csv-csv_with_names] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] |86.5%| [TA] $(B)/ydb/tests/datashard/dml/test-results/py3test/{meta.json ... results_accumulator.log} |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client1-year Uint32 NOT NULL-False] [GOOD] >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client2-year Uint64 NOT NULL-False] |86.5%| [TA] {RESULT} $(B)/ydb/tests/datashard/dml/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_db_counters.py::TestStorageCounters::test_storage_counters[disable_separate_quotas] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] >> test_format_setting.py::TestS3::test_timestamp_completeness_iso[v1-timestamp/completeness_iso/test.csv-csv_with_names] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] >> test_format_setting.py::TestS3::test_date_time_completeness_iso[v2-date_time/completeness_iso/test.csv-csv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client2-year Uint64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client3-year Date NOT NULL-False] >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> unstable_connection.py::TestUnstableConnection::test [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004533/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk6/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004533/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk6/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 920749 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_format_setting.py::TestS3::test_date_time_completeness_iso[v2-date_time/completeness_iso/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_completeness_iso[v1-date_time/completeness_iso/test.csv-csv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client3-year Date NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client4-year Utf8 NOT NULL-False] >> 
test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> data_correctness.py::TestDataCorrectness::test [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_format_setting.py::TestS3::test_date_time_completeness_iso[v1-date_time/completeness_iso/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_null[v2-date_null/as_default/test.csv] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client4-year Utf8 NOT NULL-False] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client5-year Int64 NOT NULL-False] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] >> test_format_setting.py::TestS3::test_date_null[v2-date_null/as_default/test.csv] [GOOD] >> test_format_setting.py::TestS3::test_date_null[v2-date_null/parse_error/test.csv] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client5-year Int64 NOT NULL-False] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client6-year Int32-False] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] >> test_format_setting.py::TestS3::test_date_null[v2-date_null/parse_error/test.csv] [GOOD] >> test_format_setting.py::TestS3::test_date_null[v1-date_null/as_default/test.csv] >> 
test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client6-year Int32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client7-year Uint32-False] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_format_setting.py::TestS3::test_date_null[v1-date_null/as_default/test.csv] [GOOD] >> test_format_setting.py::TestS3::test_date_null[v1-date_null/parse_error/test.csv] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client7-year Uint32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client8-year Int64-False] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-other-admin] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-subgroup] >> test_format_setting.py::TestS3::test_date_null[v1-date_null/parse_error/test.csv] [GOOD] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v2-date_null/as_default/test.csv] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-subgroup] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[rename-admin-group] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[rename-admin-group] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_user[block] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client8-year Int64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client9-year Uint64-False] >> test_user_administration.py::test_database_admin_cant_change_database_admin_user[block] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_user[change-password] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--true] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_user[change-password] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v2-date_null/as_default/test.csv] [GOOD] >> 
test_format_setting.py::TestS3::test_date_null_with_not_null_type[v2-date_null/parse_error/test.csv] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client9-year Uint64-False] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client10-year String NOT NULL-True] >> test_copy_table.py::TestCopyTable::test_copy_table[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. >> test.py::test_run_determentistic[column] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client10-year String NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client11-year String-False] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v2-date_null/parse_error/test.csv] [GOOD] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v1-date_null/as_default/test.csv] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] >> test_ttl.py::TestTTLOnIndexedTable::test_case [GOOD] >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_tenants.py::TestTenants::test_stop_start[enable_alter_database_create_hive_first--false] [FAIL] >> test_auth_system_views.py::test_tenant_auth_groups_access[clusteradmin-True] [GOOD] >> test_auth_system_views.py::test_tenant_auth_groups_access[clusteruser-False] >> test_tenants.py::TestTenants::test_stop_start[enable_alter_database_create_hive_first--true] >> test_auth_system_views.py::test_tenant_auth_groups_access[clusteruser-False] [GOOD] >> test_auth_system_views.py::test_tenant_auth_groups_access[dbadmin-True] >> test_auth_system_views.py::test_tenant_auth_groups_access[dbadmin-True] [GOOD] >> test_auth_system_views.py::test_tenant_auth_groups_access[ordinaryuser-False] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. 
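Note on the clickbench UserWarnings above: the harness imports the bundled ydb Python SDK with deprecated global toggles (split transactions, truncated results) still in effect; the log already names the in-tree fix (adding the kikimr/public/sdk/python/ydb_v3_new_behavior PEERDIR). For locating the triggering call sites locally, a standard-library-only sketch that promotes exactly these warnings to errors, so the traceback points at the caller (message strings copied from the log; this does not change the SDK itself):

    import warnings

    # Install before importing ydb: these warnings fire at import time.
    for msg in (
        "Global allow split transaction is deprecated behaviour.",
        "Global allow truncated response is deprecated behaviour.",
    ):
        warnings.filterwarnings("error", message=msg, category=UserWarning)
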
>> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v1-date_null/as_default/test.csv] [GOOD] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v1-date_null/parse_error/test.csv] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client11-year String-False] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_auth_system_views.py::test_tenant_auth_groups_access[ordinaryuser-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client12-year Utf8-False] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> test.py::test_plans[column] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v1-date_null/parse_error/test.csv] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client12-year Utf8-False] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] >> test_format_setting.py::TestS3::test_date_null_multi[v2-date_null/as_default/multi_null.csv] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client13-year Date-False] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] >> test.py::test_run_benchmark[column] >> test_format_setting.py::TestS3::test_date_null_multi[v2-date_null/as_default/multi_null.csv] [GOOD] >> test_format_setting.py::TestS3::test_date_null_multi[v2-date_null/parse_error/multi_null.csv] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client0-year Int32 NOT NULL-False] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_null_multi[v2-date_null/parse_error/multi_null.csv] [GOOD] >> test_format_setting.py::TestS3::test_date_null_multi[v1-date_null/as_default/multi_null.csv] >> test.py::test_run_determentistic[row] |86.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client0-year Int32 NOT NULL-False] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client1-year Uint32 NOT NULL-False] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLOnIndexedTable::test_case [GOOD] >> 
test_copy_table.py::TestCopyTable::test_copy_table[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] |86.6%| [TA] $(B)/ydb/tests/functional/ttl/test-results/py3test/{meta.json ... results_accumulator.log} |86.6%| [TA] {RESULT} $(B)/ydb/tests/functional/ttl/test-results/py3test/{meta.json ... results_accumulator.log} >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_null_multi[v1-date_null/as_default/multi_null.csv] [GOOD] >> test_format_setting.py::TestS3::test_date_null_multi[v1-date_null/parse_error/multi_null.csv] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client1-year Uint32 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client2-year Uint64 NOT NULL-False] >> test_format_setting.py::TestS3::test_date_null_multi[v1-date_null/parse_error/multi_null.csv] [GOOD] >> test_format_setting.py::TestS3::test_string_not_null_multi[v2-date_null/as_default/multi_null.csv] >> test.py::test_plans[column] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> data_correctness.py::TestDataCorrectness::test [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004551/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk0/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004551/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk0/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 907539 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client2-year Uint64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client3-year Date NOT NULL-False] >> test_format_setting.py::TestS3::test_string_not_null_multi[v2-date_null/as_default/multi_null.csv] [GOOD] >> test_format_setting.py::TestS3::test_string_not_null_multi[v2-date_null/parse_error/multi_null.csv] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test.py::test_run_benchmark[row] >> test.py::test_plans[row] >> 
test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client3-year Date NOT NULL-False] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_plans[column] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client4-year Utf8 NOT NULL-False] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] >> test_format_setting.py::TestS3::test_string_not_null_multi[v2-date_null/parse_error/multi_null.csv] [GOOD] >> test_format_setting.py::TestS3::test_string_not_null_multi[v1-date_null/as_default/multi_null.csv] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client4-year Utf8 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client5-year Int64 NOT NULL-False] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] >> test_format_setting.py::TestS3::test_string_not_null_multi[v1-date_null/as_default/multi_null.csv] [GOOD] >> test_format_setting.py::TestS3::test_string_not_null_multi[v1-date_null/parse_error/multi_null.csv] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |86.6%| [TA] $(B)/ydb/tests/datashard/copy_table/test-results/py3test/{meta.json ... results_accumulator.log} |86.6%| [TA] {RESULT} $(B)/ydb/tests/datashard/copy_table/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client5-year Int64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client6-year Int32-False] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] >> test_format_setting.py::TestS3::test_string_not_null_multi[v1-date_null/parse_error/multi_null.csv] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_timestamp[v2] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client6-year Int32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client7-year Uint32-False] >> test.py::test_plans[row] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client7-year Uint32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client8-year Int64-False] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_plans[row] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. 
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client8-year Int64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client9-year Uint64-False] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client9-year Uint64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client10-year String NOT NULL-True] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_user_administration.py::test_database_admin_cant_change_database_admin_user[change-password] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client10-year String NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client11-year String-False] >> test.py::test_run_determentistic[row] [GOOD] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_user_administration.py::test_database_admin_cant_change_database_admin_user[unblock] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_owner >> test.py::test_run_determentistic[column] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client11-year String-False] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_owner [GOOD] >> test_user_administration.py::test_user_can_change_password_for_himself[dbadmin] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client12-year Utf8-False] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_user_administration.py::test_user_can_change_password_for_himself[dbadmin] [GOOD] >> test_user_administration.py::test_user_can_change_password_for_himself[ordinaryuser] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_run_determentistic[row] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior 
contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> test_user_administration.py::test_user_can_change_password_for_himself[ordinaryuser] [GOOD] >> test_create_users.py::test_create_user >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client12-year Utf8-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client13-year Date-False] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_run_determentistic[column] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client13-year Date-False] [GOOD] >> test.py::test_run_benchmark[column] [GOOD] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_run_benchmark[column] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. 
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> test_db_counters.py::TestStorageCounters::test_storage_counters[disable_separate_quotas] [GOOD] >> test.py::test_run_benchmark[row] [GOOD] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] >> test_db_counters.py::TestStorageCounters::test_storage_counters[enable_separate_quotas] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] >> test_create_users.py::test_create_user [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_run_benchmark[row] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_system_views.py::TestQueryMetricsUniqueQueries::test_case [GOOD] |86.6%| [TA] $(B)/ydb/tests/functional/clickbench/test-results/py3test/{meta.json ... results_accumulator.log} |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> test_user_administration.py::test_database_admin_can_create_user [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[add-subgroup] |86.6%| [TA] {RESULT} $(B)/ydb/tests/functional/clickbench/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[add-subgroup] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[add-user] >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[add-user] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-admin-group] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004537/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004537/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 913549 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-admin-group] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-himself] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-himself] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] >> test_create_users_strict_acl_checks.py::test_create_user >> test_tenants.py::TestTenants::test_create_create_table[enable_alter_database_create_hive_first--false] >> test_format_setting.py::TestS3::test_parquet_converters_to_timestamp[v2] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_timestamp[v1] >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok [GOOD] ------- [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/tests/fq/s3/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client13-year Date-False] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/0046fe/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_explicit_partitioning_0/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/0046fe/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_explicit_partitioning_0/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=782396) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 787887 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok [GOOD] |86.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] |86.7%| [TA] $(B)/ydb/tests/datashard/async_replication/test-results/py3test/{meta.json ... results_accumulator.log} |86.7%| [TA] {RESULT} $(B)/ydb/tests/datashard/async_replication/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_tenants.py::TestTenants::test_create_create_table[enable_alter_database_create_hive_first--false] [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_create_users_strict_acl_checks.py::test_create_user [GOOD] >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_users_groups_with_acl.py::test_query_create_group_by_domain_admin[domain_login_only--false-YDB] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::TestTenants::test_create_create_table[enable_alter_database_create_hive_first--false] [GOOD] >> ttl_delete_s3.py::TestDeleteS3Ttl::test_delete_s3_tiering [GOOD] >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_create_users_strict_acl_checks.py::test_create_user [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> ttl_delete_s3.py::TestDeleteS3Ttl::test_delete_s3_tiering [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004543/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/004543/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: 
subprocess 910646 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] >> test_format_setting.py::TestS3::test_parquet_converters_to_timestamp[v1] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_datetime[v2] >> test_users_groups_with_acl.py::test_query_create_group_by_domain_admin[domain_login_only--false-YDB] [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-himself] [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_users_groups_with_acl.py::test_query_create_group_by_domain_admin[domain_login_only--false-YDB] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] >> test_tenants.py::TestTenants::test_stop_start[enable_alter_database_create_hive_first--true] [FAIL] >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can >> test_tenants.py::TestTenants::test_when_deactivate_fat_tenant_creation_another_tenant_is_ok[enable_alter_database_create_hive_first--false] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] [FAIL] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> 
test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_format_setting.py::TestS3::test_parquet_converters_to_datetime[v2] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_datetime[v1] >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--false] [GOOD] >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] |86.7%| [TA] $(B)/ydb/tests/functional/restarts/test-results/py3test/{meta.json ... results_accumulator.log} |86.7%| [TA] {RESULT} $(B)/ydb/tests/functional/restarts/test-results/py3test/{meta.json ... results_accumulator.log} |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |86.7%| [TA] $(B)/ydb/tests/olap/ttl_tiering/test-results/py3test/{meta.json ... results_accumulator.log} |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_db_counters.py::TestStorageCounters::test_storage_counters[enable_separate_quotas] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_datetime[v1] [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_dynamic_tenants.py::test_check_access[enable_alter_database_create_hive_first--false] >> test_format_setting.py::TestS3::test_parquet_converters_to_string[v2] |86.7%| [TA] {RESULT} $(B)/ydb/tests/olap/ttl_tiering/test-results/py3test/{meta.json ... results_accumulator.log} >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] >> test_dynamic_tenants.py::test_check_access[enable_alter_database_create_hive_first--false] [GOOD] >> test_dynamic_tenants.py::test_check_access[enable_alter_database_create_hive_first--true] >> test_format_setting.py::TestS3::test_parquet_converters_to_string[v2] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_string[v1] >> test_format_setting.py::TestS3::test_parquet_converters_to_string[v1] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_utf8[v2] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] [FAIL] Test command err: contrib/python/tornado/tornado-4/tornado/gen.py:1064: DeprecationWarning: the (type, exc, tb) signature of throw() is deprecated, use the single-arg signature instead. 
contrib/python/tornado/tornado-4/tornado/gen.py:1064: DeprecationWarning: the (type, exc, tb) signature of throw() is deprecated, use the single-arg signature instead. yielded = self.gen.throw(*exc_info) >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes >> test_dynamic_tenants.py::test_check_access[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_create_and_drop_tenants[enable_alter_database_create_hive_first--false] >> test_format_setting.py::TestS3::test_parquet_converters_to_utf8[v2] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_utf8[v1] >> test_format_setting.py::TestS3::test_parquet_converters_to_utf8[v1] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_date[v2] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] [GOOD] >> test_tenants.py::TestTenants::test_when_deactivate_fat_tenant_creation_another_tenant_is_ok[enable_alter_database_create_hive_first--false] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_date[v2] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_date[v1] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::TestTenants::test_when_deactivate_fat_tenant_creation_another_tenant_is_ok[enable_alter_database_create_hive_first--false] [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] [GOOD] >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] [GOOD] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_date[v1] [GOOD] >> test_format_setting.py::TestS3::test_s3_push_down_parquet[v2] |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/py3test |86.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] 
[GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v1-std] >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_format_setting.py::TestS3::test_s3_push_down_parquet[v2] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v1-fifo] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/py3test |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/py3test >> test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v0-fifo] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v0-std] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/py3test |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/sqs/large/py3test |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-fifo] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-fifo] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates [GOOD] >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_format_setting.py::TestS3::test_s3_push_down_parquet[v2] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00474d/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_format_setting/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/cl3w/00474d/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_format_setting/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=690546) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1127: ResourceWarning: subprocess 698203 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-fifo] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-fifo] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] [GOOD] >> 
test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-fifo] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-fifo] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-fifo] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1] [GOOD] >> test_dynamic_tenants.py::test_create_and_drop_tenants[enable_alter_database_create_hive_first--false] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters [GOOD] |86.8%| [TA] $(B)/ydb/tests/fq/s3/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] |86.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v1] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-fifo] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] [GOOD] |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_dynamic_tenants.py::test_create_and_drop_tenants[enable_alter_database_create_hive_first--false] [GOOD] |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] [GOOD] |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] [GOOD] >> test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v1-std] [GOOD] |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-fifo] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1] |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> 
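The subprocess.py:1127 ResourceWarning earlier in this run ("subprocess 698203 is still running") is raised when a Popen object is garbage-collected while its child is still alive. A minimal sketch of the explicit cleanup that avoids it; the command and timeout are placeholders, not taken from the tests:

import subprocess

proc = subprocess.Popen(["sleep", "1"])  # placeholder child process
try:
    # Reap the child so the Popen is never collected while running.
    proc.wait(timeout=30)
except subprocess.TimeoutExpired:
    proc.kill()
    proc.wait()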
test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_yandex_audit.py::TestCloudEvents::test_create_update_delete_one_queue >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] [GOOD] |86.9%| [TA] {RESULT} $(B)/ydb/tests/fq/s3/test-results/py3test/{meta.json ... results_accumulator.log} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] |86.9%| [TA] $(B)/ydb/tests/functional/tenants/test-results/py3test/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] [GOOD] >> test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1] |86.9%| [TA] {RESULT} $(B)/ydb/tests/functional/tenants/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-fifo] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( contrib/python/PyHamcrest/py3/hamcrest/core/base_description.py:43: DeprecationWarning: Call append_description_of instead of append_value >> test_actorsystem.py::TestWithComputeNodeWith10Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith13Cpu::test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] >> test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith19Cpu::test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] >> test_actorsystem.py::TestWithComputeNodeWith10Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith13Cpu::test [GOOD] >> 
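The PyHamcrest DeprecationWarning above (base_description.py:43, "Call append_description_of instead of append_value") is a rename inside the Description API. A hedged sketch of the replacement in a custom matcher; the IsPositive matcher itself is invented for illustration:

from hamcrest.core.base_matcher import BaseMatcher

class IsPositive(BaseMatcher):  # example matcher, not from the test suite
    def _matches(self, item):
        return isinstance(item, (int, float)) and item > 0

    def describe_to(self, description):
        # append_description_of is the supported replacement for the
        # deprecated append_value.
        description.append_text("a number greater than ").append_description_of(0)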
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith21Cpu::test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] [GOOD] >> test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith38Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith19Cpu::test [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithStorageNodeWith37Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith16Cpu::test |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith21Cpu::test [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith4Cpu::test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithComputeNodeWith32Cpu::test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] >> test_actorsystem.py::TestWithComputeNodeWith16Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith16Cpu::test [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] >> test_actorsystem.py::TestWithComputeNodeWith11Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith14Cpu::test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can [GOOD] |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/py3test >> test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v1-std] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith4Cpu::test [GOOD] |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/py3test >> 
test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith27Cpu::test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v0] >> test_actorsystem.py::TestWithStorageNodeWith27Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith16Cpu::test [GOOD] |86.9%| [TA] $(B)/ydb/tests/datashard/split_merge/test-results/py3test/{meta.json ... results_accumulator.log} >> test_actorsystem.py::TestWithStorageNodeWith10Cpu::test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith11Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith1Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith14Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith7Cpu::test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith21Cpu::test >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith35Cpu::test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] [GOOD] |86.9%| [TA] {RESULT} $(B)/ydb/tests/datashard/split_merge/test-results/py3test/{meta.json ... results_accumulator.log} >> test_actorsystem.py::TestWithStorageNodeWith10Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithStorageNodeWith22Cpu::test |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith38Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith7Cpu::test [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] >> test_actorsystem.py::TestWithStorageNodeWith17Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith27Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithStorageNodeWith27Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith1Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith32Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith37Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithStorageNodeWith5Cpu::test |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/py3test >> test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v0-fifo] [GOOD] |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith2Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith5Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith21Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith32Cpu::test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |86.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,10,1000000,0] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,0,1000000,0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] >> test_actorsystem.py::TestWithComputeNodeWith17Cpu::test >> 
test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] >> test_actorsystem.py::TestWithStorageNodeWith17Cpu::test [GOOD] >> test_yandex_audit.py::TestCloudEvents::test_create_update_delete_one_queue [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith24Cpu::test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> test_actorsystem.py::TestWithStorageNodeWith22Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith12Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith15Cpu::test >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,10,1000000,0] [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,10,1000000,0.5] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,0,1000000,0] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,0,0] >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith5Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithHybridNodeWith5Cpu::test [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith2Cpu::test [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,10,1000000,0.5] [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,1000,0,0] >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v1-fifo] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,0,0] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,0,0.5] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_actorsystem.py::TestWithHybridNodeWith6Cpu::test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,1000,0,0] [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,1000,0,0.5] >> test_actorsystem.py::TestWithComputeNodeWith17Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith11Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith12Cpu::test [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,0,0.5] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,100,0] >> test_actorsystem.py::TestWithHybridNodeWith15Cpu::test [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,1000,0,0.5] [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,1000,100,0] >> 
test_actorsystem.py::TestWithComputeNodeWith8Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith28Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith20Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith33Cpu::test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithStorageNodeWith28Cpu::test >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,100,0] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,100,0.5] >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,1000,100,0] [GOOD] >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,1000,100,0.5] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithHybridNodeWith6Cpu::test [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith24Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith35Cpu::test [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,100,0.5] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,1000000,0] >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithStorageNodeWith11Cpu::test [GOOD] >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,1000,100,0.5] [GOOD] >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v1-fifo] [GOOD] >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v1-std] >> test_actorsystem.py::TestWithComputeNodeWith22Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith13Cpu::test >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v1-std] [GOOD] >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v0-fifo] >> test_actorsystem.py::TestWithComputeNodeWith8Cpu::test [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::RestoreFullJsonVariants[10,true,1,1000,100,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 7024, MsgBus: 29506 2025-06-30T09:38:05.291641Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521674370448133245:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:38:05.291660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003da0/r3tmp/tmpKOFBgM/pdisk_1.dat TServer::EnableGrpc on GrpcPort 7024, node 1 2025-06-30T09:38:05.371126Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:38:05.371263Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521674370448133221:2080] 1751276285291367 != 1751276285291370 2025-06-30T09:38:05.376970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:38:05.376982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:38:05.376985Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:38:05.377031Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29506 TClient is connected to server localhost:29506 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-30T09:38:05.441399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:38:05.441431Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:38:05.442170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:38:05.453336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 10); 2025-06-30T09:38:05.751552Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521674370448133846:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:05.751580Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:05.798842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:38:05.824807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:38:05.824876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:38:05.824957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:38:05.824992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:38:05.825024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:38:05.825052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:38:05.825084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:38:05.825108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:38:05.825130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:38:05.825158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:38:05.825187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:38:05.825214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7521674370448133978:2294];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:38:05.830454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:38:05.830516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:38:05.830591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:38:05.830623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:38:05.830652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:38:05.830675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:38:05.830710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:38:05.830760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:38:05.830790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:38:05.830812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:38:05.830835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:38:05.830865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7521674370448133995:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:38:05.837166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7521674370448134045:2303];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:38:05.837205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7521674370448134045:2303];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fl ... k, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:38:10.780285Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:10.780460Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:10.780572Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:10.780708Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:10.780818Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:10.781158Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:10.781297Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:10.781500Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:10.781645Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:10.781649Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `SCAN_FIRST_LEVEL_ONLY`=`false`, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`true`, `COLUMNS_LIMIT`=`1`, `SPARSED_DETECTOR_KFF`=`1000`, `MEM_LIMIT_CHUNK`=`100`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:38:10.787713Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521674393808445856:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:10.787744Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:10.790869Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:38:10.794599Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:10.794756Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:10.795100Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:10.795301Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:10.795453Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:10.795611Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:10.795784Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:10.795829Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:10.795989Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:10.796071Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1", "d" : null, "e.v" : {"c" : 1, "e" : {"c.a" : 2}}}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3", "d" : "d3", "e" : ["a", {"v" : ["c", 5]}]}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}')) 2025-06-30T09:38:10.803081Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521674393808445923:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:10.803134Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:10.803161Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7521674393808445928:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:10.804235Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:38:10.807543Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7521674393808445930:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:38:10.882781Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7521674393808445981:2672] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:38:10.908163Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:38:10.908277Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:38:10.908334Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:38:10.908348Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521674393808445478:2300];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=17;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037889,72075186224037894;receive=72075186224037897; 2025-06-30T09:38:10.908358Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521674393808445478:2300];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=18;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037889,72075186224037894;receive=72075186224037897; 2025-06-30T09:38:10.908382Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521674393808445478:2300];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=20;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037889;receive=72075186224037894; 2025-06-30T09:38:10.908396Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[6:7521674393808445478:2300];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037893;local_tx_no=21;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037889;receive=72075186224037894; 2025-06-30T09:38:10.908461Z node 6 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` ORDER BY Col1; COMPARE: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":[\"a\",{\"v\":[\"c\",\"5\"]}]}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] OUTPUT: 
[[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":[\"a\",{\"v\":[\"c\",\"5\"]}]}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]] INDEX:0/0/0 HEADER:0/0/0 >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,1000000,0] [GOOD] >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,1000000,0.5] >> test_actorsystem.py::TestWithStorageNodeWith18Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith23Cpu::test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig2Nodes::test >> test_actorsystem.py::TestWithStorageNodeWith32Cpu::test [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v0-fifo] [GOOD] >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig9Nodes::test >> test_actorsystem.py::TestWithHybridNodeWith7Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith20Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithStorageNodeWith6Cpu::test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,1000000,0.5] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith30Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith39Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith13Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::DoubleFilterReduceScopeVariants[10,false,0,10,1000000,0.5] [GOOD] Test command err: Trying to start YDB, gRPC: 10941, MsgBus: 5904 2025-06-30T09:38:05.439696Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7521674371069992274:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-30T09:38:05.439725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/cl3w/003ba4/r3tmp/tmpzE4aNB/pdisk_1.dat 2025-06-30T09:38:05.512603Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-30T09:38:05.512697Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7521674371069992252:2080] 1751276285439552 != 1751276285439555 TServer::EnableGrpc on GrpcPort 10941, node 1 2025-06-30T09:38:05.527521Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-30T09:38:05.527534Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-30T09:38:05.527536Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-30T09:38:05.527585Z node 1 :NET_CLASSIFIER 
ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-30T09:38:05.542787Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-30T09:38:05.542820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-30T09:38:05.544036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5904 TClient is connected to server localhost:5904 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-30T09:38:05.619669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, Col3 UTF8, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 10); 2025-06-30T09:38:05.963887Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7521674371069992876:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:05.963936Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:06.011325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-30T09:38:06.046566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:38:06.046632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:38:06.046712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:38:06.046765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:38:06.046789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:38:06.046822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:38:06.046851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:38:06.046875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:38:06.046898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:38:06.046926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:38:06.046958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:38:06.046993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7521674375364960304:2295];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:38:06.047919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-30T09:38:06.047959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-30T09:38:06.047993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-30T09:38:06.048022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-30T09:38:06.048044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-30T09:38:06.048068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-30T09:38:06.048094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-30T09:38:06.048121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-30T09:38:06.048149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-30T09:38:06.048173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-30T09:38:06.048200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-30T09:38:06.048225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7521674375364960303:2294];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2025-06-30T09:38:06.048824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-30T09:38:06.048840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06- ... ctors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521674405310977442:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:13.125333Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:13.128836Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:13.129171Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:13.129338Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:13.129482Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:13.129615Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:13.129749Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:13.129878Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:13.130227Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:13.130381Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-30T09:38:13.130510Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; EXECUTE: ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`false`, `COLUMNS_LIMIT`=`0`, `SPARSED_DETECTOR_KFF`=`10`, `MEM_LIMIT_CHUNK`=`1000000`, `OTHERS_ALLOWED_FRACTION`=`0.5`) 2025-06-30T09:38:13.145195Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-30T09:38:13.146796Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521674405310977508:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:13.146929Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:13.150420Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:13.150610Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:13.150786Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:13.150930Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:13.151072Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:13.151205Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:13.151344Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:13.151496Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:13.151880Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-30T09:38:13.152031Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2, Col3) VALUES(1u, JsonDocument('{"a" : "value_a", "b" : "b1", "c" : "c1"}'), "value1"), (2u, JsonDocument('{"a" : "value_a"}'), "value1"), (3u, JsonDocument('{"a" : "value_a", "b" : "value_b"}'), "value2"), (4u, JsonDocument('{"b" : "value_b", "a" : "a4"}'), "value4") 2025-06-30T09:38:13.158656Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521674405310977575:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:13.158686Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:13.158851Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7521674405310977580:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-30T09:38:13.159931Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-30T09:38:13.163446Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7521674405310977582:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-30T09:38:13.256389Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7521674405310977633:2678] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-30T09:38:13.287907Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:38:13.287919Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:38:13.288044Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-30T09:38:13.288329Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[7:7521674405310977096:2294];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037894;local_tx_no=17;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037893,72075186224037897;receive=72075186224037889; 2025-06-30T09:38:13.288377Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[7:7521674405310977096:2294];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037894;local_tx_no=18;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037893,72075186224037897;receive=72075186224037889; 2025-06-30T09:38:13.288415Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[7:7521674405310977096:2294];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037894;local_tx_no=20;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037893;receive=72075186224037897; 2025-06-30T09:38:13.288426Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[7:7521674405310977096:2294];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037894;local_tx_no=21;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037893;receive=72075186224037897; 2025-06-30T09:38:13.288531Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; EXECUTE: PRAGMA OptimizeSimpleILIKE; PRAGMA AnsiLike;SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "value_a" AND Col3 = "value2" ORDER BY Col1; COMPARE: [[3u;["{\"a\":\"value_a\",\"b\":\"value_b\"}"];["value2"]]] OUTPUT: [[3u;["{\"a\":\"value_a\",\"b\":\"value_b\"}"];["value2"]]] INDEX:8/0/0 HEADER:0/0/0 2025-06-30T09:38:13.463793Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> 
test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithComputeNodeWith18Cpu::test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithStorageNodeWith38Cpu::test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith16Cpu::test >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir >> test_actorsystem.py::TestWithHybridNodeWith7Cpu::test [GOOD] >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store >> test_actorsystem.py::TestWithComputeNodeWith22Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith25Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith28Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith28Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith18Cpu::test [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithStorageNodeWith12Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith6Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith9Cpu::test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithComputeNodeWith18Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith8Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith23Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith33Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith16Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithHybridNodeWith21Cpu::test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> 
test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithComputeNodeWith14Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith8Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith9Cpu::test [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] >> test_actorsystem.py::TestWithHybridNodeWith36Cpu::test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithStorageNodeWith12Cpu::test [GOOD] >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir [GOOD] >> test_distconf_reassign_state_storage.py::TestKiKiMRDistConfReassignStateStorageMultipleRingGroup::test_cluster_change_state_storage >> test_actorsystem.py::TestWithHybridNodeWith30Cpu::test [GOOD] >> test_distconf_reassign_state_storage.py::TestKiKiMRDistConfReassignStateStorageToTheSameConfig::test_cluster_change_state_storage |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithHybridNodeWith25Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith14Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith23Cpu::test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithStorageNodeWith29Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith29Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith33Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith19Cpu::test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig3Nodes::test >> test_actorsystem.py::TestWithStorageNodeWith7Cpu::test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith21Cpu::test [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 >> test_actorsystem.py::TestWithComputeNodeWith19Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith24Cpu::test >> test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata >> test_actorsystem.py::TestWithHybridNodeWith17Cpu::test >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata [GOOD] >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_distconf >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig2Nodes::test [GOOD] >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store [GOOD] >> 
test_actorsystem.py::TestWithComputeNodeWith39Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith19Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith38Cpu::test [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithHybridNodeWith9Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith10Cpu::test >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith13Cpu::test |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v0-std] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith7Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith19Cpu::test [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v0-fifo] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith23Cpu::test [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith15Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith17Cpu::test [GOOD] |87.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig2Nodes::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith1Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith34Cpu::test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig8Nodes::test >> test_actorsystem.py::TestWithHybridNodeWith9Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith22Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith10Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith24Cpu::test [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig9Nodes::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith29Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith8Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith29Cpu::test [GOOD] >> test_configuration_version.py::TestConfigurationVersion::test_configuration_version |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> 
test_actorsystem.py::TestWithComputeNodeWith15Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith13Cpu::test [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith31Cpu::test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithStorageNodeWith33Cpu::test [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith36Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith26Cpu::test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith8Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith1Cpu::test [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig9Nodes::test [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithHybridNodeWith22Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith1Cpu::test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_seed_nodes >> test_actorsystem.py::TestWithStorageNodeWith14Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith24Cpu::test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig1Node::test >> test_actorsystem.py::TestWithHybridNodeWith18Cpu::test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes [GOOD] |87.1%| [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig6Nodes::test >> test_actorsystem.py::TestWithHybridNodeWith11Cpu::test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig3Nodes::test [GOOD] >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata >> test_actorsystem.py::TestWithComputeNodeWith3Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith2Cpu::test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith9Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith2Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith39Cpu::test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithStorageNodeWith14Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith18Cpu::test [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithHybridNodeWith26Cpu::test [GOOD] >> test_distconf_reassign_state_storage.py::TestKiKiMRDistConfReassignStateStorage::test_cluster_change_state_storage >> test_actorsystem.py::TestWithComputeNodeWith1Cpu::test [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithComputeNodeWith15Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith11Cpu::test [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithComputeNodeWith34Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith31Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith24Cpu::test [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/py3test >> test_leader_start_inflight.py::TestSqsMultinodeCluster::test_limit_leader_start_inflight[tables_format_v0-std] [GOOD] >> test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata >> test_actorsystem.py::TestWithStorageNodeWith9Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith20Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith23Cpu::test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/postgres_integrations/go-libpq/py3test >> test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith25Cpu::test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithComputeNodeWith20Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith2Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith3Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith2Cpu::test [GOOD] >> test_distconf_reassign_state_storage.py::TestKiKiMRDistConfReassignStateStorageBadCases::test_cluster_change_state_storage >> 
test_quota_exhaustion.py::TestYdbWorkload::test_delete_after_overloaded [SKIPPED] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith18Cpu::test [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> test_quota_exhaustion.py::TestYdbWorkload::test_delete_after_overloaded [SKIPPED] >> test_actorsystem.py::TestWithStorageNodeWith34Cpu::test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_20_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 20] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_2_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 2] >> test_actorsystem.py::TestWithHybridNodeWith37Cpu::test |87.1%| [TA] $(B)/ydb/tests/functional/sqs/large/test-results/py3test/{meta.json ... results_accumulator.log} >> test_actorsystem.py::TestWithStorageNodeWith15Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith20Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig3Nodes::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith30Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith4Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith9Cpu::test [GOOD] >> test_distconf_reassign_state_storage.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( run test with cloud_id=CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185 folder_id=folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185 iam_token=usr_acc_e4aa96fb-5595-11f0-b037-d00d1d966185 cloud_account=acc_e4aa96fb-5595-11f0-b037-d00d1d966185 2025-06-30T09:37:52.346042Z: {"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185]","tx_id":"281474976720681","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DIRECTORY","component":"schemeshard"} ======================================== 2025-06-30T09:37:52.378034Z: 
{"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185/000000000000000100uv]","tx_id":"281474976720687","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DIRECTORY","component":"schemeshard"} ======================================== 2025-06-30T09:37:52.386456Z: {"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185/000000000000000100uv/v2]","tx_id":"281474976720688","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DIRECTORY","component":"schemeshard"} ======================================== 2025-06-30T09:37:52.417138Z: {"request_id":"5d65b7e2-60950b81-f5f27729-554c52b3","cloud_id":"CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","status":"SUCCESS","account":"CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","subject":"fake_user_sid@as","operation":"create_queue","component":"ymq","folder_id":"folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185"} ======================================== 2025-06-30T09:37:52.727526Z: {"request_id":"5d65b7e2-60950b81-f5f27729-554c52b3","permission":"ymq.queues.create","id":"14604340088395335202$CreateMessageQueue$1751276272361","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","masked_token":"CLOU****6185 (4BAB647E)","auth_type":"{none}","created_at":"1751276272361","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000100uv","labels":"{}","operation":"CreateMessageQueue","folder_id":"folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","component":"ymq"} ======================================== 2025-06-30T09:37:54.757457Z: {"request_id":"66be9125-8f8b01a-8e46aad2-8091baf","permission":"ymq.queues.setAttributes","id":"15686978310325812714$UpdateMessageQueue$1751276273454","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","masked_token":"CLOU****6185 (4BAB647E)","auth_type":"{none}","created_at":"1751276273454","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000100uv","labels":"{}","operation":"UpdateMessageQueue","folder_id":"folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","component":"ymq"} ======================================== 2025-06-30T09:37:54.757471Z: {"request_id":"a162e660-28b54ae9-ab70d504-d54a1aed","permission":"ymq.queues.setAttributes","id":"18338132376173557304$UpdateMessageQueue$1751276274486","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","masked_token":"CLOU****6185 (4BAB647E)","auth_type":"{none}","created_at":"1751276274486","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000100uv","labels":"{}","operation":"UpdateMessageQueue","folder_id":"folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","component":"ymq"} ======================================== 2025-06-30T09:37:55.524869Z: {"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185/000000000000000100uv/v2]","tx_id":"281474976720713","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"DROP DIRECTORY","component":"schemeshard"} ======================================== 2025-06-30T09:37:55.536690Z: 
{"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185/000000000000000100uv]","tx_id":"281474976720714","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"DROP DIRECTORY","component":"schemeshard"} ======================================== 2025-06-30T09:37:55.547179Z: {"request_id":"ffc6674f-37266f48-1989266b-92f39b75","cloud_id":"CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","status":"SUCCESS","account":"CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","subject":"fake_user_sid@as","queue":"000000000000000100uv","resource_id":"000000000000000100uv","operation":"delete_queue","component":"ymq","folder_id":"folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185"} ======================================== 2025-06-30T09:37:56.765980Z: {"request_id":"ffc6674f-37266f48-1989266b-92f39b75","permission":"ymq.queues.delete","id":"14997190253088121166$DeleteMessageQueue$1751276275503","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","masked_token":"CLOU****6185 (4BAB647E)","auth_type":"{none}","created_at":"1751276275503","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000100uv","labels":"{}","operation":"DeleteMessageQueue","folder_id":"folder_acc_e4aa96fb-5595-11f0-b037-d00d1d966185","component":"ymq"} ======================================== ======================================== >> test_actorsystem.py::TestWithStorageNodeWith30Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith27Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith20Cpu::test [GOOD] >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig1Node::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith23Cpu::test [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[lz4_compression-COMPRESSION = "lz4"] >> test_actorsystem.py::TestWithHybridNodeWith12Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith35Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith25Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith25Cpu::test [GOOD] |87.1%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/large/test-results/py3test/{meta.json ... 
results_accumulator.log} >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_15_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 15] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> test_actorsystem.py::TestWithComputeNodeWith4Cpu::test [GOOD] >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig8Nodes::test [GOOD] >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata [GOOD] >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_distconf [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_12_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 12] >> test_actorsystem.py::TestWithStorageNodeWith15Cpu::test [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_4_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 4] >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata [GOOD] |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithHybridNodeWith12Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith39Cpu::test [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_6_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 6] >> test_actorsystem.py::TestWithHybridNodeWith32Cpu::test |87.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithHybridNodeWith27Cpu::test [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig8Nodes::test [GOOD] >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig6Nodes::test [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith20Cpu::test [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_8_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 8] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_distconf [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith9Cpu::test [GOOD] >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test 
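[editor note] The SQS suites above repeatedly flag ydb/tests/library/sqs/requests_client.py:140 with a DeprecationWarning: logger.warn is the deprecated alias, and the harness itself suggests logger.warning. A minimal sketch of that one-line fix follows; the logger name and placeholder values are hypothetical, only the warn-to-warning rename comes from the log:

    import logging

    logger = logging.getLogger("sqs.requests_client")  # hypothetical logger name
    logging.basicConfig(level=logging.INFO)
    code, reason, text = 500, "InternalError", "retry later"  # placeholder values

    # Deprecated spelling that triggers the warning quoted above:
    #   logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(code, reason, text))

    # Supported spelling (logging.Logger.warning), with lazy %-style arguments:
    logger.warning("Last request failed with code %s, reason '%s' and text '%s'",
                   code, reason, text)
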
|87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithComputeNodeWith20Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith34Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith25Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith26Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith30Cpu::test [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig1Node::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith5Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith30Cpu::test [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_compression-COMPRESSION = "zstd"] >> test_quota_exhaustion.py::TestYdbWorkload::test_duplicates >> test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata [GOOD] >> test_quota_exhaustion.py::TestYdbWorkload::test_delete |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_18_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 18] >> test_actorsystem.py::TestWithHybridNodeWith37Cpu::test [GOOD] >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_seed_nodes [GOOD] >> docker_wrapper_test.py::test_pg_generated[Test64BitErrorChecking] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith15Cpu::test [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith5Cpu::test [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith12Cpu::test [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_actorsystem.py::TestWithHybridNodeWith28Cpu::test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith23Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith35Cpu::test [GOOD] >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf_generate_config.py::TestKiKiMRDistConfGenerateConfig6Nodes::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith31Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith26Cpu::test [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith32Cpu::test [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> 
test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_seed_nodes [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-fifo] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-std] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_quota_exhaustion.py::TestYdbWorkload::test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-fifo] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith3Cpu::test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-fifo] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-fifo] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_actorsystem.py::TestWithComputeNodeWith6Cpu::test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_ends_request_after_kill >> test_actorsystem.py::TestWithStorageNodeWith35Cpu::test >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith26Cpu::test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_actorsystem.py::TestWithStorageNodeWith31Cpu::test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_actorsystem.py::TestWithStorageNodeWith3Cpu::test [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith6Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith28Cpu::test [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_actorsystem.py::TestWithHybridNodeWith38Cpu::test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith26Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith31Cpu::test [GOOD] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown [GOOD] 
|87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-std] >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-std] |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_actorsystem.py::TestWithStorageNodeWith4Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith36Cpu::test |87.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[kick_tablets] >> test_actorsystem.py::TestWithHybridNodeWith33Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith26Cpu::test [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_distconf_reassign_state_storage.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_actorsystem.py::TestWithStorageNodeWith4Cpu::test [GOOD] >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithComputeNodeWith6Cpu::test [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_actorsystem.py::TestWithStorageNodeWith31Cpu::test [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf_reassign_state_storage.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith35Cpu::test [GOOD] >> test_configuration_version.py::TestConfigurationVersion::test_configuration_version [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith29Cpu::test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-fifo] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/sqs/multinode/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[stop_node] >> test_actorsystem.py::TestWithHybridNodeWith38Cpu::test [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-fifo] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith33Cpu::test [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithComputeNodeWith31Cpu::test [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-std] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith4Cpu::test [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_configuration_version.py::TestConfigurationVersion::test_configuration_version [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithComputeNodeWith36Cpu::test [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithComputeNodeWith26Cpu::test [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-std] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] >> test_actorsystem.py::TestWithHybridNodeWith34Cpu::test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith31Cpu::test [GOOD] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_actorsystem.py::TestWithHybridNodeWith29Cpu::test [GOOD] >> 
test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-std] |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithStorageNodeWith36Cpu::test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-std] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-fifo] >> test_actorsystem.py::TestWithHybridNodeWith39Cpu::test |87.3%| [TA] $(B)/ydb/tests/functional/cms/test-results/py3test/{meta.json ... results_accumulator.log} |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-std] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-fifo] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus [GOOD] |87.4%| [TA] {RESULT} $(B)/ydb/tests/functional/cms/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_actorsystem.py::TestWithHybridNodeWith34Cpu::test [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith29Cpu::test [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] [GOOD] >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith37Cpu::test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithStorageNodeWith36Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith39Cpu::test [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-std] [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-std] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_2_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 2] [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> 
alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[lz4_compression-COMPRESSION = "lz4"] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_10_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 10] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-fifo] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith34Cpu::test [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_4_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 4] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-fifo] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_15_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 15] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_16_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 16] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithHybridNodeWith3Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith37Cpu::test [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-fifo] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_6_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 6] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] [GOOD] |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith36Cpu::test [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithHybridNodeWith3Cpu::test [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_compression-COMPRESSION = "zstd"] [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-std] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith3Cpu::test [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_12_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 12] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_13_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 13] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_20_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 20] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_21_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 21] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/sqs/with_quotas/py3test |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_8_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 8] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-fifo] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[kick_tablets] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-fifo] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD] >> alter_compression.py::TestAlterCompression::test_availability_data >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/delete/py3test |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_ends_request_after_kill [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] >> test_log_scenario.py::TestLogScenario::test_log_uniform |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/py3test |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-fifo] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/bridge/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/delete/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] [GOOD] >> 
test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v1] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithComputeNodeWith37Cpu::test [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-std] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-fifo] [GOOD] >> test_delete_by_explicit_row_id.py::TestDeleteByExplicitRowId::test_delete_row_by_explicit_row_id >> test_quota_exhaustion.py::TestYdbWorkload::test [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_all_types-pk_types7-all_types7-index7---] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates0-no_updates] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/bridge/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/delete/py3test |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/delete/py3test |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-std] [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-fifo] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] [GOOD] >> 
test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-fifo] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[stop_node] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_18_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 18] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_19_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 19] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/delete/py3test |87.5%| [TA] $(B)/ydb/tests/functional/autoconfig/test-results/py3test/{meta.json ... results_accumulator.log} >> test_log_scenario.py::TestLogScenario::test_log_deviation[1051200] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test >> test_log_scenario.py::TestLogScenario::test_log_deviation[180] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/delete/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_ends_request_after_kill [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-std] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] [GOOD] |87.5%| [TA] {RESULT} $(B)/ydb/tests/functional/autoconfig/test-results/py3test/{meta.json ... 
results_accumulator.log} |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[kick_tablets] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_10_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 10] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_11_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 11] |87.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/delete/py3test |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] [GOOD] >> upgrade_to_internal_path_id.py::TestUpgradeToInternalPathId::test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates1-multiple_primary_piles_in_request] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--] >> test_delete_all_after_inserts.py::TestDeleteAllAfterInserts::test_delete_all_rows_after_several_inserts [SKIPPED] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> test_quota_exhaustion.py::TestYdbWorkload::test [GOOD] Test command err: upsert #0 ok, result: [] Quota exceeded False upsert #1 ok, result: [] Quota exceeded False upsert #2 ok, result: [] Quota exceeded False upsert #3 ok, result: [] Quota exceeded False upsert #4 ok, result: [] Quota exceeded False upsert #5 ok, result: [] Quota exceeded False upsert #6 ok, result: [] Quota exceeded False upsert #7 ok, result: [] Quota exceeded False upsert #8 ok, result: [] Quota exceeded False upsert #9 ok, result: [] Quota exceeded False upsert #10 ok, result: [] Quota exceeded False upsert #11 ok, result: [] Quota exceeded False upsert: got overload issue >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/delete/py3test >> test_delete_all_after_inserts.py::TestDeleteAllAfterInserts::test_delete_all_rows_after_several_inserts [SKIPPED] >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates2-no_primary_pile_in_result] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/py3test |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/py3test |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/bridge/py3test >> 
alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_bridge.py::TestBridgeBasic::test_update_and_get_cluster_state ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-std] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/bridge/py3test |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] >> alter_compression.py::TestAlterCompression::test_availability_data [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/delete/py3test |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-std] [GOOD] >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates4-invalid_pile_id] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-fifo] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v0] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] >> 
test_bridge.py::TestBridgeValidation::test_invalid_updates[updates3-duplicate_pile_update] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] >> zip_bomb.py::TestZipBomb::test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_directory_from_leaf_success |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-std] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[stop_node] [GOOD] |87.6%| [TA] $(B)/ydb/tests/functional/sqs/with_quotas/test-results/py3test/{meta.json ... results_accumulator.log} >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] |87.6%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/with_quotas/test-results/py3test/{meta.json ... results_accumulator.log} >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAlterCompression::test_availability_data [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_directory_from_leaf_success [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_table_that_doesnt_exist_failure [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_16_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 16] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_17_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 17] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> 
alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_13_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 13] [GOOD] |87.6%| [TA] $(B)/ydb/tests/functional/serverless/test-results/py3test/{meta.json ... results_accumulator.log} >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_14_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 14] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok >> test_distconf_reassign_state_storage.py::TestKiKiMRDistConfReassignStateStorageToTheSameConfig::test_cluster_change_state_storage [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-std] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_21_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 21] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_all_types-pk_types7-all_types7-index7---] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] |87.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success >> 
test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok [GOOD] >> test_distconf_reassign_state_storage.py::TestKiKiMRDistConfReassignStateStorage::test_cluster_change_state_storage [GOOD]